Dataset schema, one record per repository:

| Field | Type | Observed range / distinct values |
| --- | --- | --- |
| repo_name | string | length 5–114 |
| repo_url | string | length 24–133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string (categorical) | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string (categorical) | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string (categorical) | 115 values |
| files | list of file records | length 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
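As a quick illustration of consuming this schema, the sketch below streams the dataset with the Hugging Face `datasets` library. The dataset ID `"user/repo-level-code"` is a placeholder (this card does not state one), so treat this as a hedged sketch rather than canonical loading code.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub
# under the hypothetical ID "user/repo-level-code" -- substitute the real ID.
from datasets import load_dataset

ds = load_dataset("user/repo-level-code", split="train", streaming=True)

row = next(iter(ds))              # one repository-level record
print(row["repo_name"])           # e.g. "gheed87/opinion-mining"
print(row["num_files"], "files")  # matches len(row["files"])
```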
Example record (scalar fields; the `files` list follows below):

| Field | Value |
| --- | --- |
| repo_name | gheed87/opinion-mining |
| repo_url | https://github.com/gheed87/opinion-mining |
| snapshot_id | 524c960dbd418e2aac5c5ddca4dfc5036f03bdf5 |
| revision_id | d21f152aeb9adc4d31aa2717cd3869ecffdd26f9 |
| directory_id | 6512339be4b91f73747969f4d589571bda3ecea2 |
| branch_name | refs/heads/master |
| visit_date | 2020-05-17T15:26:07.974919 |
| revision_date | 2014-05-26T14:02:34 |
| committer_date | 2014-05-26T14:02:34 |
| github_id | 18,589,668 |
| star_events_count | 2 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_created_at | 2014-04-09T07:56:54 |
| gha_updated_at | 2014-05-19T15:13:26 |
| gha_pushed_at | 2014-05-19T15:13:26 |
| gha_language | C++ |
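The scalar columns double as filter keys. A hedged example, reusing the streaming `ds` from the sketch above, keeps only repositories GitHub labels as C++ with at least one star event:

```python
# Filter on scalar columns; field names come from the schema table above.
cpp_repos = ds.filter(
    lambda r: r["gha_language"] == "C++" and r["star_events_count"] >= 1
)
for r in cpp_repos.take(3):  # .take() limits a streaming (IterableDataset) split
    print(r["repo_name"], r["star_events_count"])
```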
[ { "alpha_fraction": 0.4990711510181427, "alphanum_fraction": 0.5067206025123596, "avg_line_length": 47.62234115600586, "blob_id": "cebf63c566a7939e35ae436f14788b4510b66481", "content_id": "1e389fa52cf5c3fa958cfe2aee448dac1c2a4a20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9151, "license_type": "no_license", "max_line_length": 155, "num_lines": 188, "path": "/foursquare-sentiment-mining/miner/sentiwords.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 12.05.2014\n\n@author: schieberle\n'''\nimport re\nimport sys\nimport codecs\n\nclass sentiwords(object):\n def __init__(self):\n\n self.lexicon_avg_score = []\n \n self.lexiconRaw = {}\n self.lexicon = {}\n \n def parseRawSentiWordNet_en(self, fname=r'.\\lib\\SentimentWords\\en\\SentiWordNet_3.0.0_20130122.txt', verbose=False):\n \"\"\"\n Parse SentiWordNet from txt file to a list of dictionaries and stores the result in self.lexicon\n \"\"\"\n \n \"\"\"Parse Data from File into a dictionary. The key of the dictionary is a tuple of pos and the synset ID (offset)\"\"\"\n if verbose: print \"Parsing SentiWordnet File...\"\n lines = codecs.open(fname, \"r\", \"utf8\").read().splitlines()\n lines = filter((lambda x : not re.search(r\"^\\s*#\", x)), lines)\n for i, line in enumerate(lines):\n fields = re.split(r\"\\t+\", line)\n fields = map(unicode.strip, fields)\n try: \n pos, offset, pos_score, neg_score, synset_terms, gloss = fields\n except:\n sys.stderr.write(\"Line %s formatted incorrectly: %s\\n\" % (i, line))\n if pos and offset:\n offset = int(offset)\n self.lexiconRaw[(pos, offset)] = {'synset_terms': synset_terms, 'pos_score': float(pos_score), \n 'neg_score': float(neg_score)}\n \n \n \"\"\"Split Synset terms to multiple dictionary entries. The key for the dictionary is a tuple of pos tag and term\"\"\" \n if verbose: print \"Split synset terms...\" \n for k, v in self.lexiconRaw.iteritems():\n pos = k[0]\n for term in re.findall(r\"(\\w+[^\\d\\#])+\", v.get('synset_terms')):\n if not self.lexicon.has_key((pos, term)):\n self.lexicon[(pos, term)] = {'pos': pos, 'term': term, 'pos_score': [], 'neg_score': []}\n \n self.lexicon[(pos, term)]['pos_score'].append(v.get('pos_score'))\n self.lexicon[(pos, term)]['neg_score'].append(v.get('neg_score'))\n \n \"\"\"Combine polysems of a string to one entry. 
Two sentiment scores will be calculated:\n avg_pos/neg_score: calculates the average of all sentiment scores of the different entries for one string as the sentiment score\n maxpos/neg_score: takes the max value of the sentiment scores from different entries for one string as the sentiment score\"\"\"\n if verbose: print \"Calculate sentiment score...\" \n for k,v in self.lexicon.iteritems():\n self.lexicon[k].update({'max_pos_score': max(v['pos_score']), \n 'max_neg_score': max(v['neg_score']),\n 'avg_pos_score': sum(v['pos_score']) / len(v['pos_score']),\n 'avg_neg_score': sum(v['neg_score']) / len(v['neg_score'])})\n \n \n def parseRawSentiWS_de(self,\n fname_neg=r'.\\lib\\SentimentWords\\de\\SentiWS_v1.8c_Negative.txt',\n fname_pos=r'.\\lib\\SentimentWords\\de\\SentiWS_v1.8c_Positive.txt'): \n \"\"\"\n Parse SentiWS Files from .txt to a list of dictionaries and stores the result in self.lexicon\n \"\"\"\n sentiwords = []\n for fname in [fname_pos, fname_neg]:\n print fname\n with open(fname, 'r') as fp:\n for line in fp: \n row = line.split(\"\\t\")\n row[0].split('|')[0]\n sentiwords.append(dict(pos=row[0].split('|')[1],\n term=row[0].split('|')[0],\n score=float(row[1].strip())))\n result = []\n for element in sentiwords:\n if element.get('score') <= 0:\n element.update(NegScore=element.pop('score') * -1, PosScore=0.0)\n for duplicate in sentiwords[sentiwords.index(element) + 1:]:\n if duplicate.get('term') == element.get('term'):\n element.update(PosScore=duplicate.get('score'))\n sentiwords.remove(duplicate) \n else:\n element.update(PosScore=element.pop('score'), NegScore=0.0)\n \n result.append(element)\n self.lexicon = result\n return self.lexicon\n \n def parseRawGermanPolarityClues(self, directory_path=r\".\\lib\\SentimentWords\\de\\GermanPolarityClues\\\\\"):\n filelist = [r\"GermanPolarityClues-Negative-21042012.tsv\",\n r\"GermanPolarityClues-Positive-21042012.tsv\"]\n \n tags = {\"A\": \"a\", \"N\": \"n\", \"R\": \"r\", \"M\": \"v\", \"V\":\"v\"}\n \n for fname in filelist:\n lines = codecs.open(directory_path + fname, \"r\", \"utf8\").read().splitlines()\n lines = filter((lambda x : not re.search(r\"^\\s*#\", x)), lines)\n for i, line in enumerate(lines):\n fields = re.split(r\"\\t+\", line)\n fields = map(unicode.strip, fields)\n try: \n feature, term, pos, propability, rating, thing = fields\n rating_fields = re.split(r\"/+\", rating)\n pos_score, neg_score, neutral_score = rating_fields\n try:\n pos_score = float(pos_score)\n except:\n pos_score = 0.0\n try:\n neg_score = float(neg_score)\n except:\n neg_score = 0.0\n except:\n sys.stderr.write(\"Line %s formatted incorrectly: %s\\n\" % (i, line))\n \n pos = tags.get(pos[0])\n self.lexiconRaw[(pos, feature)] = {'feature': feature, 'pos': pos, 'term': term, 'pos_score': pos_score, 'neg_score': neg_score}\n \n for k, v in self.lexiconRaw.iteritems():\n if not self.lexicon.has_key((v.get('pos'), v.get('term'))):\n self.lexicon[(v.get('pos'), v.get('term'))] = {'pos': v.get('pos'), 'term': v.get('term'), 'pos_score': [], 'neg_score': [], 'feature': []}\n \n self.lexicon[(v.get('pos'), v.get('term'))].get('pos_score').append(v.get('pos_score'))\n \n \"\"\" change to negative if the negativity score is positive. 
Don't know, why there are two different notations....\"\"\"\n if v.get('neg_score') > 0: v.get('neg_score') * -1\n \n self.lexicon[(v.get('pos'), v.get('term'))].get('neg_score').append(v.get('neg_score'))\n self.lexicon[(v.get('pos'), v.get('term'))].get('feature').append(v.get('feature'))\n \n for k, v in self.lexicon.iteritems():\n self.lexicon[k].update({'max_pos_score': max(v['pos_score']), \n 'max_neg_score': max(v['neg_score']),\n 'avg_pos_score': sum(v['pos_score']) / len(v['pos_score']),\n 'avg_neg_score': sum(v['neg_score']) / len(v['neg_score'])})\n\n return self.lexicon\n \n\n \n def calcAverageScoreSentiWordNet(self):\n \"\"\"\n Generates a set of all terms in self.lexicon and sums up the sentiment score of each term \n to calculate an average score for on specific term. \n \n The ambiguity of words will be ignored => each word has one entry in the result with an \n average sentiment score. The result is stored in self.lexicon_avg_score\n \"\"\"\n \n result = []\n \n termset = set()\n for item in self.lexicon:\n termset.add(item['term'])\n i = 0\n print \"Start calculation of average sentiment score\"\n print i\n for term in termset:\n newDict = dict(term=term, score=list())\n for item in self.lexicon:\n if term == item['term']:\n newDict['score'].append(item['PosScore'] - item['NegScore'])\n newDict.update(score= sum(newDict['score']) / len(newDict['score']))\n i += 1\n result.append(newDict)\n if i % 100 == 0: print str(i) + \"/\" + str(len(termset))\n print i, \"SentiWords \"\n self.lexicon_avg_score = result\n return self.lexicon_avg_score\n\n \n def exportLexicon(self, fname):\n with open(fname, 'w') as f:\n f.write(\"#\" + \"\\t\".join([\"Term\", \"Tag\", \"PosScore\", \"NegScore\"]) + \"\\n\")\n for element in self.lexicon:\n f.write(\"\\t\".join([element.get('term'), element.get('pos'), unicode(element.get('PosScore')), unicode(element.get('NegScore'))]) + \"\\n\") \n\n\n def exportLexiconAVGScore(self, fname):\n with open(fname, 'w') as f:\n f.write(\"#\" + \"\\t\".join([\"Term\", \"Tag\", \"Score\"]) + \"\\n\")\n for element in self.lexicon_avg_score:\n f.write(\"\\t\".join([element.get('term'), str(element.get('score'))]) + \"\\n\") \n \n" }, { "alpha_fraction": 0.7323232293128967, "alphanum_fraction": 0.743686854839325, "avg_line_length": 23.65625, "blob_id": "5a0b3a49b2598e99fd9961699f93303474490010", "content_id": "3c70f1f5cae57a5e1e92352e2353fe4b0cb80d54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 105, "num_lines": 32, "path": "/foursquare-sentiment-mining/miner/test.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 11.05.2014\n\n@author: Andreas\n'''\nimport pprint\nimport cProfile\nfrom numpy import arange\n\nfrom miner.analysis import analysis\nfrom miner.evaluation import evaluation\nfrom miner.sentiwords import sentiwords\n\nberlin_tagged = r\"C:\\Users\\schieberle\\Documents\\opinion-mining-daten\\Berlin-tagged.pkl\"\nmanuelle_Auswertung = r\"C:\\Users\\schieberle\\Documents\\opinion-mining-daten\\berlin-komplett_erweitert.csv\"\noutput_file = r\"C:\\Users\\Schieberle\\Documents\\output.csv\"\n\ndef test():\n\t\"\"\" Load Analysis\"\"\"\n\ta = analysis()\n\ta.load_from_pickle(berlin_tagged)\n\t\n\t\"\"\"Load Sentiment Data\"\"\"\n\tsenti_de = sentiwords()\n\tsenti_de.parseRawGermanPolarityClues()\n\t\n\t\"\"\"Evaluate\"\"\"\n\teval_de = evaluation(a, 
senti_de)\n\teval_de.evaluate_de()\n\tpprint.pprint(eval_de.data)\ntest()\n\t\n\t" }, { "alpha_fraction": 0.63338303565979, "alphanum_fraction": 0.6467958092689514, "avg_line_length": 27.130434036254883, "blob_id": "0bab7dc9b7e128a0aad519330c7376a79a3d1104", "content_id": "a787220637c634b8adb9caa1a0df0ed6973e4962", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 671, "license_type": "no_license", "max_line_length": 116, "num_lines": 23, "path": "/foursquare-sentiment-mining/miner/postagging.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 24.04.2014\n\n@author: schieberle\n'''\n\nfrom nltk.tokenize import WordPunctTokenizer\nfrom treetagger import TreeTagger\n\n\nclass POSTagger(object):\n \"\"\"Class for part of speech tagging with TreeTagger ( http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/ )\n\n \"\"\"\n def __init__(self, treetagger, parameters):\n self.treetagger = treetagger\n self.parameters = parameters\n \n def tag(self, text):\n t = WordPunctTokenizer()\n tagger = TreeTagger(path_to_home=self.treetagger, parameters=[r'-token', r'-lemma', self.parameters])\n return tagger.tag(t.tokenize(text))\n " }, { "alpha_fraction": 0.535520076751709, "alphanum_fraction": 0.544527530670166, "avg_line_length": 33.19186019897461, "blob_id": "9bb7f7a1e6f40967ccce66c2015ec734996dd71d", "content_id": "402cf17c413d85d93a0dd53b3728787d7f49edbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5884, "license_type": "no_license", "max_line_length": 160, "num_lines": 172, "path": "/foursquare-sentiment-mining/miner/__init__.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 17.04.2014\n\n@author: Andreas Schieberle\n'''\n\n\nimport ConfigParser\nimport os, csv, json, pprint\n\nimport foursquare\n\n\nclass miner(object):\n \"\"\"A miner object to access the foursquare API. Provides Methods\n to search for Venues and Tips and export the received data \n to csv. A valid API Key must be provided in the api.conf file\"\"\"\n \n def __init__(self):\n \"\"\"Constructor - Initializes the miner object with the API Key \n provided in the api.conf file and creates a Foursquare object to\n access the Foursquare API\"\"\"\n try: \n config = ConfigParser.ConfigParser()\n config.readfp(open('api.conf'))\n \n API_CLIENT_ID = config.get('Foursquare API Key', 'API_CLIENT_ID')\n API_CLIENT_SECRET = config.get('Foursquare API Key', 'API_CLIENT_SECRET')\n \n self.client = foursquare.Foursquare(API_CLIENT_ID, API_CLIENT_SECRET)\n except IOError as e:\n print e\n \n def searchVenuesNear(self, place):\n \"\"\"Searches the Fourquare API for venues near a given place and stores the data in \n the member variable self.venues\n \n Parameters:\n place -- a string with the name of a place\"\"\"\n \n # API call. 
For details see https://developer.foursquare.com/docs/venues/search\n data = self.client.venues.search(params={'near' : place, 'radius' : 5000, 'limit' : 50, 'categoryId' : '4d4b7105d754a06374d81259', 'intent' : 'browse'})\n venuesList = []\n \n # Iterates through the response Data, create venue objects\n # and store them in venuesList\n for v in data['venues']:\n item = venue(venueID=v ['id'], name=v['name'], loc=location(obj=v['location']))\n venuesList.append(item)\n return venuesList\n \n def searchTipsByVenue(self, venueId):\n \"\"\" Searches the Foursquare API for tips to a given venues id and stores\n the data in the member variable self.tips\n \n Parameters:\n venueID -- the ID of the venue\"\"\"\n \n # API call. For details see https://developer.foursquare.com/docs/venues/tips\n data = self.client.venues.tips(VENUE_ID=venueId)\n tipList = []\n \n # Iterates through the response Data, create tip objects\n # and store them in tipList\n for t in data.get('tips').get('items'):\n item = tip(tipdata={'id' : t.get('id'), 'venueId' : venueId, 'text' : t.get('text'),\n 'canonicalUrl' : t.get('canonicalUrl'), 'user' : t.get('user'),\n 'likes' : t.get('likes')})\n tipList.append(item) \n return tipList\n \n def exportToCSV(self, filename='export.csv'):\n \"\"\"Exports the venue data to csv\n \n Parameters:\n filename -- name of the file that will be exported (default 'export.csv')\"\"\"\n\n rows = []\n csvWriter = csv.DictWriter(open(filename, 'wb'),\n fieldnames=['id', 'name', 'lat', 'lng',\n 'country', 'cc', 'city', 'postalCode',\n 'address'],\n dialect='excel',\n delimiter=';',\n quotechar='\"',\n quoting=csv.QUOTE_ALL)\n \n for d in self.venues.get('list'): \n rows.append(dict(zip(['id', 'name'], [d.id, d.name]) \n + d.location.getDict().items()))\n\n csvWriter.writeheader()\n csvWriter.writerows(rows)\n \n #### Debugging\n print csvWriter.fieldnames\n\n\nclass venue(object):\n \"\"\" Represents a Foursquare venue\"\"\"\n \n def __init__(self, venueID, name, loc):\n \"\"\" Constructor\n \n Parameters:\n id -- the id of the venue as string\n name -- the name of the venue as string\n location -- the location of the venue as location object\"\"\"\n \n self.id = venueID\n self.name = name\n self.location = loc\n \nclass location(object):\n \"\"\" Represents a location. Latitude and longitude are necessary\n keys, city, country, cc (country code), postalCode, and address \n are optional.\n \n Parameters:\n obj -- dictionary of location data\"\"\"\n \n def __init__(self, obj):\n self.lat = obj['lat']\n self.lng = obj['lng']\n if 'city' in obj:\n self.city = obj['city']\n else:\n self.city = None\n if 'country' in obj:\n self.country = obj['country']\n else:\n self.country = None\n if 'cc' in obj:\n self.cc = obj['cc']\n else:\n self.cc = None\n if 'postalCode' in obj:\n self.postalCode = obj['postalCode']\n else:\n self.postalCode = None\n if 'address' in obj:\n self.address = obj['address']\n else:\n self.address = None\n \nclass tip(object):\n \"\"\" Represents a tip. 
\n All keys listed in the init method are necessary\n \n Parameters:\n tipdata -- dictionary of tip data\"\"\"\n def __init__(self, tipdata={}):\n self.id = tipdata.get('id')\n self.venueId = tipdata.get('venueId')\n self.text = tipdata.get('text')\n self.canonicalUrl = tipdata.get('canonicalUrl')\n self.user = tipdata.get('user')\n self.likes = tipdata.get('likes')\n\n#### Debugging Venues\n# test = miner().searchVenuesNear('sossenheim')\n# for v in test.venues.get('list'):\n# pprint.pprint(v.getDict())\n# \n# test.exportToCSV('test.csv')\n \n#### Debugging Tips\n# test = miner()\n# test.searchTipsByVenue('4b0ae100f964a520552923e3')\n\n# pprint.pprint(test.tips.get('list')[0].id)\n\n\n\n" }, { "alpha_fraction": 0.5844155550003052, "alphanum_fraction": 0.6255871653556824, "avg_line_length": 18.0473690032959, "blob_id": "8af87ce6bf54b3ced46ebb722b6bf8d7dc7fc8a9", "content_id": "aa0c5892e5cc6ed1b0aca0ef736bea0235df7834", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3619, "license_type": "permissive", "max_line_length": 90, "num_lines": 190, "path": "/foursquare-sentiment-mining/miner/lib/ldig/maxsubst/cybozulib/include/cybozu/itoa.hpp", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#pragma once\n/**\n\t@file\n\t@brief convert integer to string(ascii)\n\n\tCopyright (C) 2008-2012 Cybozu Labs, Inc., all rights reserved.\n*/\n#include <limits.h>\n#include <string>\n#include <cybozu/inttype.hpp>\n\nnamespace cybozu {\n\nnamespace itoa_local {\n\ntemplate<typename T, typename UT, int n>\nvoid convertFromInt(std::string& out, T x, T minusMax, const char (&minusMaxStr)[n])\n{\n\tif (x == minusMax) {\n\t\tout.assign(minusMaxStr, minusMaxStr + n - 1);\n\t\treturn;\n\t}\n\tif (x == 0) {\n\t\tout.assign(1, '0');\n\t\treturn;\n\t}\n\tUT absX = x < 0 ? 
-x : x;\n\tchar buf[40];\n\tint p = 0;\n\twhile (absX > 0) {\n\t\tbuf[p++] = '0' + (int)(absX % 10);\n\t\tabsX /= 10;\n\t}\n\tif (x < 0) {\n\t\tbuf[p++] = '-';\n\t}\n\tout.resize(p);\n\tfor (int i = 0; i < p; i++) {\n\t\tout[i] = buf[p - 1 - i];\n\t}\n}\n\ntemplate<typename T>\nvoid convertFromUint(std::string& out, T x)\n{\n\tif (x == 0) {\n\t\tout.assign(1, '0');\n\t\treturn;\n\t}\n\tchar buf[40];\n\tint p = 0;\n\twhile (x > 0) {\n\t\tbuf[p++] = '0' + (int)(x % 10);\n\t\tx /= 10;\n\t}\n\tout.resize(p);\n\tfor (int i = 0; i < p; i++) {\n\t\tout[i] = buf[p - 1 - i];\n\t}\n}\n\n/**\n\tconvert to to zero padding hex\n*/\ntemplate<typename T>\nvoid convertFromUintToHexWithZero(std::string& out, T x, bool upCase)\n{\n\tconst size_t len = sizeof(T) * 2;\n\tout.resize(len);\n\tstatic const char *hexTbl[] = {\n\t\t\"0123456789abcdef\",\n\t\t\"0123456789ABCDEF\"\n\t};\n\tconst char *tbl = hexTbl[upCase];\n\tfor (size_t i = 0; i < len; i++) {\n\t\tout[len - i - 1] = tbl[x % 16];\n\t\tx /= 16;\n\t}\n}\n\n} // itoa_local\n\n/**\n\tconvert int to string\n\t@param out [out] string\n\t@param x [in] int\n*/\ninline void itoa(std::string& out, int x)\n{\n\titoa_local::convertFromInt<int, unsigned int>(out, x, INT_MIN, \"-2147483648\");\n}\n\n/**\n\tconvert int64_t to string\n\t@param out [out] string\n\t@param x [in] int64_t\n*/\ninline void itoa(std::string& out, int64_t x)\n{\n\titoa_local::convertFromInt<int64_t, uint64_t>(out, x, LLONG_MIN, \"-9223372036854775808\");\n}\n\n/**\n\tconvert unsigned int to string\n\t@param out [out] string\n\t@param x [in] unsigned int\n*/\ninline void itoa(std::string& out, unsigned int x)\n{\n\titoa_local::convertFromUint(out, x);\n}\n\n/**\n\tconvert uint64_t to string\n\t@param out [out] string\n\t@param x [in] uint64_t\n*/\ninline void itoa(std::string& out, uint64_t x)\n{\n\titoa_local::convertFromUint(out, x);\n}\n\n/**\n\tconvert integer to string\n\t@param x [in] int\n*/\ntemplate<typename T>\ninline std::string itoa(T x)\n{\n\tstd::string ret;\n\titoa(ret, x);\n\treturn ret;\n}\n\ninline void itohex(std::string& out, unsigned char x, bool upCase = true)\n{\n\titoa_local::convertFromUintToHexWithZero(out, x, upCase);\n}\n\ninline void itohex(std::string& out, unsigned short x, bool upCase = true)\n{\n\titoa_local::convertFromUintToHexWithZero(out, x, upCase);\n}\n\ninline void itohex(std::string& out, unsigned int x, bool upCase = true)\n{\n\titoa_local::convertFromUintToHexWithZero(out, x, upCase);\n}\n\ninline void itohex(std::string& out, uint64_t x, bool upCase = true)\n{\n\titoa_local::convertFromUintToHexWithZero(out, x, upCase);\n}\n\ntemplate<typename T>\ninline std::string itohex(T x, bool upCase = true)\n{\n\tstd::string out;\n\titohex(out, x, upCase);\n\treturn out;\n}\n\n/**\n\tconvert integer to string with zero padding\n\t@param x [in] int\n\t@param len [in] minimum lengh of string\n\t@param c [in] padding character\n\t@note\n\titoa(12, 4) == \"0012\"\n\titoa(1234, 4) == \"1234\"\n\titoa(12345, 4) == \"12345\"\n\titoa(-12, 4) == \"-012\"\n*/\ntemplate<typename T>\ninline std::string itoaWithZero(T x, size_t len, char c = '0')\n{\n\tstd::string ret;\n\titoa(ret, x);\n\tif (ret.size() < len) {\n\t\tstd::string zero(len - ret.size(), c);\n\t\tif (x >= 0) {\n\t\t\tret = zero + ret;\n\t\t} else {\n\t\t\tret = \"-\" + zero + ret.substr(1);\n\t\t}\n\t}\n\treturn ret;\n}\n\n} // cybozu\n" }, { "alpha_fraction": 0.6533457040786743, "alphanum_fraction": 0.6905204653739929, "avg_line_length": 21.41666603088379, "blob_id": 
"1f269d444b6a31d35dc75f9876bf3343a66bd3d2", "content_id": "ea45069335e31312cd62f51ade74911d1eacf2f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1076, "license_type": "permissive", "max_line_length": 64, "num_lines": 48, "path": "/foursquare-sentiment-mining/miner/lib/ldig/maxsubst/cybozulib/include/cybozu/inttype.hpp", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#pragma once\n/**\n\t@file\n\t@brief int type definition and macros\n\n\tCopyright (C) 2008-2012 Cybozu Labs, Inc., all rights reserved.\n*/\n\n#if defined(_MSC_VER) && (MSC_VER <= 1500)\n\ttypedef __int64 int64_t;\n\ttypedef unsigned __int64 uint64_t;\n\ttypedef unsigned int uint32_t;\n\ttypedef int int32_t;\n\ttypedef unsigned short uint16_t;\n\ttypedef short int16_t;\n\ttypedef unsigned char uint8_t;\n\ttypedef signed char int8_t;\n#else\n\t#include <stdint.h>\n#endif\n\n#ifdef _MSC_VER\n\t#ifndef CYBOZU_DEFINED_SSIZE_T\n\t\t#define CYBOZU_DEFINED_SSIZE_T\n\t\t#ifdef _WIN64\n\t\t\ttypedef int64_t ssize_t;\n\t\t#else\n\t\t\ttypedef int32_t ssize_t;\n\t\t#endif\n\t#endif\n#else\n\t#include <unistd.h> // for ssize_t\n#endif\n\n// std::vector<int> v; CYBOZU_FOREACH(auto x, v) {...}\n#if defined(_MSC_VER) && (_MSC_VER >= 1400)\n\t#define CYBOZU_FOREACH(type_x, xs) for each (type_x in xs)\n#elif defined(__GNUC__)\n\t#define CYBOZU_FOREACH(type_x, xs) for (type_x : xs)\n#endif\n\n#define CYBOZU_NUM_OF_ARRAY(x) (sizeof(x) / sizeof(*x))\n\n#ifdef _MSC_VER\n\t#define CYBOZU_SNPRINTF _snprintf_s\n#else\n\t#define CYBOZU_SNPRINTF snprintf\n#endif\n" }, { "alpha_fraction": 0.7041420340538025, "alphanum_fraction": 0.7199211120605469, "avg_line_length": 21, "blob_id": "0926ff42a790172e5cb1bcdb6e54a69e3b6f7064", "content_id": "37c4827f576b45641ac659209b51d1d2c0c89db7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 507, "license_type": "permissive", "max_line_length": 62, "num_lines": 23, "path": "/foursquare-sentiment-mining/miner/lib/ldig/maxsubst/readme.md", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "maxsubst (Maximal Substring Extractor for ldig)\n======================\n\nThis module is to extract maximal substring from text.\nldig invokes this module at model initialization.\n\n\nRemarks\n\nmaxsubst uses the below libraries.\n-----\n\nesaxx by Daisuke Okanohara\nhttp://code.google.com/p/esaxx/\n\ncybozulib by Mitsunari / Cybozu Labs\nhttps://github.com/herumi/cybozulib\n\n\nCopyright & License\n-----\n- (c)2011-2012 Nakatani Shuyo / Cybozu Labs Inc. 
All rights reserved.\n- All codes and resources are available under the MIT License.\n\n" }, { "alpha_fraction": 0.6228086352348328, "alphanum_fraction": 0.640392005443573, "avg_line_length": 22.776323318481445, "blob_id": "dc8ae8786b4aa62617b246c26bc8223393422acb", "content_id": "899b44a5fbdc0f37b507745a87d0671882a5791e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 38161, "license_type": "permissive", "max_line_length": 124, "num_lines": 1605, "path": "/foursquare-sentiment-mining/miner/lib/ldig/maxsubst/cybozulib/include/cybozu/string.hpp", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#pragma once\n/**\n\t@file\n\t@brief unicode string class like std::string\n\tsupport char*, std::string with UTF-8\n\n\tCopyright (C) 2008-2012 Cybozu Labs, Inc., all rights reserved.\n*/\n\n#include <string>\n#include <cstring>\n#include <assert.h>\n#include <stddef.h>\n#include <stdio.h> // for printf\n#include <iosfwd> // for istream, ostream\n#include <functional> // for unary_function\n\n#include <cybozu/exception.hpp>\n\nnamespace cybozu {\n\nstruct StringException : public cybozu::Exception {\n\tStringException() : cybozu::Exception(\"string\") { }\n};\n\n#ifdef __GNUC__\n\t/* avoid to use uint32_t because compiling boost::regex fails */\n\ttypedef wchar_t Char; //!< Char for Linux\n\ttypedef unsigned short Char16; /* unsigned is necessary for gcc */\n#else\n\t/* can't compile with singed */\n\ttypedef unsigned __int32 Char; //!< Char for Windows\n\ttypedef wchar_t Char16;\n#endif\n\ntypedef std::basic_string<Char16> String16;\n\n/**\n\tutility function\n*/\nnamespace string {\n\n/*\n\t code point[a, b] 1byte 2ybte 3byte 4byte\n\t U+0000 U+007f 00..7f ; 128\n\t U+0080 U+07ff c2..df 80..bf ; 30 x 64 = 1920\n\n\t U+0800 U+0fff e0 a0..bf 80..bf ; 1 x 32 x 64 = 2048\n\t U+1000 U+cfff e1..ec 80..bf 80..bf ; 12 x 64 x 64 = 49152\n\t U+d000 U+d7ff ed 80..9f 80..bf ; 1 x 32 x 64 = 2048\n\n\t U+e000 U+ffff ee..ef 80..bf 80..bf ; 2 x 64 x 64 = 8192\n\n\t U+10000 U+3ffff f0 90..bf 80..bf 80..bf ; 1 x 48 x 64 x 64 = 196608\n\t U+40000 U+fffff f1..f3 80..bf 80..bf 80..bf ; 3 x 64 x 64 x 64 = 786432\n\tU+100000 U+10ffff f4 80..8f 80..bf 80..bf ; 1 x 16 x 64 x 64 = 65536\n*/\ninline int GetCharSize(Char c)\n{\n\tif (c <= 0x7f) return 1;\n\tif (c <= 0x7ff) return 2;\n\tif (c <= 0xd7ff) return 3;\n\tif (c <= 0xdfff || c > 0x10ffff) return 0;\n\tif (c <= 0xffff) return 3;\n\treturn 4;\n}\n\n// for Char/char\ninline bool IsValidChar(Char c)\n{\n\treturn GetCharSize(c) != 0;\n}\n\nnamespace local {\n\n/* true if c in [min, max] */\ninline bool in(unsigned char c, int min, int max)\n{\n//\t return min <= c && c <= max;\n\treturn static_cast<unsigned int>(c - min) <= static_cast<unsigned int>(max - min);\n}\n\n} // local\n\n/*\n\tget one character from UTF-8 string and seek begin to next char\n\t@note begin != end\n\t@note begin is not determined if false\n*/\ntemplate<class Iterator>\nbool GetCharFromUtf8(Char *c, Iterator& begin, const Iterator& end)\n{\n\tunsigned char c0 = *begin++;\n\tif (c0 <= 0x7f) {\n\t\t*c = c0;\n\t\treturn true;\n\t}\n\tif (local::in(c0, 0xc2, 0xdf)) {\n\t\tif (begin != end) {\n\t\t\tunsigned char c1 = *begin++;\n\t\t\tif (local::in(c1, 0x80, 0xbf)) {\n\t\t\t\t*c = ((c0 << 6) | (c1 & 0x3f)) - 0x3000;\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t} else if (c0 <= 0xef) {\n\t\tif (begin != end) {\n\t\t\tunsigned char c1 = *begin++;\n\t\t\tif (begin != end) {\n\t\t\t\tunsigned char c2 = *begin++;\n\t\t\t\tif 
(local::in(c2, 0x80, 0xbf)) {\n\t\t\t\t\tif ((c0 == 0xe0 && local::in(c1, 0xa0, 0xbf))\n\t\t\t\t\t || (local::in(c0, 0xe1, 0xec) && local::in(c1, 0x80, 0xbf))\n\t\t\t\t\t || (c0 == 0xed && local::in(c1, 0x80, 0x9f))\n\t\t\t\t\t || (local::in(c0, 0xee, 0xef) && local::in(c1, 0x80, 0xbf))) {\n\t\t\t\t\t\t*c = ((c0 << 12) | ((c1 & 0x3f) << 6) | (c2 & 0x3f)) - 0xe0000;\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if (local::in(c0, 0xf0, 0xf4)) {\n\t\tif (begin != end) {\n\t\t\tunsigned char c1 = *begin++;\n\t\t\tif (begin != end) {\n\t\t\t\tunsigned char c2 = *begin++;\n\t\t\t\tif (begin != end) {\n\t\t\t\t\tunsigned char c3 = *begin++;\n\t\t\t\t\tif (local::in(c2, 0x80, 0xbf) && local::in(c3, 0x80, 0xbf)) {\n\t\t\t\t\t\tif ((c0 == 0xf0 && local::in(c1, 0x90, 0xbf))\n\t\t\t\t\t\t || (local::in(c0, 0xf1, 0xf3) && local::in(c1, 0x80, 0xbf))\n\t\t\t\t\t\t || (c0 == 0xf4 && local::in(c1, 0x80, 0x8f))) {\n\t\t\t\t\t\t\t*c = ((c0 << 18) | ((c1 & 0x3f) << 12) | ((c2 & 0x3f) << 6) | (c3 & 0x3f)) - 0x3c00000;\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false;\n}\n\n/*\n\tget one character from UTF-16 string and seek begin to next char\n\t@note begin != end\n\t@note begin is not determined if false\n*/\ntemplate<class Iterator>\nbool GetCharFromUtf16(Char& c, Iterator& begin, const Iterator& end)\n{\n\tstruct local {\n\t\tstatic inline bool isLead(Char c) { return (c & 0xfffffc00) == 0xd800; }\n\t\tstatic inline bool isTrail(Char c) { return (c & 0xfffffc00) == 0xdc00; }\n\t};\n\tChar16 c0 = *begin++;\n\tif (!local::isLead(c0)) {\n\t\tc = c0;\n\t\treturn true;\n\t}\n\tif (begin != end) {\n\t\tChar16 c1 = *begin++;\n\t\tif (local::isTrail(c1)) {\n\t\t\tconst Char offset = (0xd800 << 10UL) + 0xdc00 - 0x10000;\n\t\t\tc = (c0 << 10) + c1 - offset;\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\ninline bool AppendUtf8(std::string& out, Char c)\n{\n\tif (c <= 0x7f) {\n\t\tout += static_cast<char>(c);\n\t\treturn true;\n\t} else if (c <= 0x7ff) {\n\t\tchar buf[2];\n\t\tbuf[0] = static_cast<char>((c >> 6) | 0xc0);\n\t\tbuf[1] = static_cast<char>((c & 0x3f) | 0x80);\n\t\tout.append(buf, 2);\n\t\treturn true;\n\t} else if (c <= 0xffff) {\n\t\tif (0xd7ff < c && c <= 0xdfff) {\n\t\t\treturn false;\n\t\t}\n\t\tchar buf[3];\n\t\tbuf[0] = static_cast<char>((c >> 12) | 0xe0);\n\t\tbuf[1] = static_cast<char>(((c >> 6) & 0x3f) | 0x80);\n\t\tbuf[2] = static_cast<char>((c & 0x3f) | 0x80);\n\t\tout.append(buf, 3);\n\t\treturn true;\n\t} else if (c <= 0x10ffff) {\n\t\tchar buf[4];\n\t\tbuf[0] = static_cast<char>((c >> 18) | 0xf0);\n\t\tbuf[1] = static_cast<char>(((c >> 12) & 0x3f) | 0x80);\n\t\tbuf[2] = static_cast<char>(((c >> 6) & 0x3f) | 0x80);\n\t\tbuf[3] = static_cast<char>((c & 0x3f) | 0x80);\n\t\tout.append(buf, 4);\n\t\treturn true;\n\t}\n\treturn false;\n}\n\ninline bool AppendUtf16(String16 *out, Char c)\n{\n\tif (c <= 0xffff) {\n\t\t*out += static_cast<Char16>(c);\n\t\treturn true;\n\t} else if (c <= 0x0010ffff) {\n\t\tChar16 buf[2];\n\t\tbuf[0] = static_cast<Char16>((c >> 10) + 0xd7c0);\n\t\tbuf[1] = static_cast<Char16>((c & 0x3ff) | 0xdc00);\n\t\tout->append(buf, 2);\n\t\treturn true;\n\t}\n\treturn false;\n}\n\n} // string\n\n/**\n\t@brief template class for cybozu::String\n*/\ntemplate<class CharT, class Traits = std::char_traits<CharT>, class Alloc = std::allocator<CharT> >\nclass StringT {\npublic:\n\t//@{ standard typedef\n\ttypedef std::basic_string<CharT, Traits, Alloc> BasicString;\n\ttypedef CharT 
value_type;\n\ttypedef size_t size_type;\n\ttypedef ptrdiff_t difference_type;\n\ttypedef CharT& reference;\n\ttypedef const CharT& const_reference;\n\ttypedef CharT* pointer;\n\ttypedef const CharT* const_pointer;\n\ttypedef typename BasicString::iterator iterator;\n\ttypedef typename BasicString::const_iterator const_iterator;\n\ttypedef std::reverse_iterator<iterator> reverse_iterator;\n\ttypedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n\t//@}\n\tstatic const size_t npos = size_t(-1); //!< standard npos\n\n\t/**\n\t\tdump unicode of string for debug\n\t\t@param msg [local::in] user message\n\t*/\n\tvoid dump(const char *msg = 0) const\n\t{\n\t\tif (msg) printf(\"%s\", msg);\n\t\tfor (size_t i = 0; i < size(); i++) {\n\t\t\tprintf(\"%08x \", str_[i]);\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n\n\t/**\n\t\tconstruct empty string\n\t*/\n\tStringT() { }\n\n\t/**\n\t\tconstruct from str [off, off + count)\n\t\t@param str [local::in] original string\n\t\t@param off [local::in] offset\n\t\t@param count [local::in] count of character(default npos)\n\t*/\n\tStringT(const StringT& str, size_type off, size_type count = npos)\n\t\t: str_(str.str_, off, count)\n\t{ }\n\n\t/**\n\t\tconstruct from [str, str + count)\n\t\t@param str [local::in] original string\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT(const CharT *str, size_type count)\n\t\t: str_(str, count)\n\t{\n\t}\n\n\t/**\n\t\tconstruct from [str, NUL)\n\t\t@param str [local::in] original string\n\t*/\n\tStringT(const CharT *str)\n\t\t: str_(str)\n\t{\n\t}\n\n\t/**\n\t\tconstruct from count * c\n\t\t@param count [local::in] count of character\n\t\t@param c [local::in] initial character\n\t*/\n\tStringT(size_type count, CharT c)\n\t\t: str_(count, c)\n\t{\n\t}\n\n\t/**\n\t\tconstruct from [begin, end)\n\t\t@param begin [local::in] begin of iterator\n\t\t@param end [local::in] end of iterator\n\t*/\n\ttemplate<class Iterator>\n\tStringT(Iterator begin, Iterator end)\n\t{\n\t\tappend(begin, end);\n\t}\n\n\t// construct from [begin, end), const pointers\n// StringT(const_pointer begin, const_pointer end);\n\t// construct from [begin, end), const_iterators\n// StringT(const_iterator begin, const_iterator end);\n\n\t/**\n\t\tconstruct by copying str\n\t\t@param str [local::in] original string\n\t*/\n\tStringT(const StringT& str)\n\t\t: str_(str.str_)\n\t{\n\t}\n\n\t/**\n\t\tconstruct by [str, str + count)\n\t\t@param str [local::in] original string\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT(const char *str, size_type count) // A\n\t{\n\t\tappend(str, count);\n\t}\n\n\t/**\n\t\tconstruct from [str, NUL)\n\t\t@param str [local::in] original string\n\t*/\n\tStringT(const char *str) // A\n\t{\n\t\tappend(str);\n\t}\n\t/**\n\t\tconstruct by copying str\n\t\t@param str [local::in] original string\n\t*/\n\tStringT(const std::string& str) // A\n\t{\n\t\tappend(str);\n\t}\n\n\t/**\n\t\tconstrut by Char16(same ICU::UChar)\n\t\t@param str [local::in] UTF-16 format string\n\t*/\n\tStringT(const String16& str) // A\n\t{\n\t\tString16::const_iterator begin = str.begin(), end = str.end();\n\t\twhile (begin != end) {\n\t\t\tChar c;\n\t\t\tif (!string::GetCharFromUtf16(c, begin, end)) {\n\t\t\t\tcybozu::StringException e;\n\t\t\t\te << \"cstr UTF-16\";\n\t\t\t\tthrow e;\n\t\t\t}\n\t\t\tstr_ += c;\n\t\t}\n\t}\n\t/**\n\t\tconstruct by BasicString\n\t\t@param str [local::in] UTF-32 string\n\t*/\n\tStringT(const BasicString& str) // A\n\t\t: str_(str)\n\t{\n\t}\n\n\t/**\n\t\tassign str\n\t\t@param str 
[local::in] assign string\n\t*/\n\tStringT& operator=(const StringT& str)\n\t{\n\t\treturn assign(str);\n\t}\n\n\t/**\n\t\tassign [str, NUL)\n\t\t@param str [local::in] assign string\n\t*/\n\tStringT& operator=(const CharT *str)\n\t{\n\t\treturn assign(str);\n\t}\n\n\t/**\n\t\tassign 1 * c\n\t\t@param c [local::in] initial character\n\t*/\n\tStringT& operator=(CharT c)\n\t{\n\t\treturn assign(1, c);\n\t}\n\n\t/**\n\t\tassign [str, NUL)\n\t\t@param str [local::in] assign string\n\t*/\n\tStringT& operator=(const char *str) // A\n\t{\n\t\treturn assign(str);\n\t}\n\t/**\n\t\tassign str\n\t\t@param str [local::in] assign string\n\t*/\n\tStringT& operator=(const std::string& str) // A\n\t{\n\t\treturn assign(str);\n\t}\n\n\t/**\n\t\tappend str\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& operator+=(const StringT& str)\n\t{\n\t\treturn append(str);\n\t}\n\n\t/**\n\t\tappend [str, NUL)\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& operator+=(const CharT *str)\n\t{\n\t\treturn append(str);\n\t}\n\n\t/**\n\t\tappend 1 * c\n\t\t@param c [local::in] append character\n\t*/\n\tStringT& operator+=(CharT c)\n\t{\n\t\treturn append(1, c);\n\t}\n\n\t/**\n\t\tappend str\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& append(const StringT& str)\n\t{\n\t\tstr_.append(str.str_); return *this;\n\t}\n\n\t/**\n\t\tappend str [off, off + count)\n\t\t@param str [local::in] append string\n\t\t@param off [local::in] string offset\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT& append(const StringT& str, size_type off, size_type count)\n\t{\n\t\tstr_.append(str.str_, off, count); return *this;\n\t}\n\n\t/**\n\t\tappend [str, str + count)\n\t\t@param str [local::in] append string\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT& append(const CharT *str, size_type count)\n\t{\n\t\treturn append(str, str + count);\n\t}\n\n\t/**\n\t\tappend [str, NUL)\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& append(const CharT *str)\n\t{\n\t\tstr_.append(str); return *this;\n\t}\n\n\t/**\n\t\tappend count * c\n\t\t@param count [local::in] count of character\n\t\t@param c [local::in] initial character\n\t*/\n\tStringT& append(size_type count, CharT c)\n\t{\n\t\tstr_.append(count, c); return *this;\n\t}\n\n\t/**\n\t\tappend [begin, end)\n\t\t@param begin [local::in] begin of iterator\n\t\t@param end [local::in] end of iterator\n\t*/\n\ttemplate<class Iterator>\n\tStringT& append(Iterator begin, Iterator end)\n\t{\n\t\twhile (begin != end) {\n\t\t\tCharT c;\n\t\t\tc = getOneChar(begin, end);\n\t\t\tstr_.push_back(c);\n\t\t}\n\t\treturn *this;\n\t}\n\n\t// append [begin, end), const pointers\n// StringT& append(const_pointer begin, const_pointer end);\n\t// append [begin, end), const_iterators\n// StringT& append(const_iterator begin, const_iterator end);\n\n\t/**\n\t\tappend [str, str + count)\n\t\t@param str [local::in] append string\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT& append(const char *str, size_type count) // A\n\t{\n\t\treturn append(str, str + count);\n\t}\n\n\t/**\n\t\tappend [str, NUL)\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& append(const char *str) // A\n\t{\n\t\treturn append(str, std::strlen(str));\n\t}\n\t/**\n\t\tappend str\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& append(const std::string& str) // A\n\t{\n\t\treturn append(str.begin(), str.end());\n\t}\n\n\t/**\n\t\tassign str\n\t\t@param str [local::in] assign str\n\t*/\n\tStringT& assign(const StringT& 
str)\n\t{\n\t\tclear(); return append(str);\n\t}\n\n\t/**\n\t\tassign str [off, off + count)\n\t\t@param str [local::in] assign string\n\t\t@param off [local::in] offset\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT& assign(const StringT& str, size_type off, size_type count)\n\t{\n\t\tclear(); return append(str, off, count);\n\t}\n\n\t/**\n\t\tassign [str, str + count)\n\t\t@param str [local::in] assign string\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT& assign(const CharT *str, size_type count)\n\t{\n\t\treturn assign(str, str + count);\n\t}\n\n\t/**\n\t\tassign [str, NUL)\n\t\t@param str [local::in] assign string\n\t*/\n\tStringT& assign(const CharT *str)\n\t{\n\t\tclear(); return append(str);\n\t}\n\n\t/**\n\t\tassign count * c\n\t\t@param count [local::in] count of character\n\t\t@param c [local::in] initial character\n\t*/\n\tStringT& assign(size_type count, CharT c)\n\t{\n\t\tclear(); return append(count, c);\n\t}\n\n\t/**\n\t\tassign [First, end)\n\t\t@param begin [local::in] begin of iterator\n\t\t@param end [local::in] end of iterator\n\t*/\n\ttemplate<class Iterator>\n\tStringT& assign(Iterator begin, Iterator end)\n\t{\n\t\tclear(); return append(begin, end);\n\t}\n\n\t// assign [First, end), const pointers\n// StringT& assign(const_pointer begin, const_pointer end);\n\n\t// assign [First, end), const_iterators\n// StringT& assign(const_iterator begin, const_iterator end);\n\n\t/**\n\t\tassign [str, str + count)\n\t\t@param str [local::in] original string\n\t\t@param count [local::in] count of character\n\t*/\n\tStringT& assign(const char *str, size_type count) // A\n\t{\n\t\treturn assign(str, str + count);\n\t}\n\n\t/**\n\t\tassign [str, NUL)\n\t\t@param str [local::in] original string\n\t*/\n\tStringT& assign(const char *str) // A\n\t{\n\t\tclear(); return append(str);\n\t}\n\t/**\n\t\tassign str\n\t\t@param str [local::in] original string\n\t*/\n\tStringT& assign(const std::string& str) // A\n\t{\n\t\tclear(); return append(str);\n\t}\n\n\t/**\n\t\tinsert str at off\n\t\t@param off [local::in] offset\n\t\t@param str [local::in] insert str\n\t*/\n\tStringT& insert(size_type off, const StringT& str)\n\t{\n\t\tstr_.insert(off, str.str_); return *this;\n\t}\n\n\t/**\n\t\tinsert str [off, off + count) at off\n\t\t@param off [local::in] offset of destination\n\t\t@param rhs [local::in] source str\n\t\t@param rhsOff [local::in] offset of source str\n\t\t@param count [local::in] count of source str\n\t*/\n\tStringT& insert(size_type off, const StringT& rhs, size_type rhsOff, size_type count)\n\t{\n\t\tstr_.insert(off, rhs.str_, rhsOff, count); return *this;\n\t}\n\n\t/**\n\t\tinsert [str, str + count) at off\n\t\t@param off [local::in] offset of destination\n\t\t@param str [local::in] source str\n\t\t@param count [local::in] count of source str\n\t*/\n\tStringT& insert(size_type off, const CharT *str, size_type count)\n\t{\n\t\tstr_.insert(off, str, count); return *this;\n\t}\n\n\t/**\n\t\tinsert [str, NUL) at off\n\t\t@param off [local::in] offset of destination\n\t\t@param str [local::in] source str\n\t*/\n\tStringT& insert(size_type off, const CharT *str)\n\t{\n\t\tstr_.insert(off, str); return *this;\n\t}\n\n\t/**\n\t\tinsert count * c at off\n\t\t@param off [local::in] offset of destination\n\t\t@param count [local::in] count of source str\n\t\t@param c [local::in] initial character\n\t*/\n\tStringT& insert(size_type off, size_type count, CharT c)\n\t{\n\t\tstr_.insert(off, count, c); return *this;\n\t}\n\t/**\n\t\tinsert c at 
here\n\t\t@param here [local::in] offset of destination\n\t\t@param c [local::in] initial character(default 0)\n\t*/\n\titerator insert(iterator here, CharT c = 0)\n\t{\n\t\treturn str_.insert(here, c);\n\t}\n\n\t/**\n\t\tinsert count * CharT at here\n\t\t@param here [local::in] offset of destination\n\t\t@param count [local::in] count of str\n\t\t@param c [local::in] initial character\n\t*/\n\tvoid insert(iterator here, size_type count, CharT c)\n\t{\n\t\tstr_.insert(here, count, c);\n\t}\n\n\t/**\n\t\tinsert [begin, end) at here\n\t\t@param here [local::in] offset of destination\n\t\t@param begin [local::in] begin of iterator\n\t\t@param end [local::in] end of iterator\n\t*/\n\ttemplate<class Iterator>\n\tvoid insert(iterator here, Iterator begin, Iterator end)\n\t{\n\t\tStringT str(begin, end);\n\t\tstr_.insert(here, str.begin(), str.end());\n\t}\n\n\t// insert [begin, end) at here, const pointers\n// void insert(iterator here, const_pointer begin, const_pointer end);\n\t// insert [begin, end) at here, const_iterators\n// void insert(iterator here, const_iterator begin, const_iterator end);\n\n\t/**\n\t\terase elements [off, off + count)\n\t\t@param off [local::in] offset\n\t\t@param count [local::in] count of character(default npos)\n\t*/\n\tStringT& erase(size_type off = 0, size_type count = npos)\n\t{\n\t\tstr_.erase(off, count); return *this;\n\t}\n\n\t/**\n\t\terase element at here\n\t\t@param here [local::in] erase from here\n\t*/\n\titerator erase(iterator here)\n\t{\n\t\treturn str_.erase(here);\n\t}\n\n\t/**\n\t\terase substring [begin, end)\n\t\t@param begin [local::in] begin of iterator\n\t\t@param end [local::in] end of iterator\n\t*/\n\titerator erase(iterator begin, iterator end)\n\t{\n\t\treturn str_.erase(begin, end);\n\t}\n\n\t/**\n\t\terase all\n\t*/\n\tvoid clear() { str_.clear(); }\n\n\t/**\n\t\treplace [off, off + n) with rhs\n\t\t@param off [local::in] start offset\n\t\t@param n [local::in] count of remove character\n\t\t@param rhs [local::in] append string\n\t*/\n\tStringT& replace(size_type off, size_type n, const StringT& rhs)\n\t{\n\t\tstr_.replace(off, n, rhs.str_); return *this;\n\t}\n\n\t/**\n\t\treplace [off, off + n) with rhs [rhsOff, rhsOff + count)\n\t\t@param off [local::in] start offset\n\t\t@param n [local::in] count of remove character\n\t\t@param rhs [local::in] append string\n\t\t@param rhsOff [local::in] append from\n\t\t@param count [local::in] count of append\n\t*/\n\tStringT& replace(size_type off, size_type n, const StringT& rhs, size_type rhsOff, size_type count)\n\t{\n\t\tstr_.replace(off, n, rhs.str_, rhsOff, count); return *this;\n\t}\n\n\t/**\n\t\treplace [off, off + n) with [str, str + count)\n\t\t@param off [local::in] start offset\n\t\t@param n [local::in] count of remove character\n\t\t@param str [local::in] append string\n\t\t@param count [local::in] count of append\n\t*/\n\tStringT& replace(size_type off, size_type n, const CharT *str, size_type count)\n\t{\n\t\tstr_.replace(off, n, str, count); return *this;\n\t}\n\n\t/**\n\t\treplace [off, off + n) with [str, NUL)\n\t\t@param off [local::in] start offset\n\t\t@param n [local::in] count of remove character\n\t\t@param str [local::in] append string\n\t*/\n\tStringT& replace(size_type off, size_type n, const CharT *str)\n\t{\n\t\tstr_.replace(off, n, str); return *this;\n\t}\n\n\t/**\n\t\treplace [off, off + n) with count * c\n\t\t@param off [local::in] start offset\n\t\t@param n [local::in] count of remove character\n\t\t@param count [local::in] count of append\n\t\t@param c 
[local::in] initial character\n\t*/\n\tStringT& replace(size_type off, size_type n, size_type count, CharT c)\n\t{\n\t\tstr_.replace(off, n, count, c); return *this;\n\t}\n\n\t/**\n\t\treplace [begin, end) with rhs\n\t\t@param begin [local::in] begin to remove\n\t\t@param end [local::in] end to remove\n\t\t@param rhs [local::in] append str\n\t*/\n\tStringT& replace(iterator begin, iterator end, const StringT& rhs)\n\t{\n\t\tstr_.replace(begin, end, rhs.str_); return *this;\n\t}\n\n\t/**\n\t\treplace [begin, end) with [str, str + count)\n\t\t@param begin [local::in] begin to remove\n\t\t@param end [local::in] end to remove\n\t\t@param str local::in] append str\n\t\t@param count [local::in] count of append\n\t*/\n\tStringT& replace(iterator begin, iterator end, const CharT *str, size_type count)\n\t{\n\t\tstr_.replace(begin, end, str, count); return *this;\n\t}\n\n\t/**\n\t\treplace [begin, end) with [str, NUL)\n\t\t@param begin [local::in] begin to remove\n\t\t@param end [local::in] end to remove\n\t\t@param str local::in] append str\n\t*/\n\tStringT& replace(iterator begin, iterator end, const CharT *str)\n\t{\n\t\tstr_.replace(begin, end, str); return *this;\n\t}\n\n\t/**\n\t\treplace [begin, end) with count * c\n\t\t@param begin [local::in] begin to remove\n\t\t@param end [local::in] end to remove\n\t\t@param count [local::in] count of append\n\t\t@param c [local::in] initial character\n\t*/\n\tStringT& replace(iterator begin, iterator end, size_type count, CharT c)\n\t{\n\t\tstr_.replace(begin, end, count, c); return *this;\n\t}\n\n\t/**\n\t\treplace [begin, end) with [begin2, end2)\n\t\t@param begin [local::in] begin to remove\n\t\t@param end [local::in] end to remove\n\t\t@param begin2 [local::in] begin to append\n\t\t@param end2 [local::in] end to append\n\t*/\n\ttemplate<class Iterator>\n\tStringT& replace(iterator begin, iterator end, Iterator begin2, Iterator end2)\n\t{\n\t\tStringT str(begin2, end2);\n\t\tstr_.replace(begin, end, str.begin(), str.end());\n\t\treturn *this;\n\t}\n\n\t// replace [begin, end) with [begin2, end2), const pointers\n// StringT& replace(iterator begin, iterator end, const_pointer begin2, const_pointer end2);\n\n\t// replace [begin, end) with [begin2, end2), const_iterators\n// StringT& replace(iterator begin, iterator end, const_iterator begin2, const_iterator end2);\n\n\t/**\n\t\treturn iterator for beginning of mutable sequence\n\t*/\n\titerator begin() { return str_.begin(); }\n\n\t/**\n\t\treturn iterator for beginning of nonmutable sequence\n\t*/\n\tconst_iterator begin() const { return str_.begin(); }\n\n\t/**\n\t\treturn iterator for end of mutable sequence\n\t*/\n\titerator end() { return str_.end(); }\n\n\t/**\n\t\treturn iterator for end of nonmutable sequence\n\t*/\n\tconst_iterator end() const { return str_.end(); }\n\n\t/**\n\t\treturn iterator for beginning of reversed mutable sequence\n\t*/\n\treverse_iterator rbegin() { return str_.rbegin(); }\n\n\t/**\n\t\treturn iterator for beginning of reversed nonmutable sequence\n\t*/\n\tconst_reverse_iterator rbegin() const { return str_.rbegin(); }\n\n\t/**\n\t\treturn iterator for end of reversed mutable sequence\n\t*/\n\treverse_iterator rend() { return str_.rend(); }\n\n\t/**\n\t\treturn iterator for end of reversed nonmutable sequence\n\t*/\n\tconst_reverse_iterator rend() const { return str_.rend(); }\n\n\t/**\n\t\tsubscript mutable sequence with checking\n\t\t@param off [local::in] offset\n\t*/\n\treference at(size_type off) { return str_.at(off); }\n\n\t/**\n\t\tget element at 
off\n\t\t@param off [local::in] offset\n\t*/\n\tconst_reference at(size_type off) const { return str_.at(off); }\n\n\t/**\n\t\tsubscript mutable sequence\n\t\t@param off [local::in] offset\n\t*/\n\treference operator[](size_type off) { return str_[off]; }\n\n\t/**\n\t\tsubscript nonmutable sequence\n\t\t@param off [local::in] offset\n\t*/\n\tconst_reference operator[](size_type off) const { return str_[off]; }\n\n\t/**\n\t\tinsert element at end\n\t\t@param c [local::in] append character\n\t*/\n\tvoid push_back(CharT c)\n\t{\n\t\tstr_.push_back(c);\n\t}\n\n\t/**\n\t\treturn pointer to null-terminated nonmutable array\n\t*/\n\tconst CharT *c_str() const { return str_.c_str(); }\n\n\t/**\n\t\treturn pointer to nonmutable array\n\t*/\n\tconst CharT *data() const { return str_.data(); }\n\n\t/**\n\t\treturn length of sequence\n\t*/\n\tsize_type length() const { return str_.length(); }\n\n\t/**\n\t\treturn length of sequence\n\t*/\n\tsize_type size() const { return str_.size(); }\n\n\t/**\n\t\treturn maximum possible length of sequence\n\t*/\n\tsize_type max_size() const { return str_.max_size(); }\n\n\t/**\n\t\tdetermine new length, padding with null elements as needed\n\t*/\n\tvoid resize(size_type newSize) { str_.resize(newSize); }\n\n\t/**\n\t\tdetermine new length, padding with c elements as needed\n\t\t@param newSize [local::in] new length\n\t\t@param c [local::in] initial character\n\t*/\n\tvoid resize(size_type newSize, CharT c)\n\t{\n\t\tstr_.resize(newSize, c);\n\t}\n\n\t/**\n\t\treturn current length of allocated storage\n\t*/\n\tsize_type capacity() const { return str_.capacity(); }\n\n\t/**\n\t\tdetermine new minimum length of allocated storage\n\t\t@param newSize [local::in] reserve size\n\t*/\n\tvoid reserve(size_type newSize = 0) { str_.reserve(newSize); }\n\n\t/**\n\t\ttest if sequence is empty\n\t\t@return true if empty\n\t*/\n\tbool empty() const { return str_.empty(); }\n\n\t/**\n\t\tcopy [off, off + count) to [dest, dest + count)\n\t\t@param dest [local::in] destination\n\t\t@param count [local::in] count of copy\n\t\t@param off [local::in] copy from here\n\t*/\n\tsize_type copy(CharT *dest, size_type count, size_type off = 0) const\n\t{\n#if defined(_MSC_VER) && (_MSC_VER < 1600)\n\t\treturn str_._Copy_s(dest, count, count, off);\n#else\n\t\treturn str_.copy(dest, count, off);\n#endif\n\t}\n\n\t/**\n\t\texchange contents with rhs\n\t\t@param rhs [local::in] swap string\n\t*/\n\tvoid swap(StringT& rhs) { str_.swap(rhs.str_); }\n\n\t/**\n\t\tlook for rhs beginnng at or after off\n\t\t@param rhs [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@return position\n\t*/\n\tsize_type find(const StringT& rhs, size_type off = 0) const\n\t{\n\t\treturn str_.find(rhs.str_, off);\n\t}\n\n\t/**\n\t\tlook for [str, str + count) beginnng at or after off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@param count [local::in] count of str\n\t*/\n\tsize_type find(const CharT *str, size_type off, size_type count) const\n\t{\n\t\treturn str_.find(str, off, count);\n\t}\n\n\t/**\n\t\tlook for [str, NUL) beginnng at or after off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find(const CharT *str, size_type off = 0) const\n\t{\n\t\treturn str_.find(str, off);\n\t}\n\n\t/**\n\t\tlook for c at or after off\n\t\t@param c [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find(CharT c, size_type off = 0) const\n\t{\n\t\treturn str_.find(c, 
off);\n\t}\n\n\t/**\n\t\tlook for rhs beginning before off\n\t\t@param rhs [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type rfind(const StringT& rhs, size_type off = npos) const\n\t{\n\t\treturn str_.rfind(rhs.str_, off);\n\t}\n\n\t/**\n\t\tlook for [str, str + count) beginning before off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@param count [local::in] count of character\n\t*/\n\tsize_type rfind(const CharT *str, size_type off, size_type count) const\n\t{\n\t\treturn str_.rfind(str, off, count);\n\t}\n\n\t/**\n\t\tlook for [str, NUL) beginning before off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type rfind(const CharT *str, size_type off = npos) const\n\t{\n\t\treturn str_.rfind(str, off);\n\t}\n\n\t/**\n\t\tlook for c before off\n\t\t@param c [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type rfind(CharT c, size_type off = npos) const\n\t{\n\t\treturn str_.rfind(c, off);\n\t}\n\n\t/**\n\t\tlook for one of rhs at or after off\n\t\t@param rhs [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_first_of(const StringT& rhs, size_type off = 0) const\n\t{\n\t\treturn str_.find_first_of(rhs.str_, off);\n\t}\n\n\t/**\n\t\tlook for one of [str, str + count) at or after off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@param count [local::in] count of character\n\t*/\n\tsize_type find_first_of(const CharT *str, size_type off, size_type count) const\n\t{\n\t\treturn str_.find_first_of(str, off, count);\n\t}\n\n\t/**\n\t\tlook for one of [str, NUL) at or after off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_first_of(const CharT *str, size_type off = 0) const\n\t{\n\t\treturn str_.find_first_of(str, off);\n\t}\n\n\t/**\n\t\tlook for c at or after off\n\t\t@param c [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_first_of(CharT c, size_type off = 0) const\n\t{\n\t\treturn str_.find_first_of(c, off);\n\t}\n\n\t/**\n\t\tlook for one of rhs before off\n\t\t@param rhs [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_last_of(const StringT& rhs, size_type off = npos) const\n\t{\n\t\treturn str_.find_last_of(rhs.str_, off);\n\t}\n\n\t/**\n\t\tlook for one of [str, str + count) before off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@param count [local::in] count of character\n\t*/\n\tsize_type find_last_of(const CharT *str, size_type off, size_type count) const\n\t{\n\t\treturn str_.find_last_of(str, off, count);\n\t}\n\n\t/**\n\t\tlook for one of [str, NUL) before off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_last_of(const CharT *str, size_type off = npos) const\n\t{\n\t\treturn str_.find_last_of(str, off);\n\t}\n\n\t/**\n\t\tlook for c before off\n\t\t@param c [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_last_of(CharT c, size_type off = npos) const\n\t{\n\t\treturn str_.find_last_of(c, off);\n\t}\n\n\t/**\n\t\tlook for none of rhs at or after off\n\t\t@param rhs [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_first_not_of(const StringT& rhs, size_type off = 0) const\n\t{\n\t\treturn str_.find_first_not_of(rhs.str_, off);\n\t}\n\n\t/**\n\t\tlook for none of 
[str, str + count) at or after off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@param count [local::in] count of character\n\t*/\n\tsize_type find_first_not_of(const CharT *str, size_type off, size_type count) const\n\t{\n\t\treturn str_.find_first_not_of(str, off, count);\n\t}\n\n\t/**\n\t\tlook for one of [str, NUL) at or after off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_first_not_of(const CharT *str, size_type off = 0) const\n\t{\n\t\treturn str_.find_first_not_of(str, off);\n\t}\n\n\t/**\n\t\tlook for non c at or after off\n\t\t@param c [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_first_not_of(CharT c, size_type off = 0) const\n\t{\n\t\treturn str_.find_first_not_of(c, off);\n\t}\n\n\t/**\n\t\tlook for none of rhs before off\n\t\t@param rhs [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_last_not_of(const StringT& rhs, size_type off = npos) const\n\t{\n\t\treturn str_.find_last_not_of(rhs.str_, off);\n\t}\n\n\t/**\n\t\tlook for none of [str, str + count) before off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t\t@param count [local::in] count of character\n\t*/\n\tsize_type find_last_not_of(const CharT *str, size_type off, size_type count) const\n\t{\n\t\treturn str_.find_last_not_of(str, off, count);\n\t}\n\n\t/**\n\t\tlook for none of [str, NUL) before off\n\t\t@param str [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_last_not_of(const CharT *str, size_type off = npos) const\n\t{\n\t\treturn str_.find_last_not_of(str, off);\n\t}\n\n\t/**\n\t\tlook for non c before off\n\t\t@param c [local::in] target\n\t\t@param off [local::in] search from here\n\t*/\n\tsize_type find_last_not_of(CharT c, size_type off = npos) const\n\t{\n\t\treturn str_.find_last_not_of(c, off);\n\t}\n\t/**\n\t\treturn [off, off + count) as new string\n\t\t@param off [local::in] from here\n\t\t@param count [local::in] count of substring\n\t*/\n\tStringT substr(size_type off = 0, size_type count = npos) const\n\t{\n\t\treturn str_.substr(off, count);\n\t}\n\t/**\n\t\tcompare *this with rhs\n\t\t@param rhs [local::in] target\n\t*/\n\tint compare(const StringT& rhs) const\n\t{\n\t\treturn str_.compare(rhs.str_);\n\t}\n\n\t/**\n\t\tcompare [off, off + n) with rhs\n\t\t@param off [local::in] from here\n\t\t@param n [local::in] count of lhs\n\t\t@param rhs [local::in] target\n\t*/\n\tint compare(size_type off, size_type n, const StringT& rhs) const\n\t{\n\t\treturn str_.compare(off, n, rhs.str_);\n\t}\n\n\t/**\n\t\tcompare [off, off + n) with rhs [rhsOff, rhsOff + count)\n\t\t@param off [local::in] from here\n\t\t@param n [local::in] count of lhs\n\t\t@param rhs [local::in] target\n\t\t@param rhsOff [local::in] target from here\n\t\t@param count [local::in] count of rhs\n\t*/\n\tint compare(size_type off, size_type n, const StringT& rhs, size_type rhsOff, size_type count) const\n\t{\n\t\treturn str_.compare(off, n, rhs.str_, rhsOff, count);\n\t}\n\n\t/**\n\t\tcompare [0, _Mysize) with [str, NUL)\n\t\t@param str [local::in] target\n\t*/\n\tint compare(const CharT *str) const\n\t{\n\t\treturn str_.compare(str);\n\t}\n\n\t/**\n\t\tcompare [off, off + n) with [str, NUL)\n\t\t@param off [local::in] from here\n\t\t@param n [local::in] count of lhs\n\t\t@param str [local::in] target\n\t*/\n\tint compare(size_type off, size_type n, const CharT *str) 
const\n\t{\n\t\treturn str_.compare(off, n, str);\n\t}\n\n\t/**\n\t\tcompare [off, off + n) with [str, str + count)\n\t\t@param off [local::in] from here\n\t\t@param n [local::in] count of lhs\n\t\t@param str [local::in] target\n\t\t@param count [local::in] count of rhs\n\t*/\n\tint compare(size_type off,size_type n, const CharT *str, size_type count) const\n\t{\n\t\treturn str_.compare(off, n, str, count);\n\t}\n\t/**\n\t\tconvert to std::string with UTF-8\n\t*/\n\tvoid toUtf8(std::string& str) const\n\t{\n\t\tfor (size_t i = 0, n = str_.size(); i < n; i++) {\n\t\t\tif (!string::AppendUtf8(str, str_[i])) {\n\t\t\t\tcybozu::StringException e;\n\t\t\t\te << \"toUtf8\" << str;\n\t\t\t\tthrow e;\n\t\t\t}\n\t\t}\n\t}\n\tstd::string toUtf8() const\n\t{\n\t\tstd::string str;\n\t\ttoUtf8(str);\n\t\treturn str;\n\t}\n\t/**\n\t\tconvert to std::string with UTF-16LE\n\t*/\n\tvoid toUtf16(String16& str) const\n\t{\n\t\tfor (size_t i = 0, n = str_.size(); i < n; i++) {\n\t\t\tif (!string::AppendUtf16(&str, str_[i])) {\n\t\t\t\tcybozu::StringException e;\n\t\t\t\te << \"toUtf16\";\n\t\t\t\tthrow e;\n\t\t\t}\n\t\t}\n\t}\n\tString16 toUtf16() const\n\t{\n\t\tString16 str;\n\t\ttoUtf16(str);\n\t\treturn str;\n\t}\n\t/**\n\t\tis this valid unicode string?\n\t\t@return true correct string\n\t\t@return false bad string\n\t*/\n\tbool isValid() const\n\t{\n\t\tfor (size_t i = 0, n = str_.size(); i < n; i++) {\n\t\t\tif (!IsValidChar(str_[i])) return false;\n\t\t}\n\t\treturn true;\n\t}\n\t/**\n\t\tget internal const str(don't use this function)\n\t*/\n\tconst BasicString& get() const { return str_; }\n\t/**\n\t\tget internal str(don't use this function)\n\t*/\n\tBasicString& get() { return str_; }\nprivate:\n\ttemplate<class Iterator>\n\tCharT getOneChar(Iterator& begin, const Iterator& end)\n\t{\n\t\treturn getOneCharSub(begin, end, *begin);\n\t}\n\t// dispatch\n\ttemplate<class Iterator>\n\tCharT getOneCharSub(Iterator& begin, const Iterator&, CharT)\n\t{\n\t\treturn *begin++;\n\t}\n\ttemplate<class Iterator>\n\tCharT getOneCharSub(Iterator& begin, const Iterator& end, char)\n\t{\n\t\tCharT c;\n\t\tif (!cybozu::string::GetCharFromUtf8(&c, begin, end)) {\n\t\t\tcybozu::StringException e;\n\t\t\te << \"getOneCharSub\";\n\t\t\tthrow e;\n\t\t}\n\t\treturn c;\n\t}\n\tBasicString str_;\n};\n\nnamespace string {\ntypedef StringT<Char> String;\n}\n\nusing string::String;\n\n/**\n\tgetline from std::istream\n\t@param is [local::in] input stream\n\t@param str [out] one line string\n\t@param delim [local::in] delimiter\n\t@return is\n*/\ninline std::istream& getline(std::istream& is, cybozu::String& str, const char delim = '\\n')\n{\n\tstd::string tmp;\n\tstd::getline(is, tmp, delim);\n\tstr.assign(tmp);\n\treturn is;\n}\n\n/**\n\tinput stream operator as UTF-8\n\t@param is [local::in] input stream\n\t@param str [out] input string\n\t@return is\n*/\ninline std::istream& operator>>(std::istream& is, cybozu::String& str)\n{\n\tstd::string tmp;\n\tis >> tmp;\n\tstr.assign(tmp);\n\treturn is;\n}\n\n/**\n\toutput stream operator as UTF-8\n\t@param os [local::in] output stream\n\t@param str [local::in] output string\n\t@return os\n*/\ninline std::ostream& operator<<(std::ostream& os, const cybozu::String& str)\n{\n\treturn os << str.toUtf8();\n}\n\n/**\n\tconcatinate string(lhs + rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n\t@return concatinated string\n*/\ninline cybozu::String operator+(const cybozu::String& lhs, const cybozu::String& rhs) { return cybozu::String(lhs) += rhs; 
}\n/**\n\tconcatinate string(lhs + c)\n\t@param lhs [local::in] left string\n\t@param c [local::in] right character\n\t@return concatinated string\n*/\ninline cybozu::String operator+(const cybozu::String& lhs, const Char c) { return cybozu::String(lhs) += c; }\n/**\n\tconcatinate string(lhs + str[0, NUL))\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n\t@return concatinated string\n*/\ninline cybozu::String operator+(const cybozu::String& lhs, const Char* str) { return cybozu::String(lhs) += str; }\n\n/**\n\tcompare string(lhs == rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n*/\ninline bool operator==(const cybozu::String& lhs, const cybozu::String& rhs) { return lhs.compare(rhs) == 0; }\n\n/**\n\tcompare string(lhs != rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n*/\ninline bool operator!=(const cybozu::String& lhs, const cybozu::String& rhs) { return !(lhs == rhs); }\n/**\n\tcompare string(lhs < rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n*/\ninline bool operator<(const cybozu::String& lhs, const cybozu::String& rhs) { return lhs.compare(rhs) < 0; }\n/**\n\tcompare string(lhs > rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n*/\ninline bool operator>(const cybozu::String& lhs, const cybozu::String& rhs) { return lhs.compare(rhs) > 0; }\n/**\n\tcompare string(lhs <= rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n*/\ninline bool operator<=(const cybozu::String& lhs, const cybozu::String& rhs) { return lhs.compare(rhs) <= 0; }\n/**\n\tcompare string(lhs >= rhs)\n\t@param lhs [local::in] left string\n\t@param rhs [local::in] right string\n*/\ninline bool operator>=(const cybozu::String& lhs, const cybozu::String& rhs) { return lhs.compare(rhs) >= 0; }\n\ninline bool ConvertUtf16ToUtf8(std::string *out, const cybozu::Char16 *begin, const cybozu::Char16 *end)\n{\n\tout->clear();\n\tout->reserve((end - begin) * 3);\n\twhile (begin != end) {\n\t\tcybozu::Char c;\n\t\tif (!string::GetCharFromUtf16(c, begin, end)) return false;\n\t\tif (!string::AppendUtf8(*out, c)) return false;\n\t}\n\treturn true;\n}\ninline bool ConvertUtf16ToUtf8(std::string *out, const cybozu::String16& in)\n{\n\treturn ConvertUtf16ToUtf8(out, &in[0], &in[0] + in.size());\n}\n\ninline bool ConvertUtf8ToUtf16(cybozu::String16 *out, const char *begin, const char *end)\n{\n\tout->clear();\n\tout->reserve((end - begin) / 2);\n\twhile (begin != end) {\n\t\tcybozu::Char c;\n\t\tif (!string::GetCharFromUtf8(&c, begin, end)) return false;\n\t\tif (!string::AppendUtf16(out, c)) return false;\n\t}\n\treturn true;\n}\n\ninline bool ConvertUtf8ToUtf16(cybozu::String16 *out, const std::string& in)\n{\n\treturn ConvertUtf8ToUtf16(out, &in[0], &in[0] + in.size());\n}\n\n} // cybozu\n\n// specialization for boost::hash\nnamespace boost {\n\ntemplate<class T>\nstruct hash;\n\ntemplate<>\nstruct hash<cybozu::String> : public std::unary_function<cybozu::String, size_t> {\n\tsize_t operator()(const cybozu::String& str) const\n\t{\n\t\tsize_t seed = 0;\n\t\tfor (size_t i = 0, n = str.size(); i < n; i++) {\n\t\t\tseed ^= str[i] + 0x9e3779b9 + (seed << 6) + (seed >> 2); // copied from boost/functional/hash.hpp\n\t\t}\n\t\treturn seed;\n\t}\n};\n\n} // boost\n" }, { "alpha_fraction": 0.5090709328651428, "alphanum_fraction": 0.5217152237892151, "avg_line_length": 28.52458953857422, "blob_id": "357d45515433b3af530233f30ae35fb6f5a0870f", 
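The header above exposes the full std::basic_string search/compare surface plus Unicode round-trips. The following is a minimal usage sketch added for illustration, not part of the repository: it assumes the header is reachable as "cybozu/string.hpp" (the include maxsubst.cpp uses further down), that cybozu::Char holds a Unicode code point, and that a default constructor and an assign(std::string) overload exist, as implied by the getline() helper shown above.

```cpp
// Illustrative sketch only -- exercises the cybozu::String API shown above.
#include <iostream>
#include <string>
#include "cybozu/string.hpp"

int main()
{
	cybozu::String s;                  // default ctor assumed
	s.assign(std::string("cafe"));     // build from UTF-8 bytes, as getline() does
	s += cybozu::Char(0x00e9);         // append code point U+00E9 via operator+=
	if (s.isValid()) {                 // every stored code point is valid Unicode
		std::string utf8 = s.toUtf8();          // may throw cybozu::StringException
		cybozu::String16 utf16 = s.toUtf16();   // UTF-16LE variant
		std::cout << s << " / utf16 units: " << utf16.size() << std::endl; // operator<< streams UTF-8
	}
	return 0;
}
```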
"content_id": "d99a623ad220bf4904448bc6db0a70ca3268ee63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 105, "num_lines": 61, "path": "/foursquare-sentiment-mining/miner/ldigwrapper.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 12.05.2014\n\n@author: schieberle\n'''\nimport numpy\n\nfrom lib.ldig import ldig\n\n\nclass Detector(object):\n def __init__(self, modeldir):\n self.ldig = ldig.ldig(modeldir)\n self.features = self.ldig.load_features()\n self.trie = self.ldig.load_da()\n self.labels = self.ldig.load_labels()\n self.param = numpy.load(self.ldig.param)\n\n def detect(self, st):\n label, text, org_text = ldig.normalize_text(st)\n events = self.trie.extract_features(u\"\\u0001\" + text + u\"\\u0001\")\n sum = numpy.zeros(len(self.labels))\n\n data = []\n for id in sorted(events, key=lambda id:self.features[id][0]):\n phi = self.param[id, ]\n sum += phi * events[id]\n data.append({\"id\":int(id), \"feature\":self.features[id][0], \"phi\":[\"%0.3f\" % x for x in phi]})\n exp_w = numpy.exp(sum - sum.max())\n prob = exp_w / exp_w.sum()\n return {\"labels\":self.labels, \"data\":data, \"prob\":[\"%0.3f\" % x for x in prob]} \n\n\nclass lang_detector(Detector):\n \"\"\"Takes a String and returns the label of the language. Possible labels are:\n \n - cs Czech\n - da Dannish\n - de German\n - en English\n - es Spanish\n - fi Finnish\n - fr French\n - id Indonesian\n - it Italian\n - nl Dutch\n - no Norwegian\n - pl Polish\n - pt Portuguese\n - ro Romanian\n - sv Swedish\n - tr Turkish\n - vi Vietnamese\"\"\"\n \n def __init__(self, modeldir):\n Detector.__init__(self, modeldir)\n \n def detect(self, st):\n r = Detector.detect(self, st)\n return r.get('labels')[r.get('prob').index(max(r.get('prob')))]\n \n \n" }, { "alpha_fraction": 0.4404374063014984, "alphanum_fraction": 0.45183807611465454, "avg_line_length": 52.82389831542969, "blob_id": "c80e8938ee85c62a8cd776a2c75f95d7d7dab3ea", "content_id": "613c68e25d178d80cd2207f84130f9367f60c142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8596, "license_type": "no_license", "max_line_length": 160, "num_lines": 159, "path": "/foursquare-sentiment-mining/miner/evaluation.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 21.05.2014\n\n@author: Andreas\n'''\nimport codecs\nimport csv\nfrom miner.analysis import analysis\nfrom miner.sentiwords import sentiwords\n\n\nclass evaluation(object):\n def __init__(self, tipsData, sentimentData):\n \n if not (isinstance(tipsData, analysis) and isinstance(sentimentData, sentiwords)):\n raise TypeError(\"An object of class analysis and sentiwords must be submitted\")\n \n self.tips = tipsData.tips\n self.sentiwords = sentimentData\n self.data = {}\n self.precision = {'avg': 0, 'max': 0}\n \n def evaluate_en(self, evalFile=None, exportFile=None, deviation=0):\n self.deviation=deviation\n tags = {\"J\": \"a\", \"N\": \"n\", \"R\": \"r\", \"M\": \"v\", \"V\":\"v\"}\n result = {}\n for tip in self.tips:\n if tip.lang['lang'] == \"en\":\n result.update({tip.id: {'text':tip.text, 'avg_score':0, 'max_score':0, 'sentiTags':[]}})\n for token in tip.pos:\n if tags.get(token[1][0], False):\n term = self.sentiwords.lexicon.get((tags.get(token[1][0]), token[2]))\n if term is not None:\n 
result.get(tip.id).update({'avg_score': result.get(tip.id).get('avg_score') + term.get('avg_pos_score') - term.get('avg_neg_score'),\n 'max_score': result.get(tip.id).get('max_score') + term.get('max_pos_score') - term.get('max_neg_score'),\n 'sentiTags': result.get(tip.id).get('sentiTags') + [term.get('term'),\n term.get('avg_pos_score') - term.get('avg_neg_score'),\n term.get('max_pos_score') - term.get('max_neg_score')]})\n self.data = result\n \"\"\" Evaluate manually tagged data \"\"\"\n if evalFile is not None: \n self.loadEvaluationData(evalFile)\n for item in result.iteritems():\n if item[1].get('avg_score') < -deviation and item[1].get('polarity').strip() == 'neg':\n item[1].update(avg_eval = True)\n elif item[1].get('avg_score') > deviation and item[1].get('polarity').strip() == 'pos':\n item[1].update(avg_eval = True)\n elif item[1].get('avg_score') <= deviation and item[1].get('avg_score') >= -deviation and item[1].get('polarity') is 'neu':\n item[1].update(avg_eval = True)\n else:\n item[1].update(avg_eval = False)\n \n if item[1].get('max_score') < -deviation and item[1].get('polarity').strip() == 'neg':\n item[1].update(max_eval = True)\n elif item[1].get('max_score') > deviation and item[1].get('polarity').strip() == 'pos':\n item[1].update(max_eval = True)\n elif item[1].get('max_score') <= deviation and item[1].get('max_score') >= -deviation and item[1].get('polarity') is 'neu':\n item[1].update(max_eval = True)\n else:\n item[1].update(max_eval = False)\n self.data = result\n self.calcPrecision()\n \n \"\"\" Export data to file\"\"\"\n if exportFile is not None: \n self.export(exportFile)\n\n def evaluate_de(self, evalFile=None, exportFile=None, deviation=0):\n self.deviation=deviation\n tags = {\"A\": \"a\", \"N\": \"n\", \"R\": \"r\", \"M\": \"v\", \"V\":\"v\"}\n result = {}\n for tip in self.tips:\n if tip.lang['lang'] == \"de\":\n result.update({tip.id: {'text':tip.text, 'pos':tip.pos, 'avg_score':0, 'max_score':0, 'sentiTags':[]}})\n for token in tip.pos:\n if tags.get(token[1][0], False):\n term = self.sentiwords.lexicon.get((tags.get(token[1][0]), token[2]))\n if term is not None:\n result.get(tip.id).update({'avg_score': result.get(tip.id).get('avg_score') + term.get('avg_pos_score') - term.get('avg_neg_score'),\n 'max_score': result.get(tip.id).get('max_score') + term.get('max_pos_score') - term.get('max_neg_score'),\n 'sentiTags': result.get(tip.id).get('sentiTags') + [term.get('term'),\n term.get('avg_pos_score') - term.get('avg_neg_score'),\n term.get('max_pos_score') - term.get('max_neg_score')]})\n self.data = result\n \"\"\" Evaluate manually tagged data \"\"\"\n if evalFile is not None: \n self.loadEvaluationData(evalFile)\n for item in result.iteritems():\n if item[1].get('avg_score') < -deviation and item[1].get('polarity').strip() == 'neg':\n item[1].update(avg_eval = True)\n elif item[1].get('avg_score') > deviation and item[1].get('polarity').strip() == 'pos':\n item[1].update(avg_eval = True)\n elif item[1].get('avg_score') <= deviation and item[1].get('avg_score') >= -deviation and item[1].get('polarity') is 'neu':\n item[1].update(avg_eval = True)\n else:\n item[1].update(avg_eval = False)\n \n if item[1].get('max_score') < -deviation and item[1].get('polarity').strip() == 'neg':\n item[1].update(max_eval = True)\n elif item[1].get('max_score') > deviation and item[1].get('polarity').strip() == 'pos':\n item[1].update(max_eval = True)\n elif item[1].get('max_score') <= deviation and item[1].get('max_score') >= -deviation and 
item[1].get('polarity') is 'neu':\n item[1].update(max_eval = True)\n else:\n item[1].update(max_eval = False)\n self.data = result\n self.calcPrecision()\n \n \"\"\" Export data to file\"\"\"\n if exportFile is not None: \n self.export(exportFile)\n \n def export(self, fname):\n \"\"\" Export data to file\"\"\"\n with codecs.open(fname, 'wb', \"utf-8\") as f:\n f.write(\";\".join([\"id\", \"polarity\", \"AVG Eval\", \"AVG Score\", \"MAX Eval\", \"MAX Score\", \"Text\", \"\\n\"]))\n for item in self.data.iteritems():\n f.write(\";\".join([\"\\\"\" + item[0] + \"\\\"\",\n \"\\\"\" + item[1].get('polarity') + \"\\\"\",\n \"\\\"\" + str(item[1].get('avg_eval')) + \"\\\"\",\n \"\\\"\" + \"{:10.2f}\".format(item[1].get('avg_score')) + \"\\\"\",\n \"\\\"\" + str(item[1].get('max_eval')) + \"\\\"\",\n \"\\\"\" + \"{:10.2f}\".format(item[1].get('max_score')) + \"\\\"\",\n \"\\\"\" + item[1].get('text') + \"\\\"\",\n \"\\n\"]))\n \n\n \n def loadEvaluationData(self, fname):\n \"\"\" Load manually Tagged Data from file \"\"\"\n import_file = csv.DictReader(open(fname), delimiter='\\t')\n for row in import_file:\n if row['lang'] == 'en' or row['lang'] == 'de':\n print row\n self.data.get(row['id']).update(polarity=row['polarity'].strip(),\n correctLang=row['lang?'].strip(), \n real=row['real?'].strip())\n \n def calcPrecision(self):\n \n ret = {'avg_true': 0, 'avg_false': 0, 'max_true': 0, 'max_false': 0}\n i = 0.0\n \n for item in self.data.iteritems():\n if item[1].get('avg_eval') == True: \n ret.update({'avg_true': ret.get('avg_true') + 1})\n else:\n ret.update({'avg_false': ret.get('avg_false') + 1})\n \n if item[1].get('max_eval') == True: \n ret.update({'max_true': ret.get('max_true') + 1})\n else:\n ret.update({'max_false': ret.get('max_false') + 1})\n \n i += 1\n\n self.precision.update({'avg': float(ret.get('avg_true') / i),\n 'max': float(ret.get('max_true') / i)})\n \n \n " }, { "alpha_fraction": 0.47040170431137085, "alphanum_fraction": 0.5457565784454346, "avg_line_length": 38.25691604614258, "blob_id": "3ac6dca19fee7a6eab96650382b921c3ec4d2534", "content_id": "bb26fa1ff326fbf3960731ef9b1665d849af76db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19866, "license_type": "permissive", "max_line_length": 149, "num_lines": 506, "path": "/foursquare-sentiment-mining/miner/lib/ldig/ldig.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ldig : Language Detector with Infinite-Gram\n# This code is available under the MIT License.\n# (c)2011 Nakatani Shuyo / Cybozu Labs Inc.\n\nimport htmlentitydefs\nimport optparse\nimport os, sys, re, codecs, json\nimport subprocess\n\nimport numpy\n\nfrom miner.lib.ldig import da\n\n\nclass ldig(object):\n def __init__(self, model_dir):\n self.features = os.path.join(model_dir, 'features')\n self.labels = os.path.join(model_dir, 'labels.json')\n self.param = os.path.join(model_dir, 'parameters.npy')\n self.doublearray = os.path.join(model_dir, 'doublearray.npz')\n\n def load_da(self):\n trie = da.DoubleArray()\n trie.load(self.doublearray)\n return trie\n\n def load_features(self):\n features = []\n with codecs.open(self.features, 'rb', 'utf-8') as f:\n pre_feature = \"\"\n for n, s in enumerate(f):\n m = re.match(r'(.+)\\t([0-9]+)', s)\n if not m:\n sys.exit(\"irregular feature : '%s' at %d\" % (s, n + 1))\n if pre_feature >= m.groups(1):\n sys.exit(\"unordered feature : '%s' at %d\" % (s, n + 1))\n pre_feature = 
m.groups(1)\n features.append(m.groups())\n return features\n\n def load_labels(self):\n with open(self.labels, 'rb') as f:\n return json.load(f)\n\n\n def init(self, temp_path, corpus_list, lbff, ngram_bound):\n \"\"\"\n Extract features from corpus and generate TRIE(DoubleArray) data\n - load corpus\n - generate temporary file for maxsubst\n - generate double array and save it\n - parameter: lbff = lower bound of feature frequency\n \"\"\"\n\n labels = []\n with codecs.open(temp_path, 'wb', 'utf-8') as f:\n for file in corpus_list:\n with codecs.open(file, 'rb', 'utf-8') as g:\n for i, s in enumerate(g):\n label, text, org_text = normalize_text(s)\n if label is None or label == \"\":\n sys.stderr.write(\"no label data at %d in %s \\n\" % (i + 1, file))\n continue\n if label not in labels:\n labels.append(label)\n f.write(text)\n f.write(\"\\n\")\n\n labels.sort()\n print \"labels: %d\" % len(labels)\n with open(self.labels, 'wb') as f:\n f.write(json.dumps(labels))\n\n print \"generating max-substrings...\"\n temp_features = self.features + \".temp\"\n maxsubst = options.maxsubst\n if os.name == 'nt': maxsubst += \".exe\"\n subprocess.call([maxsubst, temp_path, temp_features])\n\n # count features\n M = 0\n features = []\n r1 = re.compile(u'.\\u0001.')\n r2 = re.compile(u'[A-Za-z\\u00a1-\\u00a3\\u00bf-\\u024f\\u1e00-\\u1eff]')\n with codecs.open(temp_features, 'rb', 'utf-8') as f:\n for line in f:\n i = line.index('\\t')\n st = line[0:i]\n c = int(line[i + 1:-1])\n if c >= lbff and len(st) <= ngram_bound and (not r1.search(st)) and r2.search(st) and (st[0] != u'\\u0001' or st[-1] != u'\\u0001'):\n M += 1\n features.append((st, line))\n print \"# of features = %d\" % M\n\n features.sort()\n with codecs.open(self.features, 'wb', 'utf-8') as f:\n for s in features:\n f.write(s[1])\n\n generate_doublearray(self.doublearray, [s[0] for s in features])\n\n numpy.save(self.param, numpy.zeros((M, len(labels))))\n\n def shrink(self):\n features = self.load_features()\n param = numpy.load(self.param)\n\n list = (numpy.abs(param).sum(1) > 0.0000001)\n new_param = param[list]\n print \"# of features : %d => %d\" % (param.shape[0], new_param.shape[0])\n\n numpy.save(self.param, new_param)\n new_features = []\n with codecs.open(self.features, 'wb', 'utf-8') as f:\n for i, x in enumerate(list):\n if x:\n f.write(\"%s\\t%s\\n\" % features[i])\n new_features.append(features[i][0])\n\n generate_doublearray(self.doublearray, new_features)\n\n def debug(self, args):\n features = self.load_features()\n trie = self.load_da()\n labels = self.load_labels()\n param = numpy.load(self.param)\n\n for st in args:\n label, text, org_text = normalize_text(st)\n events = trie.extract_features(u\"\\u0001\" + text + u\"\\u0001\")\n print \"orig: '%s'\" % st\n print \"norm: '%s'\" % text\n sum = numpy.zeros(len(labels))\n print \"id\\tfeat\\tfreq\\t%s\" % \"\\t\".join(labels)\n for id in sorted(events, key=lambda id:features[id][0]):\n phi = param[id, ]\n sum += phi * events[id]\n print \"%d\\t%s\\t%d\\t%s\" % (id, features[id][0], events[id], \"\\t\".join([\"%0.2f\" % x for x in phi]))\n exp_w = numpy.exp(sum - sum.max())\n prob = exp_w / exp_w.sum()\n print \"\\t\\t\\t%s\" % \"\\t\".join([\"%0.2f\" % x for x in sum])\n print \"\\t\\t\\t%s\" % \"\\t\".join([\"%0.1f%%\" % (x * 100) for x in prob])\n\n def learn(self, options, args):\n trie = self.load_da()\n param = numpy.load(self.param)\n labels = self.load_labels()\n\n import time\n print \"loading corpus... 
\" + time.strftime(\"%H:%M:%S\", time.localtime())\n corpus, idlist = load_corpus(args, labels)\n print \"inference... \" + time.strftime(\"%H:%M:%S\", time.localtime())\n inference(param, labels, corpus, idlist, trie, options)\n print \"finish... \" + time.strftime(\"%H:%M:%S\", time.localtime())\n numpy.save(self.param, param)\n\n def detect(self, options, args):\n trie = self.load_da()\n param = numpy.load(self.param)\n labels = self.load_labels()\n\n log_likely = likelihood(param, labels, trie, args, options)\n\n\n\n\n# from http://www.programming-magic.com/20080820002254/\nreference_regex = re.compile(u'&(#x?[0-9a-f]+|[a-z]+);', re.IGNORECASE)\nnum16_regex = re.compile(u'#x\\d+', re.IGNORECASE)\nnum10_regex = re.compile(u'#\\d+', re.IGNORECASE)\ndef htmlentity2unicode(text):\n result = u''\n i = 0\n while True:\n match = reference_regex.search(text, i)\n if match is None:\n result += text[i:]\n break\n\n result += text[i:match.start()]\n i = match.end()\n name = match.group(1)\n\n if name in htmlentitydefs.name2codepoint.keys():\n result += unichr(htmlentitydefs.name2codepoint[name])\n elif num16_regex.match(name):\n result += unichr(int(u'0' + name[1:], 16))\n elif num10_regex.match(name):\n result += unichr(int(name[1:]))\n return result\n\n\ndef normalize_twitter(text):\n \"\"\"normalization for twitter\"\"\"\n text = re.sub(r'(@|#|https?:\\/\\/)[^ ]+', '', text)\n text = re.sub(r'(^| )[:;x]-?[\\(\\)dop]($| )', ' ', text) # facemark\n text = re.sub(r'(^| )(rt[ :]+)*', ' ', text)\n text = re.sub(r'([hj])+([aieo])+(\\1+\\2+){1,}', r'\\1\\2\\1\\2', text, re.IGNORECASE) # laugh\n text = re.sub(r' +(via|live on) *$', '', text)\n return text\n\n\nre_ignore_i = re.compile(r'[^I]')\nre_turkish_alphabet = re.compile(u'[\\u011e\\u011f\\u0130\\u0131]')\nvietnamese_norm = {\n\tu'\\u0041\\u0300':u'\\u00C0', u'\\u0045\\u0300':u'\\u00C8', u'\\u0049\\u0300':u'\\u00CC', u'\\u004F\\u0300':u'\\u00D2',\n\tu'\\u0055\\u0300':u'\\u00D9', u'\\u0059\\u0300':u'\\u1EF2', u'\\u0061\\u0300':u'\\u00E0', u'\\u0065\\u0300':u'\\u00E8',\n\tu'\\u0069\\u0300':u'\\u00EC', u'\\u006F\\u0300':u'\\u00F2', u'\\u0075\\u0300':u'\\u00F9', u'\\u0079\\u0300':u'\\u1EF3',\n\tu'\\u00C2\\u0300':u'\\u1EA6', u'\\u00CA\\u0300':u'\\u1EC0', u'\\u00D4\\u0300':u'\\u1ED2', u'\\u00E2\\u0300':u'\\u1EA7',\n\tu'\\u00EA\\u0300':u'\\u1EC1', u'\\u00F4\\u0300':u'\\u1ED3', u'\\u0102\\u0300':u'\\u1EB0', u'\\u0103\\u0300':u'\\u1EB1',\n\tu'\\u01A0\\u0300':u'\\u1EDC', u'\\u01A1\\u0300':u'\\u1EDD', u'\\u01AF\\u0300':u'\\u1EEA', u'\\u01B0\\u0300':u'\\u1EEB',\n\n\tu'\\u0041\\u0301':u'\\u00C1', u'\\u0045\\u0301':u'\\u00C9', u'\\u0049\\u0301':u'\\u00CD', u'\\u004F\\u0301':u'\\u00D3',\n\tu'\\u0055\\u0301':u'\\u00DA', u'\\u0059\\u0301':u'\\u00DD', u'\\u0061\\u0301':u'\\u00E1', u'\\u0065\\u0301':u'\\u00E9',\n\tu'\\u0069\\u0301':u'\\u00ED', u'\\u006F\\u0301':u'\\u00F3', u'\\u0075\\u0301':u'\\u00FA', u'\\u0079\\u0301':u'\\u00FD',\n\tu'\\u00C2\\u0301':u'\\u1EA4', u'\\u00CA\\u0301':u'\\u1EBE', u'\\u00D4\\u0301':u'\\u1ED0', u'\\u00E2\\u0301':u'\\u1EA5',\n\tu'\\u00EA\\u0301':u'\\u1EBF', u'\\u00F4\\u0301':u'\\u1ED1', u'\\u0102\\u0301':u'\\u1EAE', u'\\u0103\\u0301':u'\\u1EAF',\n\tu'\\u01A0\\u0301':u'\\u1EDA', u'\\u01A1\\u0301':u'\\u1EDB', u'\\u01AF\\u0301':u'\\u1EE8', u'\\u01B0\\u0301':u'\\u1EE9',\n\n\tu'\\u0041\\u0303':u'\\u00C3', u'\\u0045\\u0303':u'\\u1EBC', u'\\u0049\\u0303':u'\\u0128', u'\\u004F\\u0303':u'\\u00D5',\n\tu'\\u0055\\u0303':u'\\u0168', u'\\u0059\\u0303':u'\\u1EF8', u'\\u0061\\u0303':u'\\u00E3', 
u'\\u0065\\u0303':u'\\u1EBD',\n\tu'\\u0069\\u0303':u'\\u0129', u'\\u006F\\u0303':u'\\u00F5', u'\\u0075\\u0303':u'\\u0169', u'\\u0079\\u0303':u'\\u1EF9',\n\tu'\\u00C2\\u0303':u'\\u1EAA', u'\\u00CA\\u0303':u'\\u1EC4', u'\\u00D4\\u0303':u'\\u1ED6', u'\\u00E2\\u0303':u'\\u1EAB',\n\tu'\\u00EA\\u0303':u'\\u1EC5', u'\\u00F4\\u0303':u'\\u1ED7', u'\\u0102\\u0303':u'\\u1EB4', u'\\u0103\\u0303':u'\\u1EB5',\n\tu'\\u01A0\\u0303':u'\\u1EE0', u'\\u01A1\\u0303':u'\\u1EE1', u'\\u01AF\\u0303':u'\\u1EEE', u'\\u01B0\\u0303':u'\\u1EEF',\n\n\tu'\\u0041\\u0309':u'\\u1EA2', u'\\u0045\\u0309':u'\\u1EBA', u'\\u0049\\u0309':u'\\u1EC8', u'\\u004F\\u0309':u'\\u1ECE',\n\tu'\\u0055\\u0309':u'\\u1EE6', u'\\u0059\\u0309':u'\\u1EF6', u'\\u0061\\u0309':u'\\u1EA3', u'\\u0065\\u0309':u'\\u1EBB',\n\tu'\\u0069\\u0309':u'\\u1EC9', u'\\u006F\\u0309':u'\\u1ECF', u'\\u0075\\u0309':u'\\u1EE7', u'\\u0079\\u0309':u'\\u1EF7',\n\tu'\\u00C2\\u0309':u'\\u1EA8', u'\\u00CA\\u0309':u'\\u1EC2', u'\\u00D4\\u0309':u'\\u1ED4', u'\\u00E2\\u0309':u'\\u1EA9',\n\tu'\\u00EA\\u0309':u'\\u1EC3', u'\\u00F4\\u0309':u'\\u1ED5', u'\\u0102\\u0309':u'\\u1EB2', u'\\u0103\\u0309':u'\\u1EB3',\n\tu'\\u01A0\\u0309':u'\\u1EDE', u'\\u01A1\\u0309':u'\\u1EDF', u'\\u01AF\\u0309':u'\\u1EEC', u'\\u01B0\\u0309':u'\\u1EED',\n\n\tu'\\u0041\\u0323':u'\\u1EA0', u'\\u0045\\u0323':u'\\u1EB8', u'\\u0049\\u0323':u'\\u1ECA', u'\\u004F\\u0323':u'\\u1ECC',\n\tu'\\u0055\\u0323':u'\\u1EE4', u'\\u0059\\u0323':u'\\u1EF4', u'\\u0061\\u0323':u'\\u1EA1', u'\\u0065\\u0323':u'\\u1EB9',\n\tu'\\u0069\\u0323':u'\\u1ECB', u'\\u006F\\u0323':u'\\u1ECD', u'\\u0075\\u0323':u'\\u1EE5', u'\\u0079\\u0323':u'\\u1EF5',\n\tu'\\u00C2\\u0323':u'\\u1EAC', u'\\u00CA\\u0323':u'\\u1EC6', u'\\u00D4\\u0323':u'\\u1ED8', u'\\u00E2\\u0323':u'\\u1EAD',\n\tu'\\u00EA\\u0323':u'\\u1EC7', u'\\u00F4\\u0323':u'\\u1ED9', u'\\u0102\\u0323':u'\\u1EB6', u'\\u0103\\u0323':u'\\u1EB7',\n\tu'\\u01A0\\u0323':u'\\u1EE2', u'\\u01A1\\u0323':u'\\u1EE3', u'\\u01AF\\u0323':u'\\u1EF0', u'\\u01B0\\u0323':u'\\u1EF1',\n}\nre_vietnamese = re.compile(u'[AEIOUYaeiouy\\u00C2\\u00CA\\u00D4\\u00E2\\u00EA\\u00F4\\u0102\\u0103\\u01A0\\u01A1\\u01AF\\u01B0][\\u0300\\u0301\\u0303\\u0309\\u0323]')\nre_latin_cont = re.compile(u'([a-z\\u00e0-\\u024f])\\\\1{2,}')\nre_symbol_cont = re.compile(u'([^a-z\\u00e0-\\u024f])\\\\1{1,}')\ndef normalize_text(org):\n m = re.match(r'([-A-Za-z]+)\\t(.+)', org)\n if m:\n label, org = m.groups()\n else:\n label = \"\"\n m = re.search(r'\\t([^\\t]+)$', org)\n if m:\n s = m.group(0)\n else:\n s = org\n s = htmlentity2unicode(s)\n s = re.sub(u'[\\u2010-\\u2015]', '-', s)\n s = re.sub(u'[0-9]+', '0', s)\n s = re.sub(u'[^\\u0020-\\u007e\\u00a1-\\u024f\\u0300-\\u036f\\u1e00-\\u1eff]+', ' ', s)\n s = re.sub(u' +', ' ', s)\n\n # vietnamese normalization\n s = re_vietnamese.sub(lambda x:vietnamese_norm[x.group(0)], s)\n\n # lower case with Turkish\n s = re_ignore_i.sub(lambda x:x.group(0).lower(), s)\n # if re_turkish_alphabet.search(s):\n # s = s.replace(u'I', u'\\u0131')\n # s = s.lower()\n\n # Romanian normalization\n s = s.replace(u'\\u0219', u'\\u015f').replace(u'\\u021b', u'\\u0163')\n\n s = normalize_twitter(s)\n s = re_latin_cont.sub(r'\\1\\1', s)\n s = re_symbol_cont.sub(r'\\1', s)\n\n return label, s.strip(), org\n\n\n# load courpus\ndef load_corpus(filelist, labels):\n idlist = dict((x, []) for x in labels)\n corpus = []\n for filename in filelist:\n f = codecs.open(filename, 'rb', 'utf-8')\n for i, s in enumerate(f):\n label, text, org_text = normalize_text(s)\n if label not in labels:\n sys.exit(\"unknown label '%s' 
at %d in %s \" % (label, i + 1, filename))\n idlist[label].append(len(corpus))\n corpus.append((label, text, org_text))\n f.close()\n return corpus, idlist\n\n\ndef shuffle(idlist):\n n = max(len(idlist[lang]) for lang in idlist)\n list = []\n for lang in idlist:\n text_ids = idlist[lang]\n n_text = len(text_ids)\n list += text_ids * (n / n_text)\n numpy.random.shuffle(text_ids)\n list += text_ids[:n % n_text]\n numpy.random.shuffle(list)\n return list\n\n\n\n# prediction probability\ndef predict(param, events):\n sum_w = numpy.dot(param[events.keys(), ].T, events.values())\n exp_w = numpy.exp(sum_w - sum_w.max())\n return exp_w / exp_w.sum()\n\n\n# inference and learning\ndef inference(param, labels, corpus, idlist, trie, options):\n K = len(labels)\n M = param.shape[0]\n\n list = shuffle(idlist)\n N = len(list)\n WHOLE_REG_INT = (N / options.n_whole_reg) + 1\n\n # learning rate\n eta = options.eta\n if options.reg_const:\n penalties = numpy.zeros_like(param)\n alpha = pow(0.9, -1.0 / N)\n uk = 0\n\n corrects = numpy.zeros(K, dtype=int)\n counts = numpy.zeros(K, dtype=int)\n for m, target in enumerate(list):\n label, text, org_text = corpus[target]\n events = trie.extract_features(u\"\\u0001\" + text + u\"\\u0001\")\n label_k = labels.index(label)\n\n y = predict(param, events)\n predict_k = y.argmax()\n counts[label_k] += 1\n if label_k == predict_k:\n corrects[label_k] += 1\n\n # learning\n if options.reg_const:\n eta *= alpha\n uk += options.reg_const * eta / N\n y[label_k] -= 1\n y *= eta\n\n if options.reg_const:\n indexes = events\n if (N - m) % WHOLE_REG_INT == 1:\n print \"full regularization: %d / %d\" % (m, N)\n indexes = xrange(M)\n for id in indexes:\n prm = param[id]\n pnl = penalties[id]\n if id in events: prm -= y * events[id]\n\n for j in xrange(K):\n w = prm[j]\n if w > 0:\n w1 = w - uk - pnl[j]\n if w1 > 0:\n prm[j] = w1\n pnl[j] += w1 - w\n else:\n prm[j] = 0\n pnl[j] -= w\n elif w < 0:\n w1 = w + uk - pnl[j]\n if w1 < 0:\n prm[j] = w1\n pnl[j] += w1 - w\n else:\n prm[j] = 0\n pnl[j] -= w\n else:\n for id, freq in events.iteritems():\n param[id, ] -= y * freq\n\n for lbl, crct, cnt in zip(labels, corrects, counts):\n if cnt > 0:\n print \"> %s = %d / %d = %.2f\" % (lbl, crct, cnt, 100.0 * crct / cnt)\n print \"> total = %d / %d = %.2f\" % (corrects.sum(), N, 100.0 * corrects.sum() / N)\n list = (numpy.abs(param).sum(1) > 0.0000001)\n print \"> # of relevant features = %d / %d\" % (list.sum(), M)\n\n\ndef likelihood(param, labels, trie, filelist, options):\n K = len(labels)\n corrects = numpy.zeros(K, dtype=int)\n counts = numpy.zeros(K, dtype=int)\n\n label_map = dict((x, i) for i, x in enumerate(labels))\n\n n_available_data = 0\n log_likely = 0.0\n for filename in filelist:\n f = codecs.open(filename, 'rb', 'utf-8')\n for i, s in enumerate(f):\n label, text, org_text = normalize_text(s)\n\n if label not in label_map:\n sys.stderr.write(\"WARNING : unknown label '%s' at %d in %s (ignore the later same labels)\\n\" % (label, i + 1, filename))\n label_map[label] = -1\n label_k = label_map[label]\n\n events = trie.extract_features(u\"\\u0001\" + text + u\"\\u0001\")\n y = predict(param, events)\n predict_k = y.argmax()\n\n if label_k >= 0:\n log_likely -= numpy.log(y[label_k])\n n_available_data += 1\n counts[label_k] += 1\n if label_k == predict_k and y[predict_k] >= 0.6:\n corrects[predict_k] += 1\n\n predict_lang = labels[predict_k]\n if y[predict_k] < 0.6: predict_lang = \"\"\n print \"%s\\t%s\\t%s\" % (label, predict_lang, org_text)\n f.close()\n\n if 
n_available_data > 0:\n        log_likely /= n_available_data\n\n    for lbl, crct, cnt in zip(labels, corrects, counts):\n        if cnt > 0:\n            print "> %s = %d / %d = %.2f" % (lbl, crct, cnt, 100.0 * crct / cnt)\n    print "> total = %d / %d = %.2f" % (corrects.sum(), n_available_data, 100.0 * corrects.sum() / n_available_data)\n    print "> average negative log likelihood = %.3f" % log_likely\n\n    return log_likely\n\n\ndef generate_doublearray(file, features):\n    trie = da.DoubleArray()\n    trie.initialize(features)\n    trie.save(file)\n\n\n\n\nif __name__ == '__main__':\n    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)\n\n    parser = optparse.OptionParser()\n    parser.add_option("-m", dest="model", help="model directory")\n    parser.add_option("--init", dest="init", help="initialize model", action="store_true")\n    parser.add_option("--learning", dest="learning", help="learn model", action="store_true")\n    parser.add_option("--shrink", dest="shrink", help="remove irrelevant features", action="store_true")\n    parser.add_option("--debug", dest="debug", help="detect command line text for debug", action="store_true")\n\n    # for initialization\n    parser.add_option("--ff", dest="bound_feature_freq", help="threshold of feature frequency (for initialization)", type="int", default=8)\n    parser.add_option("-n", dest="ngram_bound", help="n-gram upper bound (for initialization)", type="int", default=99999)\n    parser.add_option("-x", dest="maxsubst", help="max substring extractor", default="./maxsubst")\n\n    # for learning\n    parser.add_option("-e", "--eta", dest="eta", help="learning rate", type="float", default=0.1)\n    parser.add_option("-r", "--regularity", dest="reg_const", help="regularization constant", type="float")\n    parser.add_option("--wr", dest="n_whole_reg", help="number of whole regularizations", type="int", default=2)\n\n    (options, args) = parser.parse_args()\n    if not options.model: parser.error("need model directory (-m)")\n\n\n    detector = ldig(options.model)\n    if options.init:\n        if not os.path.exists(options.model):\n            os.mkdir(options.model)\n        if len(args) == 0:\n            parser.error("need corpus")\n    else:\n        if not os.path.exists(detector.features):\n            parser.error("features file doesn't exist")\n        if not os.path.exists(detector.labels):\n            parser.error("labels file doesn't exist")\n        if not os.path.exists(detector.param):\n            parser.error("parameters file doesn't exist")\n\n\n    if options.init:\n        temp_path = os.path.join(options.model, 'temp')\n        detector.init(temp_path, args, options.bound_feature_freq, options.ngram_bound)\n\n    elif options.debug:\n        detector.debug(args)\n\n    elif options.shrink:\n        detector.shrink()\n\n    elif options.learning:\n        detector.learn(options, args)\n\n    else:\n        detector.detect(options, args)\n    # import cProfile\n    # cProfile.runctx('detector.detect(options, args)', globals(), locals(), 'ldig.profile')\n\n\n" }, { "alpha_fraction": 0.5267019867897034, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 28.120481491088867, "blob_id": "e6a28582e5e5d3c47787ec7f0d6562f958c1fada", "content_id": "6e2f66d835f097782847d95c49e732a6b03527ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2453, "license_type": "permissive", "max_line_length": 187, "num_lines": 83, "path": "/foursquare-sentiment-mining/miner/lib/ldig/maxsubst/maxsubst.cpp", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "/**\r\n\t@file\r\n\t@brief maximal 
substring extractor\r\n\r\n\tCopyright (C) 2012 Nakatani Shuyo / Cybozu Labs, Inc., all rights reserved.\r\n*/\r\n\r\n#include <iostream>\n#include <vector>\n#include <fstream>\r\n#include \"esa.hxx\"\n#include \"cybozu/string.hpp\"\r\n\nconst int k = 0x10000;\n\nvoid replace(cybozu::String& str, const cybozu::String& from, cybozu::Char to) {\n cybozu::String::size_type pos = 0;\r\n while (pos = str.find(from, pos), pos != cybozu::String::npos) {\r\n\t\tstr[pos] = to;\r\n ++pos;\r\n }\r\n}\n\nint main(int argc, char* argv[]){\n\r\n\tstd::ifstream ifs(argv[1], std::ios::binary);\r\n\tcybozu::String str(std::istreambuf_iterator<char>(ifs.rdbuf()), std::istreambuf_iterator<char>());\r\n\t//std::istreambuf_iterator<char> isit(std::cin);\n\t//cybozu::String str(isit, std::istreambuf_iterator<char>());\r\n\r\n\treplace(str, \"\\n\", 1);\t// replace \\n => \\u0001\r\n\treplace(str, \"\\t\", 32);\t// replace \\t => ' '\r\n\t//replace(str, \"\\0\", 32);\t// replace \\0 => ' '\r\n\tsize_t origLen = str.size();\n\tstd::cerr << \" chars:\" << origLen << std::endl;\n\r\n\tstd::vector<int> charvec;\r\n\tcharvec.resize(origLen);\r\n\tstd::copy(str.begin(), str.end(), charvec.begin());\r\n\tstd::vector<int>::iterator icv = charvec.begin(), icvend=charvec.end();\r\n\tfor (;icv!=icvend;++icv) {\r\n\t\tif (*icv == 0 || *icv >= k) *icv = 32;\r\n\t}\r\n\n\tstd::vector<int> SA(origLen);\n\tstd::vector<int> L (origLen);\n\tstd::vector<int> R (origLen);\n\tstd::vector<int> D (origLen);\n\n\tint nodeNum = 0;\n\tif (esaxx(charvec.begin(), SA.begin(), L.begin(), R.begin(), D.begin(), (int)origLen, k, nodeNum) == -1){\n\t\treturn -1;\n\t}\n\tstd::cerr << \" nodes:\" << nodeNum << std::endl;\n\n\tstd::vector<int> rank(origLen);\r\n\tint r = 0;\r\n\tfor (size_t i = 0; i < origLen; i++) {\r\n\t\tif (i == 0 || charvec[(SA[i] + origLen - 1) % origLen] != charvec[(SA[i - 1] + origLen - 1) % origLen]) r++;\r\n\t\trank[i] = r;\r\n\t}\n\n\t/*\n\tfor (int i = 0; i < nodeNum; ++i){\n\tcout << i << \"\\t\" << R[i] - L[i] << \"\\t\" << D[i] << \"\\t\" << L[i] << \"\\t\" << SA[L[i]] << \"\\t\" << (rank[ R[i] - 1 ] - rank[ L[i] ]) << \"\\t\" << \"'\" << str.substr(SA[L[i]], D[i]) << \"'\";\n\t//printSnipet(T, SA[L[i]], D[i], id2word);\n\tcout << std::endl;\n\t}\n\t*/\n\n\tstd::ofstream ofs(argv[2], std::ios::binary);\r\n\tint maxsubst = 0;\n\tfor (int i = 0; i < nodeNum; ++i){\n\t\tint c = rank[ R[i] - 1 ] - rank[ L[i] ];\n\t\tif (D[i] > 0 && c > 0) {\n\t\t\tofs << str.substr(SA[L[i]], D[i]) << \"\\t\" << c + 1 << std::endl;\n\t\t\t++maxsubst;\n\t\t}\n\t}\n\tstd::cerr << \" maxsubst:\" << maxsubst << std::endl;\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6294416189193726, "alphanum_fraction": 0.6443316340446472, "avg_line_length": 18.700000762939453, "blob_id": "dea5cfa9bf0b9156441ccf551b4f1ccbe90e554c", "content_id": "c595dee2823b8840cbd128239e06be517fbcea54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2955, "license_type": "permissive", "max_line_length": 98, "num_lines": 150, "path": "/foursquare-sentiment-mining/miner/lib/ldig/maxsubst/cybozulib/include/cybozu/exception.hpp", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#pragma once\n/**\n\t@file\n\t@brief definition of abstruct exception class\n\tCopyright (C) 2008-2012 Cybozu Labs, Inc., all rights reserved.\n*/\n#include <string>\n#include <cybozu/itoa.hpp>\n#include <errno.h>\n#ifdef _WIN32\n\t#include <winsock2.h>\n\t#include <windows.h>\n#else\n\t#include <string.h> // for 
strerror_r\n#endif\n\nnamespace cybozu {\n\nconst bool DontThrow = true;\n\nnamespace exception {\n\n/* get max 32 characters to avoid buffer overrun */\ninline std::string makeString(const char *p, size_t size = -1)\n{\n\tconst size_t maxSize = 32;\n\tsize_t pos = 0;\n\tif (p) {\n\t\tfor (pos = 0; pos < std::min(size, maxSize); pos++) {\n\t\t\tif (p[pos] == 0) break;\n\t\t}\n\t}\n\treturn std::string(p, pos);\n}\n/**\n\tconvert errno to string\n\t@param err [in] errno\n\t@note for both windows and linux\n*/\ninline std::string ConvertErrorNoToString(int err)\n{\n\tchar errBuf[256];\n#ifdef _WIN32\n\tstrerror_s(errBuf, sizeof(errBuf), err);\n\treturn errBuf;\n#else\n\treturn ::strerror_r(err, errBuf, sizeof(errBuf));\n#endif\n}\n\n} // cybozu::exception\n\nclass Exception : public std::exception {\n\tstd::string str_;\npublic:\n\texplicit Exception(const char *name)\n\t\t: str_(name)\n\t{\n\t}\n\t~Exception() throw () {}\n\tconst char *what() const throw () { return str_.c_str(); }\n\tconst std::string& toString() const { return str_; }\n\t/**\n\t\tappend string into str_\n\t*/\n\tException& operator<<(const std::string& str)\n\t{\n\t\tstr_ += ':';\n\t\tstr_ += str;\n\t\treturn *this;\n\t}\n\t/**\n\t\tappend C-string into str_\n\t*/\n\tException& operator<<(const char *str) { return operator<<(cybozu::exception::makeString(str)); }\n\t/**\n\t\tappend char into str_\n\t*/\n\tException& operator<<(char c) { str_ += ':'; str_ += c; return *this; }\n\t/**\n\t\tappend integer into str_\n\t*/\n\ttemplate<class P>\n\tException& operator<<(P t) { return operator<<(cybozu::itoa(t)); }\n};\n\nclass ErrorNo {\n#ifdef _WIN32\n\ttypedef unsigned int NativeErrorNo;\n#else\n\ttypedef int NativeErrorNo;\n#endif\n\tNativeErrorNo err_;\npublic:\n\texplicit ErrorNo(NativeErrorNo err)\n\t\t: err_(err)\n\t{\n\t}\n\tErrorNo()\n\t\t: err_(getLatestNativeErrorNo())\n\t{\n\t}\n\tNativeErrorNo getLatestNativeErrorNo() const\n\t{\n#ifdef _WIN32\n\t\treturn ::GetLastError();\n#else\n\t\treturn errno;\n#endif\n\t}\n\t/**\n\t\tconvert NativeErrNo to string(maybe UTF8)\n\t\t@param err [in] errno\n\t\t@note Linux : same as ConvertErrorNoToString\n\t\t\t Windows : for Win32 API(use en-us)\n\t*/\n\tstd::string toString() const\n\t{\n#ifdef _WIN32\n\t\tconst int msgSize = 256;\n\t\twchar_t msg[msgSize];\n\t\tint size = FormatMessageW(\n\t\t\tFORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,\n\t\t\t0,\n\t\t\terr_,\n\t\t\tMAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),\n\t\t\tmsg,\n\t\t\tmsgSize,\n\t\t\tNULL\n\t\t);\n\t\tif (size <= 0) return \"\";\n\t\t// remove last \"\\r\\n\"\n\t\tif (size > 2 && msg[size - 2] == '\\r') {\n\t\t\tmsg[size - 2] = 0;\n\t\t\tsize -= 2;\n\t\t}\n\t\tstd::string ret;\n\t\tret.resize(size);\n\t\t// assume ascii only\n\t\tfor (int i = 0; i < size; i++) {\n\t\t\tret[i] = (char)msg[i];\n\t\t}\n\t\treturn ret;\n#else\n\t\treturn exception::ConvertErrorNoToString(err_);\n#endif\n\t}\n};\n\n} // cybozu\n" }, { "alpha_fraction": 0.4731532633304596, "alphanum_fraction": 0.479336142539978, "avg_line_length": 36.3125, "blob_id": "4f6bb83ff70522621cf3686c445a8842bad4b353", "content_id": "f11ac9467750e3ab8fa5d17db85152d83c08ea19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3082, "license_type": "no_license", "max_line_length": 86, "num_lines": 80, "path": "/foursquare-sentiment-mining/miner/unittests.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 
06.05.2014\r\n\r\n@author: Andreas\r\n'''\r\n\r\nimport unittest\r\n\r\nfrom analysis import analysis\r\nfrom postagging import POSTagger\r\n\r\n\r\nclass TestMiner(unittest.TestCase): \r\n path_to_treetagger = r'C:\\TreeTagger\\bin\\tree-tagger.exe'\r\n path_to_parameter_file_de = r'C:\\TreeTagger\\lib\\german-utf8.par'\r\n path_to_parameter_file_en = r'C:\\TreeTagger\\lib\\english.par'\r\n \r\n text_en = u\"\"\"What is the airspeed of an unladen swallow ?\"\"\"\r\n text_en_tokens = [[u'What', u'WP', u'What'],\r\n [u'is', u'VBZ', u'be'],\r\n [u'the', u'DT', u'the'],\r\n [u'airspeed', u'NN', u'airspeed'],\r\n [u'of', u'IN', u'of'],\r\n [u'an', u'DT', u'an'],\r\n [u'unladen', u'JJ', u'<unknown>'],\r\n [u'swallow', u'NN', u'swallow'],\r\n [u'?', u'SENT', u'?']]\r\n \r\n text_de = u'Das Haus ist sehr schön und groß. Es hat auch einen hübschen Garten.'\r\n text_de_tokens = [[u'Das', u'ART', u'die'],\r\n [u'Haus', u'NN', u'Haus'],\r\n [u'ist', u'VAFIN', u'sein'],\r\n [u'sehr', u'ADV', u'sehr'],\r\n [u'schön', u'ADJD', u'schön'],\r\n [u'und', u'KON', u'und'],\r\n [u'groß', u'ADJD', u'groß'],\r\n [u'.', u'$.', u'.'],\r\n [u'Es', u'PPER', u'es'],\r\n [u'hat', u'VAFIN', u'haben'],\r\n [u'auch', u'ADV', u'auch'],\r\n [u'einen', u'ART', u'eine'],\r\n [u'hübschen', u'ADJA', u'hübsch'],\r\n [u'Garten', u'NN', u'Garten'],\r\n [u'.', u'$.', u'.']]\r\n \r\n def test_treetagger_en(self):\r\n tagger_en = POSTagger(self.path_to_treetagger, self.path_to_parameter_file_en)\r\n self.assertListEqual(tagger_en.tag(self.text_en), self.text_en_tokens) \r\n \r\n def test_treetagger_de(self):\r\n tagger_de = POSTagger(self.path_to_treetagger, self.path_to_parameter_file_de)\r\n self.assertListEqual(tagger_de.tag(self.text_de), self.text_de_tokens)\r\n \r\n def test_analysis(self):\r\n analysis_runner = analysis()\r\n # analysis_runner.crawl(u\"Frankfurt am Main\")\r\n # analysis_runner.save_to_pickle()\r\n analysis_runner.load_from_pickle()\r\n self.assertEqual(len(analysis_runner.venues), 50)\r\n self.assertGreater(len(analysis_runner.tips), 500)\r\n\r\n def test_tips_tagging(self):\r\n analysis_runner = analysis()\r\n tagger_de = POSTagger(self.path_to_treetagger, self.path_to_parameter_file_de)\r\n \r\n analysis_runner.load_from_pickle()\r\n result = []\r\n i = 0\r\n \r\n for tip in analysis_runner.tips:\r\n tip.pos = tagger_de.tag(tip.text)\r\n result.append(tip)\r\n print tip.text, tip.pos\r\n i += 1\r\n if i == 10:\r\n break\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n\r\n \r\n" }, { "alpha_fraction": 0.7434607744216919, "alphanum_fraction": 0.7525150775909424, "avg_line_length": 46.33333206176758, "blob_id": "3ad29b02f33d3720f56eeef0e18796960b6c05e7", "content_id": "a681527db64652a8c9d886b711fd15916726275c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 998, "license_type": "no_license", "max_line_length": 191, "num_lines": 21, "path": "/README.md", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "opinion-mining\n==============\n\n###Benötigte Software:\n\n* Python 2.7 (KEIN Python 3.x!)\n* NLTK 2.0.4: http://www.nltk.org/install.html\n* Numpy: http://www.scipy.org/scipylib/download.html\n* Foursquare: https://github.com/mLewisLogic/foursquare\n* Treetagger: http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/ (Benötigt Perl)\n\n###Installation\n- **NLTK 2.0.4** nach Anleitung unter http://www.nltk.org/install.html installieren\n- **Numpy** nach Anleitung unter http://www.nltk.org/install.html 
installieren\n- **Foursquare Python Wrapper** unter https://github.com/mLewisLogic/foursquare über \"Download ZIP\" herunterladen und entpacken. In das Verzeichnis wechseln und über die setup.py installieren\n```bat\npython setup.py -build\npython setup.py -install\n```\n- **Perl** herunterladen unter http://www.activestate.com/activeperl/ und installieren\n- **Treetagger** Windows Version herunterladen unter http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/ und nach C:\\Treetagger installieren\n" }, { "alpha_fraction": 0.5339285731315613, "alphanum_fraction": 0.5372449159622192, "avg_line_length": 35.32075500488281, "blob_id": "3fa8af6fa86c59631511d278c39ef5089d44e3bd", "content_id": "0a6897f447d51a1b755f2d959102342f8c91b271", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3920, "license_type": "no_license", "max_line_length": 134, "num_lines": 106, "path": "/foursquare-sentiment-mining/miner/analysis.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on 17.04.2014\n\n@author: schieberle\n'''\n\nimport pickle\nfrom time import gmtime, strftime\n\nimport __init__\nfrom ldigwrapper import lang_detector\nfrom postagging import POSTagger\n\n\nclass analysis(object):\n '''\n Analyzer which scrapes data from Foursquare and analyzes it with\n POS Tagging.\n '''\n \n def __init__(self, **kwargs):\n if 'verbose' in kwargs and kwargs['verbose'] == False:\n self.verbose = False\n else:\n self.verbose = True\n print \"Analyzer initialized...\"\n \n def time_helper(self):\n return \"\".join([\"[\", strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()), \"] - \"])\n \n\n def tag_tips(self,\n treetagger_path=r'C:\\TreeTagger\\bin\\tree-tagger.exe',\n parameter_file={'de': r'.\\lib\\treetagger\\german-utf8.par',\n 'en': r'.\\lib\\treetagger\\english.par'},\n model_dir=r\".\\lib\\ldig\\models\\model.latin\",\n limit=None):\n \n tagger_en = POSTagger(treetagger_path, parameter_file.get('en'))\n tagger_de = POSTagger(treetagger_path, parameter_file.get('de'))\n if self.verbose: print self.time_helper(), \"Tagger initialized...\"\n \n detector = lang_detector(model_dir)\n if self.verbose: print self.time_helper(), \"Language Detector initialized...\"\n result = []\n \n if self.verbose: print self.time_helper(), \"Start tagging....\"\n \n for idx, tip in enumerate(self.tips):\n tip.lang = detector.detect(tip.text)\n if tip.lang == 'de':\n tip.pos = tagger_de.tag(tip.text)\n elif tip.lang == 'en':\n tip.pos = tagger_en.tag(tip.text)\n \n result.append(tip)\n if self.verbose:\n print tip.lang\n print self.time_helper(), \"\".join([(idx + 1).__str__(), \"/\", len(self.tips).__str__(), \": \", tip.lang, \" \", tip.text])\n \n # if idx is not None and idx >= limit:\n # break\n \n self.tips = result\n \n def crawl(self, place):\n \n # Initialise Miner\n if self.verbose: print self.time_helper(), 'Start miner for ' + place \n run = __init__.miner()\n \n # Run search for venues in Foursquare\n if self.verbose: print self.time_helper(), \"Search Venues...\"\n self.venues = run.searchVenuesNear(place)\n if self.verbose: print self.time_helper(), len(self.venues).__str__(), \"Venues found.\"\n \n # Run search for Tips for all found venues\n self.tips = [] \n if self.verbose: print self.time_helper(), \"Search Tips...\"\n for v in self.venues:\n if self.verbose: print self.time_helper(), \"Search Tips for \", v.name\n self.tips += run.searchTipsByVenue(v.id)\n if self.verbose: print 
self.time_helper(), len(self.tips).__str__(), \"Tips found\"\n \n def save_to_pickle(self, fname=\"save.pkl\"):\n fname.encode('utf-8')\n pickle.dump(self, open(fname, \"w\"))\n if self.verbose: print \"Data saved to pkl -> \", fname\n \n def load_from_pickle(self, fname=\"save.pickle\"):\n up = pickle.load(open(fname, \"r\"))\n self.venues = up.venues\n self.tips = up.tips\n self.verbose = up.verbose\n if self.verbose: print \"Data loaded from pickle file.\"\n \n def batch_analysis(self, places):\n if self.verbose: print \"Start batch analysis for\", \", \".join(places)\n for place in places:\n self.crawl(place)\n self.save_to_pickle(\"\".join([r\"C:\\Users\\Andreas\\Documents\\opinion-mining\", \"\\\\\", place,\".pkl\"]).encode(\"utf-8\"))\n #self.tag_tips()\n #self.save_to_pickle(place + \"-tagged.pkl\") \n \n\n\n\n \n \n\n\n\n \n" }, { "alpha_fraction": 0.4673128128051758, "alphanum_fraction": 0.4790953993797302, "avg_line_length": 31.079267501831055, "blob_id": "322083fc2507114f9f11ebd421596ebf98333376", "content_id": "a8d1c3e0b2c164d71a81ea07ec825221ebd265ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5262, "license_type": "permissive", "max_line_length": 88, "num_lines": 164, "path": "/foursquare-sentiment-mining/miner/lib/ldig/da.py", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport collections\n\nimport numpy\n\n\n# Double Array for static ordered data\n# This code is available under the MIT License.\n# (c)2011 Nakatani Shuyo / Cybozu Labs Inc.\nclass DoubleArray(object):\n def __init__(self, verbose=False):\n self.verbose = verbose\n\n def validate_list(self, list):\n pre = \"\"\n for i, line in enumerate(list):\n if pre >= line:\n raise Exception, \"list has not ascent order at %d\" % (i + 1)\n pre = line\n\n def initialize(self, list):\n self.validate_list(list)\n\n self.N = 1\n self.base = [-1]\n self.check = [-1]\n self.value = [-1]\n\n max_index = 0\n queue = collections.deque([(0, 0, len(list), 0)])\n while len(queue) > 0:\n index, left, right, depth = queue.popleft()\n if depth >= len(list[left]):\n self.value[index] = left\n left += 1\n if left >= right: continue\n\n # get branches of current node\n stack = collections.deque([(right, -1)])\n cur, c1 = (left, ord(list[left][depth]))\n result = []\n while len(stack) >= 1:\n while c1 == stack[-1][1]:\n cur, c1 = stack.pop()\n mid = (cur + stack[-1][0]) / 2\n if cur == mid:\n result.append((cur + 1, c1))\n cur, c1 = stack.pop()\n else:\n c2 = ord(list[mid][depth])\n if c1 != c2:\n stack.append((mid, c2))\n else:\n cur = mid\n\n # search empty index for current node\n v0 = result[0][1]\n j = -self.check[0] - v0\n while any(j + v < self.N and self.check[j + v] >= 0 for right, v in result):\n j = -self.check[j + v0] - v0\n tail_index = j + result[-1][1]\n if max_index < tail_index:\n max_index = tail_index\n self.extend_array(tail_index + 2)\n\n # insert current node into DA\n self.base[index] = j\n depth += 1\n for right, v in result:\n child = j + v\n self.check[self.base[child]] = self.check[child]\n self.base[-self.check[child]] = self.base[child]\n self.check[child] = index\n queue.append((child, left, right, depth))\n left = right\n\n self.shrink_array(max_index)\n\n def extend_array(self, max_cand):\n if self.N < max_cand:\n new_N = 2 ** int(numpy.ceil(numpy.log2(max_cand)))\n self.log(\"extend DA : %d => (%d) => %d\", (self.N, max_cand, new_N))\n self.base.extend(n - 1 
for n in xrange(self.N, new_N))\n self.check.extend(-n - 1 for n in xrange(self.N, new_N))\n self.value.extend(-1 for n in xrange(self.N, new_N))\n self.N = new_N\n\n def shrink_array(self, max_index):\n self.log(\"shrink DA : %d => %d\", (self.N, max_index + 1))\n self.N = max_index + 1\n self.check = numpy.array(self.check[:self.N])\n self.base = numpy.array(self.base[:self.N])\n self.value = numpy.array(self.value[:self.N])\n\n not_used = self.check < 0\n self.check[not_used] = -1\n not_used[0] = False\n self.base[not_used] = self.N\n\n def log(self, format, param):\n if self.verbose:\n import time\n print \"-- %s, %s\" % (time.strftime(\"%Y/%m/%d %H:%M:%S\"), format % param)\n\n def save(self, filename):\n numpy.savez(filename, base=self.base, check=self.check, value=self.value)\n\n def load(self, filename):\n loaded = numpy.load(filename)\n self.base = loaded['base']\n self.check = loaded['check']\n self.value = loaded['value']\n self.N = self.base.size\n\n def add_element(self, s, v):\n pass\n\n def get_subtree(self, s):\n cur = 0\n for c in iter(s):\n v = ord(c)\n next = self.base[cur] + v\n if next >= self.N or self.check[next] != cur:\n return None\n cur = next\n return cur\n\n def get_child(self, c, subtree):\n v = ord(c)\n next = self.base[subtree] + v\n if next >= self.N or self.check[next] != subtree:\n return None\n return next\n\n def get(self, s):\n cur = self.get_subtree(s)\n if cur >= 0:\n value = self.value[cur]\n if value >= 0: return value\n return None\n\n def get_value(self, subtree):\n return self.value[subtree]\n\n def extract_features(self, st):\n events = dict()\n l = len(st)\n clist = [ord(c) for c in iter(st)]\n N = self.N\n base = self.base\n check = self.check\n value = self.value\n for i in xrange(l):\n pointer = 0\n for j in xrange(i, l):\n next = base[pointer] + clist[j]\n if next >= N or check[next] != pointer: break\n id = value[next]\n if id >= 0:\n events[id] = events.get(id, 0) + 1\n pointer = next\n return events\n\n" }, { "alpha_fraction": 0.7007908821105957, "alphanum_fraction": 0.7333040237426758, "avg_line_length": 26.08333396911621, "blob_id": "727d5c6e1c00815a0cfb9873362adfd00c00e5c7", "content_id": "31d4fae10784873470ce1a86ca936489f0f7d76c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2276, "license_type": "permissive", "max_line_length": 173, "num_lines": 84, "path": "/foursquare-sentiment-mining/miner/lib/ldig/readme.md", "repo_name": "gheed87/opinion-mining", "src_encoding": "UTF-8", "text": "ldig (Language Detection with Infinity Gram)\n======================\n\n\nThis is a prototype of language detection for short message service (twitter).\nwith 99.1% accuracy for 17 languages\n\n\nUsage\n------\n\n1. Extract model directory\n tar xf models/[select model archive]\n\n2. 
Detect\n    ldig.py -m [model directory] [text data file]\n\n\nData format\n------\n\nAs input data, each tweet is one line in a text file, in the format below.\n\n    [label]\\t[some metadata separated '\\t']\\t[text without '\\t']\n\n[label] is a language name like en, de, fr and so on.\nLike the metadata, the label is optional.\n(ldig doesn't use the metadata or the label for detection, of course :D)\n\nThe output of ldig has the format below.\n\n    [correct label]\\t[detected label]\\t[original metadata and text]\n\n\nEstimation Tool\n----\n\nldig has an estimation tool.\n\n    ./server.py -m [model directory]\n\nOpen http://localhost:48000 and enter target text into the textarea.\nThen ldig outputs language probabilities and feature parameters for the text.\n\n\nSupported Languages\n------\n\n- cs\tCzech\n- da\tDanish\n- de\tGerman\n- en\tEnglish\n- es\tSpanish\n- fi\tFinnish\n- fr\tFrench\n- id\tIndonesian\n- it\tItalian\n- nl\tDutch\n- no\tNorwegian\n- pl\tPolish\n- pt\tPortuguese\n- ro\tRomanian\n- sv\tSwedish\n- tr\tTurkish\n- vi\tVietnamese\n\n\nDocuments\n------\n\n- [Presentation in English](http://www.slideshare.net/shuyo/short-text-language-detection-with-infinitygram-12949447)\n- [Presentation in Japanese](http://www.slideshare.net/shuyo/gram-10286133)\n\n- Blog Articles about ldig\n  - [Language Detection for twitter with 99.1% Accuracy](http://shuyo.wordpress.com/2012/02/21/language-detection-for-twitter-with-99-1-accuracy/)\n  - [Precision and Recall of ldig (twitter language detection)](http://shuyo.wordpress.com/2012/03/02/precision-and-recall-of-ldig-twitter-language-detection/)\n  - [Estimation of ldig (twitter Language Detection) for LIGA dataset](http://shuyo.wordpress.com/2012/03/02/estimation-of-ldig-twitter-language-detection-for-liga-dataset/)\n  - [Why is Norwegian and Danish identification difficult?](http://shuyo.wordpress.com/2012/03/07/why-is-norwegian-and-danish-identification-difficult/)\n\n\nCopyright & License\n-----\n- (c)2011-2012 Nakatani Shuyo / Cybozu Labs Inc. All rights reserved.\n- All codes and resources are available under the MIT License.\n\n" } ]
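The readme above drives ldig from the command line; the same detection is also available in-process through the ldigwrapper.py module earlier in this repository. A minimal sketch (Python 2, like the sources; it assumes the model archive has been extracted to the path below, the same directory analysis.py passes in):

```python
# -*- coding: utf-8 -*-
# Illustrative sketch: call the bundled detector from Python instead of the CLI.
from ldigwrapper import lang_detector

detector = lang_detector(r".\lib\ldig\models\model.latin")
for text in [u"What is the airspeed of an unladen swallow?",
             u"Das Haus ist sehr sch\xf6n."]:
    print detector.detect(text), "<-", text   # returns a label such as 'en' or 'de'
```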
18
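The `extract_features` method in the double-array trie code above is the core of the n-gram lookup: starting from every position in the string it repeatedly computes `next = base[pointer] + ord(c)` and accepts the transition only when `check[next] == pointer`, incrementing a count for every accepting state it passes through. Below is a minimal, self-contained sketch of that scheme (Python 3 for brevity; the toy `build_double_array` builder and the sample key set are illustrative assumptions, not code from the repository):

```python
# Toy double-array trie: build base/check/value for a few keys, then scan a
# string the same way extract_features does (next = base[cur] + ord(c),
# valid iff check[next] == cur). Hypothetical helper, for illustration only.

def build_double_array(words):
    # Ordinary dict trie first; the None key marks a terminal and stores an id.
    root = {}
    for wid, w in enumerate(words):
        node = root
        for ch in w:
            node = node.setdefault(ch, {})
        node[None] = wid

    N = 2048
    base, check, value = [0] * N, [-1] * N, [-1] * N
    check[0] = 0  # root occupies slot 0

    def place(state, node):
        if None in node:
            value[state] = node[None]
        kids = sorted(k for k in node if k is not None)
        if not kids:
            return
        b = 1
        # Find a base so that every child slot b + ord(c) is still free.
        while any(check[b + ord(c)] >= 0 for c in kids):
            b += 1
        base[state] = b
        for c in kids:
            check[b + ord(c)] = state  # claim the slot; check points back
        for c in kids:
            place(b + ord(c), node[c])

    place(0, root)
    return base, check, value

def extract_features(st, base, check, value):
    # Same loop structure as the class method above, minus the numpy arrays.
    events = {}
    for i in range(len(st)):
        cur = 0
        for c in st[i:]:
            nxt = base[cur] + ord(c)
            if nxt >= len(check) or check[nxt] != cur:
                break
            if value[nxt] >= 0:
                events[value[nxt]] = events.get(value[nxt], 0) + 1
            cur = nxt
    return events

base, check, value = build_double_array(["an", "and", "dig"])
print(extract_features("standing", base, check, value))  # -> {0: 1, 1: 1}
```

Feature ids 0 (`an`) and 1 (`and`) are each counted once inside `standing`, which is exactly the substring-event counting the detector's feature extraction relies on.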
mllewis/dplace-data
https://github.com/mllewis/dplace-data
ae499fb2ed77fae1b0132ee5c059410fb22da077
9c2e491ec569736efd8964da897df2d604c6a78c
0f0fa7b35de19a5f9daa8625e411a3e8a0a0f0eb
refs/heads/master
2021-07-22T22:26:52.353014
2017-10-19T12:00:59
2017-10-19T12:00:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5092341899871826, "alphanum_fraction": 0.5162110924720764, "avg_line_length": 30, "blob_id": "434d45b11ef49db28f504badb8f3ba0d0dc0060b", "content_id": "5df9671e61cd6ac25154cbcfc19961a729a64578", "detected_licenses": [ "CC-BY-4.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12183, "license_type": "permissive", "max_line_length": 96, "num_lines": 393, "path": "/scripts/dplace_to_cldf.py", "repo_name": "mllewis/dplace-data", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# dplase_to_cldf.py - convert all datasets to csvw using pycldf\n\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport copy\nimport collections\n\nfrom six.moves import map\n\nimport attr\nimport clldutils.dsv\nimport pycldf.dataset\n\nimport pydplace.api\n\nSRC = '..'\nDST = '../cldf'\nDIALECT = clldutils.dsv.Dialect()\n\n\ndef registered(cls):\n assert issubclass(cls, BaseConverter)\n try:\n seen = registered.converters\n except AttributeError:\n seen = registered.converters = []\n seen.append(cls)\n return cls\n\n\nclass BaseConverter(object):\n\n def skip(self, dataset):\n return False\n\n\nSeparator = collections.namedtuple('Separator', ['sep', 'split'])\n\n\nclass Converter(BaseConverter):\n\n def __init__(self):\n fields = attr.fields(self._source_cls)\n columns = list(self._itercols(fields, self._convert))\n\n def extract(s):\n return {target: trans(getattr(s, name))\n for name, trans, target, _ in columns}\n\n self._extract = extract\n self._add_component_args = ([self._component] +\n [args for _, _, _, args in columns])\n\n @staticmethod\n def _itercols(fields, convert):\n for f in fields:\n name = f.name\n if name in convert:\n args = convert[name]\n if args is None:\n continue\n elif hasattr(args, 'setdefault'):\n target = args.setdefault('name', name)\n else:\n target = args\n args = {'name': target}\n else:\n args = {'name': name}\n target = name\n\n transform = lambda x: x\n if 'separator' in args:\n sep, split = args['separator']\n args['separator'] = sep\n if split:\n transform = lambda x: x.split(sep)\n\n if 'datatype' not in args:\n args['datatype'] = 'float' if f.convert is float else 'string'\n\n yield name, transform, target, args\n\n def __call__(self, dataset):\n component = self._component.get('dc:conformsTo', self._component['url'])\n items = map(self._extract, self._iterdata(dataset))\n write_kwargs = {component: items}\n return self._add_component_args, write_kwargs\n\n\nclass SkipMixin(object):\n\n @classmethod\n def skip(cls, dataset, _sentinel=object()):\n return next(iter(cls._iterdata(dataset)), _sentinel) is _sentinel\n\n\n@registered\nclass LanguageTable(SkipMixin, Converter):\n\n _source_cls = pydplace.api.Society\n\n _iterdata = staticmethod(lambda dataset: dataset.societies)\n\n _component = {\n 'url': 'societies.csv',\n 'dc:conformsTo': 'http://cldf.clld.org/v1.0/terms.rdf#LanguageTable',\n 'tableSchema': {'primaryKey': ['id']},\n }\n\n _convert = {\n 'id': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#id',\n 'required': True,\n },\n 'xd_id': {\n 'required': True,\n 'datatype': {'base': 'string', 'format': r'xd\\d+'},\n },\n 'pref_name_for_society': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#name',\n 'required': True,\n },\n 'glottocode': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#glottocode',\n 'required': True,\n },\n 'ORIG_name_and_ID_in_this_dataset': {\n 'required': True,\n },\n 'alt_names_by_society': {\n 'separator': Separator(', ', split=True)\n 
},\n 'main_focal_year': {\n 'datatype': 'integer',\n 'null': 'NA',\n },\n 'HRAF_name_ID': {\n 'datatype': {'base': 'string', 'format': r'.+ \\([^)]+\\)'},\n },\n 'HRAF_link': {\n 'datatype': {'base': 'string', 'format': r'http://.+|in process'},\n },\n 'origLat': {\n 'datatype': {'base': 'decimal', 'minimum': -90, 'maximum': 90},\n 'required': True,\n },\n 'origLong': { # FIXME: EA/societies.csv:1279:11\n 'datatype': {'base': 'decimal', 'minimum': -190, 'maximum': 180},\n 'required': True,\n },\n 'Lat': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#latitude',\n 'datatype': {'base': 'decimal', 'minimum': -90, 'maximum': 90},\n 'required': True,\n },\n 'Long': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#longitude',\n 'datatype': {'base': 'decimal', 'minimum': -180, 'maximum': 180},\n 'required': True,\n },\n 'Comment': {'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#comment'},\n }\n\n\n@registered\nclass LangugageRelatedTable(SkipMixin, Converter):\n\n _source_cls = pydplace.api.RelatedSocieties\n\n _iterdata = staticmethod(lambda dataset: dataset.society_relations)\n\n _component = {\n 'url': 'societies_mapping.csv',\n 'tableSchema': {\n 'primaryKey': ['id'],\n 'foreignKeys': [\n {'columnReference': 'id',\n 'reference': {'resource': 'societies.csv', 'columnReference': 'id'}},\n ],\n },\n }\n\n _convert = {\n 'id': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#id',\n 'required': True,\n },\n 'related': {\n 'separator': Separator('; ', split=False),\n },\n }\n\n\n@registered\nclass ParameterTable(Converter):\n\n _source_cls = pydplace.api.Variable\n\n _iterdata = staticmethod(lambda dataset: dataset.variables)\n\n _component = {\n 'url': 'variables.csv',\n 'dc:conformsTo': 'http://cldf.clld.org/v1.0/terms.rdf#ParameterTable',\n 'tableSchema': {'primaryKey': ['id']}\n }\n\n _convert = {\n 'id': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#id',\n 'required': True,\n },\n 'category': {\n 'separator': Separator(', ', split=False),\n 'required': True,\n },\n 'title': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#name',\n 'required': True,\n },\n 'definition': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#description',\n },\n 'type': {\n 'datatype': {\n 'base': 'string',\n 'format': r'Categorical|Ordinal|Continuous',\n },\n 'required': True,\n },\n 'source': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#source',\n },\n 'notes': {'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#comment'},\n 'codes': None,\n }\n\n\n@registered\nclass CodeTable(SkipMixin, BaseConverter):\n\n _iterdata = staticmethod(lambda dataset: (c for v in dataset.variables for c in v.codes))\n\n _component = {\n 'url': 'codes.csv',\n 'dc:conformsTo': 'http://cldf.clld.org/v1.0/terms.rdf#CodeTable',\n 'tableSchema': {\n 'primaryKey': ['var_id', 'code'],\n 'foreignKeys': [\n {'columnReference': 'var_id',\n 'reference': {'resource': 'variables.csv', 'columnReference': 'id'}},\n ],\n },\n }\n\n _convert = {\n 'var_id': {\n 'name': 'var_id',\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#parameterReference',\n 'required': True,\n },\n 'code': {\n 'name': 'code', # FIXME: MODIS/data.csv:5884:6\n 'datatype': {'base': 'string', 'format': r'-?\\d+(?:.\\d+)?(?:E[+-]\\d+)?|NA'},\n 'required': True,\n },\n 'description': {\n 'name': 'description',\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#description',\n },\n 'name': {\n 'name': 'name',\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#name',\n 'required': True,\n },\n }\n\n def 
__call__(self, dataset):\n codes = list(self._iterdata(dataset))\n add_component_args = ([self._component] +\n [self._convert.get(f, f) for f in codes[0]._fields])\n component = self._component.get('dc:conformsTo', self._component['url'])\n items = (c._asdict() for c in codes)\n write_kwargs = {component: items}\n return add_component_args, write_kwargs\n\n\n@registered\nclass ValueTable(Converter):\n\n _source_cls = pydplace.api.Data\n\n _iterdata = staticmethod(lambda dataset: dataset.data)\n\n _component = {\n 'url': 'data.csv',\n 'dc:conformsTo': 'http://cldf.clld.org/v1.0/terms.rdf#ValueTable',\n 'tableSchema': {\n 'primaryKey': 'id',\n #'primaryKey': ['soc_id', 'sub_case', 'year', 'var_id', 'code', 'references'],\n 'foreignKeys': [\n {'columnReference': 'soc_id',\n 'reference': {'resource': 'societies.csv', 'columnReference': 'id'}},\n #{'columnReference': ['var_id', 'code'],\n #'reference': {'resource': 'codes.csv', 'columnReference': ['var_id', 'code']}},\n ],\n },\n }\n\n _extra = {\n 'name': 'id',\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#id',\n 'required': True,\n }\n\n _convert = {\n 'soc_id': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#languageReference',\n 'required': True,\n },\n 'sub_case': {\n 'null': None,\n 'required': True,\n },\n 'year': {\n 'datatype': {'base': 'string', 'format': r'-?\\d+(?:-\\d+)?|(?:NA)?'},\n 'null': None,\n 'required': True,\n },\n 'var_id': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#parameterReference',\n 'required': True,\n },\n 'code': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#codeReference',\n 'datatype': CodeTable._convert['code']['datatype'],\n 'required': True,\n },\n 'comment': {'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#comment'},\n 'references': {\n 'propertyUrl': 'http://cldf.clld.org/v1.0/terms.rdf#source',\n 'separator': Separator('; ', split=False),\n 'null': None,\n 'required': True,\n },\n }\n\n def __call__(self, dataset):\n component = self._add_component_args[0]\n if LanguageTable.skip(dataset): # drop data.csv fks to societies.csv if there is none\n component = copy.deepcopy(component)\n reduced = [f for f in component['tableSchema']['foreignKeys']\n if f['reference']['resource'] != LanguageTable._component['url']]\n component['tableSchema']['foreignKeys'] = reduced\n add_component_args = [component, self._extra] + self._add_component_args[1:]\n\n def extract_add_id(d, i, _extract=self._extract):\n result = _extract(d)\n result['id'] = i\n return result\n\n component = self._component.get('dc:conformsTo', self._component['url'])\n items = (extract_add_id(d, i) for i, d in enumerate(self._iterdata(dataset), 1))\n write_kwargs = {component: items}\n return add_component_args, write_kwargs\n\n\ndef main(source_dir=SRC, dest_dir=DST, dialect=DIALECT):\n repo = pydplace.api.Repos(source_dir)\n\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n\n converters = [cls() for cls in registered.converters]\n\n for d in repo.datasets:\n print(d)\n result_dir = os.path.join(dest_dir, d.id)\n result = pycldf.dataset.StructureDataset.in_dir(result_dir, empty_tables=True)\n result.tablegroup.dialect = dialect\n final_write_kwargs = {}\n for conv in converters:\n if not conv.skip(d):\n add_args, write_kwargs = conv(d)\n result.add_component(*add_args)\n final_write_kwargs.update(write_kwargs)\n result.write(**final_write_kwargs)\n result.validate()\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
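The `Converter._itercols` machinery in `dplace_to_cldf.py` above folds three decisions into one lookup table: a `None` entry drops the source field, a bare string merely renames it, and a dict supplies CSVW column arguments, where `Separator(sep, split)` additionally decides whether the raw value is split before writing (the emitted schema keeps only the separator string). A stripped-down sketch of that dispatch, with invented field names and the datatype defaults omitted:

```python
# Simplified re-creation of the column-spec dispatch; illustrative only.
import collections

Separator = collections.namedtuple('Separator', ['sep', 'split'])

convert = {
    'alt_names_by_society': {'separator': Separator(', ', split=True)},
    'codes': None,    # None: drop this field entirely
    'title': 'name',  # bare string: rename only
}

def itercols(fieldnames, convert):
    for name in fieldnames:
        args = convert.get(name, {'name': name})
        if args is None:
            continue
        if not hasattr(args, 'setdefault'):
            args = {'name': args}          # rename shorthand -> full args
        target = args.setdefault('name', name)
        transform = lambda x: x
        if 'separator' in args:
            sep, split = args['separator']
            args['separator'] = sep        # schema keeps just the string
            if split:
                transform = lambda x, s=sep: x.split(s)
        yield name, transform, target, args

for name, transform, target, args in itercols(
        ['alt_names_by_society', 'codes', 'title'], convert):
    print(target, args, transform('a, b'))
# alt_names_by_society {'separator': ', ', 'name': 'alt_names_by_society'} ['a', 'b']
# name {'name': 'name'} a, b
```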
khoale88/Multi-tenant-UMLParser
https://github.com/khoale88/Multi-tenant-UMLParser
e0785344cab9d66945623045f5d63673432d8998
27848d6fe099d31f948dd4827e643e86a135ae26
477d4587730e528c5e418635cd8525a695f51961
refs/heads/master
2021-06-17T01:45:39.067545
2017-05-03T06:53:22
2017-05-03T06:53:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5176036953926086, "alphanum_fraction": 0.5384331941604614, "avg_line_length": 36.41379165649414, "blob_id": "b8a72aab733d4dff61bd80e91bdfc77430ee87c6", "content_id": "b1563c2f1bbb054048f005f15feb457cc6314b40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5425, "license_type": "no_license", "max_line_length": 99, "num_lines": 145, "path": "/multi_tenant/model.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nimport ast\n\napp = Flask(__name__)\n\n\napp.config[\"SQL_USER\"] = SQL_USER = 'root'\napp.config[\"SQL_PASSWORD\"] = SQL_PASSWORD = 'aichomavao'\napp.config[\"SQL_HOSTNAME\"] = SQL_HOSTNAME = 'localhost'\napp.config[\"SQL_PORT\"] = SQL_PORT = 3306\napp.config[\"SQL_DATABASE\"] = SQL_DATABASE = 'cmpe281'\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s:%d/%s' \\\n %(SQL_USER, SQL_PASSWORD, SQL_HOSTNAME, SQL_PORT, SQL_DATABASE)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb = SQLAlchemy(app)\n\n\nclass AS_DICT():\n def as_dict(self):\n return ast.literal_eval(str(self))\n\n\nclass TENANT_TABLE(db.Model, AS_DICT):\n __tablename__ = \"TENANT_TABLE\"\n __table_args__ = (\n db.PrimaryKeyConstraint('tenant_id', 'table_name'),\n )\n tenant_id = db.Column(db.String(10), nullable=False)\n table_name = db.Column(db.String(45), nullable=False)\n table_desc = db.Column(db.String(80))\n\n def __init__(self, tenant_id, table_name, table_desc):\n self.tenant_name = tenant_id\n self.table_name = table_name\n self.table_desc = table_desc\n\n def __repr__(self):\n return '<TENANT %r>' %(self.tenant_id)\n\n def __str__(self):\n return str({\"tenant_id\" :self.tenant_id,\n \"table_name\":self.table_name,\n \"table_desc\":self.table_desc})\n\n\nclass TENANT_FIELDS(db.Model, AS_DICT):\n __tablename__ = \"TENANT_FIELDS\"\n __table_args__ = (\n db.PrimaryKeyConstraint('index', 'tenant_id', 'table_name'),\n )\n index = db.Column(db.Integer, nullable=False)\n tenant_id = db.Column(db.String(10), db.ForeignKey(\"TENANT_TABLE.tenant_id\"),\n nullable=False)\n table_name = db.Column(db.String(45), db.ForeignKey(\"TENANT_TABLE.tenant_table\"),\n nullable=False)\n field_name = db.Column(db.String(45),\n unique=True, nullable=False)\n field_type = db.Column(db.String(80))\n field_column = db.Column(db.Integer,\n nullable=False)\n\n def __init__(self, tenant_id, table_name, field_name, field_type, field_column):\n self.tenant_id = tenant_id\n self.table_name = table_name\n self.field_name = field_name\n self.field_type = field_type\n self.field_column = field_column\n\n def __repr__(self):\n return { \"index\" : self.index,\n \"tenant_id\" : self.tenant_id,\n \"table_name\" : self.table_name,\n \"field_name\" : self.field_name,\n \"field_type\" : self.field_type,\n \"field_column\": self.field_column}\n\n def __str__(self):\n return str({\"index\" : self.index,\n \"tenant_id\" : self.tenant_id,\n \"table_name\" : self.table_name,\n \"field_name\" : self.field_name,\n \"field_type\" : self.field_type,\n \"field_column\": self.field_column})\n\n\nclass TENANT_DATA(db.Model, AS_DICT):\n __tablename__ = \"TENANT_DATA\"\n __table_args__ = (\n db.PrimaryKeyConstraint('record_id', 'tenant_id'),\n )\n record_id = db.Column(db.String(45), \n nullable=False)\n tenant_id = db.Column(db.String(10), db.ForeignKey(\"TENANT_TABLE.tenant_id\"),\n nullable=False)\n tenant_table = db.Column(db.String(45), 
db.ForeignKey(\"TENANT_TABLE.tenant_table\"),\n nullable=False)\n column_1 = db.Column(db.String(80))\n column_2 = db.Column(db.String(80))\n column_3 = db.Column(db.String(80))\n column_4 = db.Column(db.String(80))\n column_5 = db.Column(db.String(80))\n column_6 = db.Column(db.String(80))\n column_7 = db.Column(db.String(80))\n column_8 = db.Column(db.String(80))\n column_9 = db.Column(db.String(80))\n column_10 = db.Column(db.String(80))\n\n\n def __init__(self, record_id, tenant_id, tenant_table, column_1, \n column_2, column_3, column_4, column_5, column_6,\n column_7, column_8, column_9, column_10):\n self.record_id = record_id\n self.tenant_id = tenant_id\n self.tenant_table = tenant_table\n self.column_1 = column_1\n self.column_2 = column_2\n self.column_3 = column_3\n self.column_4 = column_4\n self.column_5 = column_5\n self.column_6 = column_6\n self.column_7 = column_7\n self.column_8 = column_8\n self.column_9 = column_9\n self.column_10 = column_10\n\n def __repr__(self):\n return '<TENANT %r>' %(self.tenant_id)\n\n def __str__(self):\n return str({\"record_id\" : self.record_id,\n \"tenant_id\" : self.tenant_id,\n \"tenant_table\" : self.tenant_table,\n \"column_1\" : self.column_1,\n \"column_2\" : self.column_2,\n \"column_3\" : self.column_3,\n \"column_4\" : self.column_4,\n \"column_5\" : self.column_5,\n \"column_6\" : self.column_6,\n \"column_7\" : self.column_7,\n \"column_8\" : self.column_8,\n \"column_9\" : self.column_9,\n \"column_10\" : self.column_10})\n" }, { "alpha_fraction": 0.5428134799003601, "alphanum_fraction": 0.5458715558052063, "avg_line_length": 30.926828384399414, "blob_id": "aa900230528a69980127370de03fc3bb7ae760ac", "content_id": "006e178163f1124b4aa8ccedf8d2bc003baf12de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 88, "num_lines": 41, "path": "/tenant1/model.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "import mysql.connector\n\ndef get_tenant_data(config):\n cnx = mysql.connector.connect(user=config[\"DB_USER\"], password=config[\"DB_PASSWORD\"],\n host=config[\"DB_HOST\"], port=config[\"DB_PORT\"],\n database=config[\"DB_DATABASE\"])\n cursor = cnx.cursor()\n query = (\"select * from TENANT_DATA \\\n where TENANT_ID = '\" + config[\"TENANT_ID\"] +\\\n \"' order by RECORD_ID desc LIMIT 1\")\n\n cursor.execute(query)\n data = next(cursor)\n cursor.close()\n cnx.close()\n\n return data\n\ndef update_tenant_data(config,data):\n cnx = mysql.connector.connect(user=config[\"DB_USER\"], password=config[\"DB_PASSWORD\"],\n host=config[\"DB_HOST\"], port=config[\"DB_PORT\"],\n database=config[\"DB_DATABASE\"])\n\n cursor = cnx.cursor()\n\n #ignore record_id for auto inc, only modify 3 - len(data)\n tenant_data = list(get_tenant_data(config))\n tenant_data[0] = None\n start = 3\n end = start + len(data)\n tenant_data[start:end] = data\n\n query = (\"\"\"insert into TENANT_DATA \\\n values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\")\n cursor.execute(query, tenant_data)\n\n cnx.commit()\n cursor.close()\n cnx.close()\n\n return" }, { "alpha_fraction": 0.6756250262260437, "alphanum_fraction": 0.6768749952316284, "avg_line_length": 33.04255294799805, "blob_id": "74adc9321b96157a85931902dfbcc4216927258f", "content_id": "597e5287c40c501774afffc477fe22457492bea8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1600, 
"license_type": "no_license", "max_line_length": 89, "num_lines": 47, "path": "/tenant1/unzip_and_parse.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "import subprocess\nimport zipfile\nimport os, shutil\n\ndef process(jar_path, save_path, zip_name, output_path):\n \"\"\"upzip and parse function will unzip a compressed file, and use provided UML parser\n to parse such file. The result is stored in the output path\"\"\"\n\n #reformat the paths\n save_path = os.path.abspath(save_path)\n output_path = os.path.abspath(output_path)\n\n shutil.rmtree(os.path.join(save_path, \"output\"))\n #unzip and parse the file.\n unzip_path = unzip_to_output_folder(save_path, zip_name)\n parse_uml(jar_path, unzip_path)\n\n #move file to output folder and rename to original name with .png\n ori = os.path.join(unzip_path, \"output.png\")\n des = os.path.join(output_path, \"output.png\")\n os.rename(ori, des)\n return des\n\n\ndef parse_uml(jar_path, input_path):\n \"\"\"parse_uml needs 2 inputs: a path to parser file,\n an input_path to folder where java classes reside,\n result is stored in the same folder as input_path\"\"\"\n #print input_path\n subprocess.call([\"java\", \"-jar\", jar_path, input_path, input_path + \"/\"])\n\ndef unzip_to_output_folder(zip_path, zip_name):\n \"\"\"rename the zip_file into output.zip\n and unzip it into \"output\" folder located in the same path as zip file\"\"\"\n\n #remane from xxx.zip to output.zip\n ori = os.path.join(zip_path, zip_name)\n des = os.path.join(zip_path, \"output.zip\")\n os.rename(ori, des)\n\n zipref = zipfile.ZipFile(des, \"r\")\n #get the folder and unzip\n des = des[:-4]\n zipref.extractall(des)\n zipref.close()\n #return the folder by excluding the '.zip'\n return des\n" }, { "alpha_fraction": 0.6091065406799316, "alphanum_fraction": 0.619415819644928, "avg_line_length": 29.63157844543457, "blob_id": "61ec6e282142bd4c24f43157653f5d0444c25c65", "content_id": "f48577f918fa41672bb2000a58fb5400a9b61462", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 83, "num_lines": 38, "path": "/tenant2/main.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, json, Response, request,render_template, url_for, redirect\nimport unzip_and_parse as uap\n\napp = Flask(__name__)\n\napp.config[\"APP_HOST\"] = \"0.0.0.0\"\napp.config[\"APP_PORT\"] = 5000\napp.config[\"CLASS_JAR\"] = \"./parser/parser.jar\"\napp.config[\"UPLOAD_FOLDER\"] = \"./uploads/\"\napp.config[\"OUTPUT_FOLDER\"] = \"./static/\"\n\n\[email protected]('/upload', methods=['POST'])\ndef upload_zip():\n zipfile = request.files['zipFile']\n filename = zipfile.filename\n\n save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n zipfile.save(save_path)\n\n outputlink = uap.process(jar_path=app.config[\"CLASS_JAR\"],\n save_path=app.config[\"UPLOAD_FOLDER\"],\n zip_name=filename,\n output_path=app.config[\"OUTPUT_FOLDER\"])\n\n body = {}\n body[\"link\"] = url_for(\"static\", filename=\"output.png\")\n\n return Response(response=json.dumps(body), status=200)\n\n\[email protected](\"/tenant2\",methods=['GET'])\ndef index():\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=app.config[\"APP_HOST\"], port=app.config[\"APP_PORT\"])\n" }, { "alpha_fraction": 0.6994818449020386, "alphanum_fraction": 0.7305699586868286, "avg_line_length": 
21.705883026123047, "blob_id": "84131654ff6f88b32d540345fbcdc7c2f2ce323c", "content_id": "e9b38db8b8c4f63c7629ab71b2acc9431ccfea3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 143, "num_lines": 17, "path": "/multi_tenant/test.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "import datetime\nimport mysql.connector\n\n\n\ncnx = mysql.connector.connect(user='admin', password=\"cmpe281#2017\", host=\"cmpe281.cotowvf3a27g.us-west-2.rds.amazonaws.com\", database='cmpe281')\ncursor = cnx.cursor()\n\nquery = (\"select * from TENANT_DATA where TENANT_ID = 'Tenant4' order by RECORD_ID desc LIMIT 1\")\n\ncursor.execute(query)\n\nc = next(cursor)\nprint (c)\n\ncursor.close()\ncnx.close()\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 22, "blob_id": "266d791aeaf28f64aa775083d174e06e4fb7fb41", "content_id": "a0c532f6aff179c92a52f6735c5be47a0d702944", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/README.md", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "# Multi-tenant-project\n" }, { "alpha_fraction": 0.6359832882881165, "alphanum_fraction": 0.6359832882881165, "avg_line_length": 23, "blob_id": "6cf229b795349b44366e2b4382ce669945a276e6", "content_id": "8cf615f5cdf8be4d21d7fe29023feb76b0e99c1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 239, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/tenant3/textcodes/sequence.java", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "Comments.newComment {\n CommentFactory.newComment -> \"the new comment\" {\n Comment.<<create>> {\n IDGen.nextID -> newId;\n Transaction.add(this);\n Comments.getSize -> size;\n }\n Comments.addToCache(the new comment);\n }\n}" }, { "alpha_fraction": 0.7487437129020691, "alphanum_fraction": 0.7839195728302002, "avg_line_length": 17.18181800842285, "blob_id": "08ec534e2e274c3fbc7f6197c704137b16348f58", "content_id": "0e83929f3f3bfdd46a2bb0e8c41c9f3ae79f9650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 199, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/tenant1/Dockerfile", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "#A simple Flask app container\nFROM khoale88/281-personal\nMAINTAINER khoale88 \"[email protected]\"\n\n#Place app in container\nCOPY . 
/app\nWORKDIR /app\n\nRUN pip install mysql-connector-python-rf\n\nCMD python main.py" }, { "alpha_fraction": 0.8407079577445984, "alphanum_fraction": 0.8407079577445984, "avg_line_length": 36.66666793823242, "blob_id": "9ce4547d766cf977f3b798436efa4e700dfc347f", "content_id": "3a5f9fb3ed12de3619dfbb3e82b8713364b847ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 113, "license_type": "no_license", "max_line_length": 56, "num_lines": 3, "path": "/multi_tenant/requirements.txt", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "apt-get install python-mysqldb\napt-get install python-pip python-dev libmysqlclient-dev\npip install MySQL-python\n" }, { "alpha_fraction": 0.7022222280502319, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 17.83333396911621, "blob_id": "592ce5ea0bd3448d69284256ba3abe939d14333b", "content_id": "9217fd8ee604085f33b59af09aedd9ccfd2f9008", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 225, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/tenant4/Dockerfile", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "#A simple Flask app container\nFROM khoale88/281-personal\nMAINTAINER khoale88 \"[email protected]\"\n\n#Place app in container\nCOPY . /app\nWORKDIR /app\n\n#ENV JAVA_HOME /usr/lib/jvm/jdk1.8.0_111\nENV UMLGRAPH_HOME /app/lib\n\nCMD python main.py" }, { "alpha_fraction": 0.6193293929100037, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 30.6875, "blob_id": "786a3c8593d36315a5e456edfd2d5897c3934ea9", "content_id": "2327ed01be62d7306ddebd9b570a76e079992f31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 83, "num_lines": 32, "path": "/gateway/main.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, json, Response, request,render_template, url_for, redirect\n\napp = Flask(__name__)\napp.config[\"APP_HOST\"] = \"0.0.0.0\"\napp.config[\"APP_PORT\"] = 5000\n# app.config[\"CLASS_JAR\"] = \"./parser/parser.jar\"\n# app.config[\"UPLOAD_FOLDER\"] = \"./uploads/\"\n# app.config[\"UNZIP_FOLDER\"] = \"./unzips/\"\n# app.config[\"OUTPUT_FOLDER\"] = \"./static/\"\n\n\[email protected](\"/\",methods=['GET'])\ndef index():\n return render_template(\"login.html\")\n\[email protected](\"/\",methods=['POST'])\ndef post_login():\n user_name = request.json[\"userName\"]\n user_password = request.json[\"userPassword\"]\n\n if user_name == \"admin\" and user_password == \"admin\":\n return Response(json.dumps({\"redirect\":url_for(\"dashboard\")}), status=200)\n else:\n return Response(json.dumps({\"error\":\"authentication fails\"}), status=403)\n\[email protected](\"/dashboard\",methods=['GET'])\ndef dashboard():\n return render_template(\"dashboard.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.7695167064666748, "alphanum_fraction": 0.7992565035820007, "avg_line_length": 23.454545974731445, "blob_id": "f691b588d4be07f04f1555d1815f987351df85c5", "content_id": "5f93170058cd55cdf0cadd52bb536690cc8f3e8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 269, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": 
"/docker/Dockerfile", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "#A simple Flask app container\nFROM kovarn/python-java\nMAINTAINER khoale88 \"[email protected]\"\n\n\n#Install dependencies\nRUN apt-get update\nRUN apt-get install graphviz plotutils imagemagick -y\nRUN pip install flask flask-sqlalchemy pymysql\n\nENV JAVA_HOME /usr/lib/jvm/jdk1.8.0_111\n" }, { "alpha_fraction": 0.5639269351959229, "alphanum_fraction": 0.5648401975631714, "avg_line_length": 38.818180084228516, "blob_id": "b4e40b8df1eb371322d204dc250a6942ee0d69bd", "content_id": "ea0490a8cfe001caa119020e06a98c6465c0d49e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2190, "license_type": "no_license", "max_line_length": 71, "num_lines": 55, "path": "/multi_tenant/controller.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "from model import TENANT_TABLE as TNTB\nfrom model import TENANT_FIELDS as TNFL\nfrom model import TENANT_DATA as TNDT\n\ndef get_tenant_tables(tenant_id=None):\n if tenant_id is None:\n tables = TNTB.query.all()\n else:\n tables = TNTB.query.filter_by(tenant_id=tenant_id)\n return [t.as_dict() for t in tables]\n\ndef get_tenant_fields(tenant_id=None, table_name=None):\n if tenant_id is None:\n fields = TNFL.query.all()\n elif table_name is None:\n fields = TNFL.query.filter_by(tenant_id=tenant_id)\n else:\n fields = TNFL.query.filter_by(tenant_id=tenant_id,\n table_name=table_name)\n return [f.as_dict() for f in fields]\n\ndef get_tenant_data(tenant_id=None, tenant_table=None, record_id=None):\n if tenant_id is None:\n data = TNDT.query.all()\n elif tenant_table is None:\n data = TNDT.query.filter_by(tenant_id=tenant_id)\n elif record_id is None:\n data = TNDT.query.filter_by(tenant_id=tenant_id,\n tenant_table=tenant_table)\n else:\n data = TNDT.query.filter_by(tenant_id=tenant_id,\n tenant_table=tenant_table,\n record_id=record_id)\n return [d.as_dict() for d in data]\n\ndef get_tenant_table_data(tenant_id):\n tables = get_tenant_tables()\n result = {\"tenant_id\":tenant_id,\n \"tables\" :[]}\n for table in tables:\n temp_table = {\"table_name\":table[\"table_name\"],\n \"table_desc\":table[\"table_desc\"],\n \"fields\" :[]}\n fields = get_tenant_fields(tenant_id, temp_table[\"table_name\"])\n data = get_tenant_data(tenant_id, temp_table[\"table_name\"])\n if len(data) == 0:\n continue\n for field in fields:\n field_column = \"column_\" + str(field[\"field_column\"])\n temp_field = {\"field_name\":field[\"field_name\"],\n \"field_type\":field[\"field_type\"],\n \"field_data\":data[0][field_column]}\n temp_table[\"fields\"].append(temp_field)\n result[\"tables\"].append(temp_table)\n return result\n" }, { "alpha_fraction": 0.6749821901321411, "alphanum_fraction": 0.6756949424743652, "avg_line_length": 31.604650497436523, "blob_id": "b14e4fbfe96febedb8e4c679206f0d0a9e31df43", "content_id": "c3ada9ed89afed7f57626c6443ff32c9e1dd59a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1403, "license_type": "no_license", "max_line_length": 86, "num_lines": 43, "path": "/tenant3/save_and_parse.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "import subprocess\nimport os\n\ndef process(jar_path, save_path, data, output_path):\n \"\"\"save data into a file in save_path directory as sequence.java\n and parse the file with output saved in output_path\"\"\"\n\n #get the absolute path of 
save_path and output_path\n save_path = os.path.abspath(save_path)\n output_path = os.path.abspath(output_path)\n file_name = \"sequence\"\n file_ext = \".java\"\n\n #save data to folder and get the whole path\n seq_path = save_to_folder(save_path, file_name + file_ext, data)\n #parse the file\n parse_uml(jar_path, seq_path)\n\n #move file from save_path directory to output_path folder and rename to output.png\n ori = os.path.join(save_path, file_name + \".png\")\n des = os.path.join(output_path, \"output.png\")\n\n os.rename(ori, des)\n\n return des\n\n\ndef parse_uml(jar_path, input_path):\n \"\"\"parse_uml needs 2 inputs: a path to parser file,\n and an input_path to folder where sequence file resides.\n the result is store in the same input_path directory\"\"\"\n\n subprocess.call([\"java\", \"-jar\", jar_path, \"--headless\", input_path])\n\ndef save_to_folder(save_path, save_name, data):\n \"\"\"save data into a file in save_path directory with name given by save_name\n return the path to the file\"\"\"\n\n path = os.path.join(save_path, save_name)\n myfile = open(path, \"w\")\n myfile.write(data)\n myfile.close()\n return path\n\n" }, { "alpha_fraction": 0.6236323714256287, "alphanum_fraction": 0.636761486530304, "avg_line_length": 30.517240524291992, "blob_id": "893af7f86bd91418c31ac89f29d1dfdb2d8ce600", "content_id": "2490e3d34d3094e5e867e0934c8f1465eb57e2bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 914, "license_type": "no_license", "max_line_length": 83, "num_lines": 29, "path": "/tenant3/main.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "from flask import Flask, json, Response, request,render_template, url_for, redirect\nimport save_and_parse as sap\n\napp = Flask(__name__)\napp.config[\"APP_HOST\"] = \"0.0.0.0\"\napp.config[\"APP_PORT\"] = 5000\napp.config[\"PARSER\"] = \"./parser/parser.jar\"\napp.config[\"SAVE_FOLDER\"] = \"./textcodes/\"\napp.config[\"OUTPUT_FOLDER\"] = \"./static/\"\n\n\[email protected]('/textUpload', methods=['POST'])\ndef upload_text():\n body = request.json[\"textCode\"]\n sap.process(app.config[\"PARSER\"],\n app.config[\"SAVE_FOLDER\"],\n body,app.config[\"OUTPUT_FOLDER\"])\n\n res_json = {}\n res_json[\"link\"] = url_for(\"static\", filename=\"output.png\")\n\n return Response(response=json.dumps(res_json), status=200)\n\[email protected](\"/tenant3\",methods=['GET'])\ndef index():\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=app.config[\"APP_HOST\"], port=app.config[\"APP_PORT\"])\n" }, { "alpha_fraction": 0.6268801689147949, "alphanum_fraction": 0.6399806141853333, "avg_line_length": 33.36666488647461, "blob_id": "ad7943cecaafd504c51a9a6af82a0c3454e6c697", "content_id": "bffcb07c5c2f3259a2fd052f9a73dbfeef5ed110", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2061, "license_type": "no_license", "max_line_length": 113, "num_lines": 60, "path": "/multi_tenant/app.py", "repo_name": "khoale88/Multi-tenant-UMLParser", "src_encoding": "UTF-8", "text": "from flask import Flask, request, url_for, Response, make_response,json,abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom model import db, Expenses,CreateDB,DropDB #,DropDB,CreateDB\n\napp = Flask(__name__)\n\[email protected]('/v1/expenses',methods=['POST'])\ndef create_expense():\n if request.method == 'POST':\n json_request = json.loads(request.data)\n name = json_request['name']\n 
email = json_request['email']\n category = json_request['category']\n description = json_request['description']\n link = json_request['link']\n estimated_costs = json_request['estimated_costs']\n submit_date = json_request['submit_date']\n status = status_generator()\n decision_date = decision_date_update()\n expense = Expenses(name,email,category,description,link,estimated_costs,submit_date,status,decision_date)\n db.session.add(expense)\n db.session.commit() \n response = make_response(str(expense))\n response.status_code = 201\n return response\n\ndef status_generator():\n return 'pending'\n\ndef decision_date_update():\n return ''\n\[email protected]('/v1/expenses/<int:expense_id>',methods=['GET','PUT','DELETE'])\ndef show_expense(expense_id):\n if request.method == 'GET':\n expense = Expenses.query.get_or_404(expense_id)\n response = make_response(str(expense))\n #response.status_code = 200 #accepted\n\n if request.method == 'PUT':\n expense = Expenses.query.get_or_404(expense_id)\n response = make_response()\n response.status_code = 202\n json_request = json.loads(request.data)\n expense.estimated_costs = json_request['estimated_costs']\n db.session.commit()\n\n if request.method == 'DELETE':\n expense = Expenses.query.get_or_404(expense_id)\n response = make_response()\n db.session.delete(expense)\n db.session.commit()\n response.status_code = 204\n\n return response\n\nif __name__ == '__main__':\n CreateDB() \n db.create_all()\n app.run(debug=True,host='0.0.0.0')" } ]
16
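The tenant model above is a textbook shared-table multitenancy design: all tenants' records live in one wide `TENANT_DATA` table whose payload columns are the anonymous slots `column_1` through `column_10`, and `TENANT_FIELDS` records which slot carries which logical field; `get_tenant_table_data` in `controller.py` rebuilds typed records by computing `"column_" + str(field_column)` for each field. A small dict-based sketch of that remapping (field names and values are invented for illustration):

```python
# Shared-table slot remapping, mirroring controller.get_tenant_table_data.
# Plain dicts stand in for the SQLAlchemy rows; hypothetical sample data.

fields = [  # per-tenant schema metadata, as in TENANT_FIELDS
    {'field_name': 'class_name',   'field_type': 'string', 'field_column': 1},
    {'field_name': 'method_count', 'field_type': 'int',    'field_column': 2},
]

row = {  # one TENANT_DATA record: generic column_<n> slots
    'record_id': 'r1', 'tenant_id': 'Tenant4',
    'column_1': 'Parser', 'column_2': '7', 'column_3': None,
}

def materialize(fields, row):
    """Map anonymous slots back to the tenant's named fields."""
    out = {}
    for f in fields:
        slot = 'column_%d' % f['field_column']
        out[f['field_name']] = row.get(slot)
    return out

print(materialize(fields, row))
# -> {'class_name': 'Parser', 'method_count': '7'}
```

The trade-off is the usual one for this pattern: per-tenant schema changes are just metadata rows, at the cost of losing per-field types and constraints in the database itself, since every value is stored as a string in a generic slot.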
abubakrsiddq/HRWROS
https://github.com/abubakrsiddq/HRWROS
4c68d47b0cdf81e092ddb1000de3ebcf6650fbe5
f8d4323f48da0e6224cb7f18103236a0a27fcc57
92a1e271e8e1a4f386ba4822aa00f9334e138bf2
refs/heads/master
2023-03-13T06:14:41.040259
2021-03-03T16:32:39
2021-03-03T16:32:39
286,036,249
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.8297872543334961, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 94, "blob_id": "eea901b6903c0695ee5f422defc5880bb524c77c", "content_id": "80877d81f267239c4b27e278e4d7f8a2830f8d91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 94, "license_type": "no_license", "max_line_length": 94, "num_lines": 1, "path": "/hrwros_ws/devel/share/hrwros_msgs/cmake/hrwros_msgsConfig.cmake", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/share/hrwros_msgs/cmake/hrwros_msgsConfig.cmake" }, { "alpha_fraction": 0.8494623899459839, "alphanum_fraction": 0.8494623899459839, "avg_line_length": 93, "blob_id": "ba96bf11e8efe04c868e4e7b708320e65858e668", "content_id": "d2481abe427de37611c82218972a3a008f4086d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 93, "license_type": "no_license", "max_line_length": 93, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/CounterWithDelayFeedback.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/CounterWithDelayFeedback.h" }, { "alpha_fraction": 0.8041236996650696, "alphanum_fraction": 0.8350515365600586, "avg_line_length": 97, "blob_id": "202d9891ee4f3fbab682d69b2c5d50506a6b7c33", "content_id": "41884cad5c7a49eb4594f811ea853ba5b8d5387c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 97, "license_type": "no_license", "max_line_length": 97, "num_lines": 1, "path": "/hrwros_ws/devel/share/hrwros_week2/cmake/hrwros_week2Config.cmake", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_week2/share/hrwros_week2/cmake/hrwros_week2Config.cmake" }, { "alpha_fraction": 0.8541666865348816, "alphanum_fraction": 0.8541666865348816, "avg_line_length": 96, "blob_id": "078120315e7b2bd0e4bfe9f121e13e8801f5eba7", "content_id": "5c75e19557182f6a215a347bf02ee3b062a9019c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 96, "license_type": "no_license", "max_line_length": 96, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/ConvertMetresToFeetResponse.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/ConvertMetresToFeetResponse.h" }, { "alpha_fraction": 0.6454336047172546, "alphanum_fraction": 0.6678817868232727, "avg_line_length": 41.373985290527344, "blob_id": "150aeed309929dcc7fea92d68a5a5cc27b189474", "content_id": "fe3c9cbbbcbec8fda5e9ca2bb87d4ed93cd26935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5212, "license_type": "no_license", "max_line_length": 159, "num_lines": 123, "path": "/hrwros_ws/src/hrwros_factory_behaviors/flexbe_behaviors/src/flexbe_behaviors/pick_part_from_conveyor_sm.py", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###########################################################\n# WARNING: Generated code! #\n# ************************** #\n# Manual changes may get lost if file is generated again. #\n# Only code inside the [MANUAL] tags will be kept. 
#\n###########################################################\n\nfrom flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger\nfrom hrwros_factory_states.set_conveyor_power_state import SetConveyorPowerState\nfrom hrwros_factory_states.compute_grasp_state import ComputeGraspState\nfrom hrwros_factory_states.vacuum_gripper_control_state import VacuumGripperControlState\nfrom hrwros_factory_states.moveit_to_joints_dyn_state import MoveitToJointsDynState as hrwros_factory_states__MoveitToJointsDynState\nfrom hrwros_factory_states.detect_part_camera_state import DetectPartCameraState\n# Additional imports can be added inside the following tags\n# [MANUAL_IMPORT]\n\n# [/MANUAL_IMPORT]\n\n\n'''\nCreated on Tue Jun 30 2020\n@author: ABU BAKR SIDDIQ\n'''\nclass PickpartfromconveyorSM(Behavior):\n\t'''\n\tthis is behaviour to pick a part from conveyour belt using robot1\n\t'''\n\n\n\tdef __init__(self):\n\t\tsuper(PickpartfromconveyorSM, self).__init__()\n\t\tself.name = 'Pick part from conveyor'\n\n\t\t# parameters of this behavior\n\n\t\t# references to used behaviors\n\n\t\t# Additional initialization code can be added inside the following tags\n\t\t# [MANUAL_INIT]\n\n\t\t# [/MANUAL_INIT]\n\n\t\t# Behavior comments:\n\n\n\n\tdef create(self):\n\t\tpick_group = 'robot1'\n\t\thome1 = [1.24,-1.57,1.57,-1.57,-1.57,0]\n\t\tgripper1 = \"vacuum_gripper1_suction_cup\"\n\t\tnames1 = [\"robot1_shoulder_pan_joint\",\"robot1_shoulder_lift_joint\",\"robot1_elbow_joint\",\"robot1_wrist_1_joint\",\"robot1_wrist_2_joint\",\"robot1_wrist_3_joint\"]\n\t\t# x:782 y:543, x:76 y:559\n\t\t_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])\n\t\t_state_machine.userdata.part_pose = []\n\t\t_state_machine.userdata.pick_configuration = []\n\t\t_state_machine.userdata.home1 = home1\n\t\t_state_machine.userdata.conveyor_speed = 100\n\n\t\t# Additional creation code can be added inside the following tags\n\t\t# [MANUAL_CREATE]\n\n\t\t# [/MANUAL_CREATE]\n\n\n\t\twith _state_machine:\n\t\t\t# x:30 y:122\n\t\t\tOperatableStateMachine.add('Start conveyor',\n\t\t\t\t\t\t\t\t\t\tSetConveyorPowerState(stop=False),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'Detect part', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'speed': 'conveyor_speed'})\n\n\t\t\t# x:323 y:107\n\t\t\tOperatableStateMachine.add('Compute pick configuration',\n\t\t\t\t\t\t\t\t\t\tComputeGraspState(group=pick_group, offset=0.0, joint_names=names1, tool_link=gripper1, rotation=3.1415),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move Robot1 to pick', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'part_pose', 'joint_values': 'pick_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:543 y:315\n\t\t\tOperatableStateMachine.add('Activate gripper',\n\t\t\t\t\t\t\t\t\t\tVacuumGripperControlState(enable=True, service_name='/gripper1/control'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move Robot1 to home configuration', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:396 y:203\n\t\t\tOperatableStateMachine.add('Move Robot1 to pick',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveitToJointsDynState(move_group=pick_group, offset=0.0, tool_link=gripper1, action_topic='/move_group'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 
'Activate gripper', 'planning_failed': 'failed', 'control_failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'joint_values': 'pick_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:685 y:384\n\t\t\tOperatableStateMachine.add('Move Robot1 to home configuration',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveitToJointsDynState(move_group=pick_group, offset=0.0, tool_link=gripper1, action_topic='/move_group'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'finished', 'planning_failed': 'failed', 'control_failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'joint_values': 'home1', 'joint_names': 'joint_names'})\n\n\t\t\t# x:212 y:37\n\t\t\tOperatableStateMachine.add('Detect part',\n\t\t\t\t\t\t\t\t\t\tDetectPartCameraState(ref_frame='robot1_base', camera_topic='/hrwros/logical_camera_1', camera_frame='logical_camera_1_frame'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'stop conveyor', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'part_pose'})\n\n\t\t\t# x:585 y:26\n\t\t\tOperatableStateMachine.add('stop conveyor',\n\t\t\t\t\t\t\t\t\t\tSetConveyorPowerState(stop=True),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'Compute pick configuration', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'speed': 'conveyor_speed'})\n\n\n\t\treturn _state_machine\n\n\n\t# Private functions can be added inside the following tags\n\t# [MANUAL_FUNC]\n\n\t# [/MANUAL_FUNC]\n" }, { "alpha_fraction": 0.8585858345031738, "alphanum_fraction": 0.8585858345031738, "avg_line_length": 99, "blob_id": "349621acfee350ee02b0d3ee61383d9921e676c5", "content_id": "6f095b6a951db62a81284d538195d11210098bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 99, "license_type": "no_license", "max_line_length": 99, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/CounterWithDelayActionFeedback.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/CounterWithDelayActionFeedback.h" }, { "alpha_fraction": 0.8068181872367859, "alphanum_fraction": 0.8068181872367859, "avg_line_length": 88, "blob_id": "fd4442e03db9361bfc4273e234ef4d942ca72df5", "content_id": "51ac3d1a54878a12d584bdf22d50a4765af9a10a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 88, "license_type": "no_license", "max_line_length": 88, "num_lines": 1, "path": "/hrwros_ws/devel/share/gennodejs/ros/hrwros_msgs/_index.js", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/share/gennodejs/ros/hrwros_msgs/_index.js" }, { "alpha_fraction": 0.8292682766914368, "alphanum_fraction": 0.8292682766914368, "avg_line_length": 82, "blob_id": "f2a41814bcc5ef4e3239691e311a00fd534db3f3", "content_id": "edc3780cf4cd7d65a8ead12b591bb15f59546896", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 82, "license_type": "no_license", "max_line_length": 82, "num_lines": 1, "path": 
"/hrwros_ws/devel/include/hrwros_gazebo/Proximity.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/Proximity.h" }, { "alpha_fraction": 0.8494623899459839, "alphanum_fraction": 0.8494623899459839, "avg_line_length": 93, "blob_id": "3ebe2b614705966b8abb6042deb191c5b516fe57", "content_id": "104d59bf5c650a98aae93fca1dd07da652f1b991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 93, "license_type": "no_license", "max_line_length": 93, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_gazebo/VacuumGripperControl.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/VacuumGripperControl.h" }, { "alpha_fraction": 0.746835470199585, "alphanum_fraction": 0.746835470199585, "avg_line_length": 25, "blob_id": "b92b2445a67d97f8247ecb100bae252623826154", "content_id": "991f067c759ec214526a4c046aac8a2c0878bf64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 59, "num_lines": 3, "path": "/Readme.md", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "# HRWROS course \n\n## This repo contains the solutions of HRWROS course on edx\n\n" }, { "alpha_fraction": 0.8585858345031738, "alphanum_fraction": 0.8585858345031738, "avg_line_length": 99, "blob_id": "c2d0cd590f54b4157abbbb9fb5c695e8fded9cf8", "content_id": "5e2ba708f50ad87ab92432444da0826db0673b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 99, "license_type": "no_license", "max_line_length": 99, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_gazebo/ConveyorBeltControlRequest.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/ConveyorBeltControlRequest.h" }, { "alpha_fraction": 0.8526315689086914, "alphanum_fraction": 0.8526315689086914, "avg_line_length": 95, "blob_id": "83b55dd09d3a2b79c28bb6c01cb087b7c8208efb", "content_id": "89afbaebd098829a215e035bd7b487209e78e90f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 95, "license_type": "no_license", "max_line_length": 95, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/CounterWithDelayActionGoal.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/CounterWithDelayActionGoal.h" }, { "alpha_fraction": 0.8028169274330139, "alphanum_fraction": 0.8028169274330139, "avg_line_length": 71, "blob_id": "ba83223cbae804ef0bafdc1f2e70833d2e381d42", "content_id": "0e19e1627ddbec5b532672eeebe03bfa3d1b4cab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 71, "num_lines": 1, "path": "/hrwros_ws/devel/_setup_util.py", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/catkin_tools_prebuild/_setup_util.py" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 91, "blob_id": "88cdb47221bf7481a45033245e12d3f9b9ab8c12", "content_id": "df8004d11eb0bfbe92a5ce766c3e660441c20c4b", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "C", "length_bytes": 91, "license_type": "no_license", "max_line_length": 91, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_gazebo/LogicalCameraImage.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/LogicalCameraImage.h" }, { "alpha_fraction": 0.8219178318977356, "alphanum_fraction": 0.8219178318977356, "avg_line_length": 73, "blob_id": "6f26ad47173e363a6601801bc47e6675a423caa1", "content_id": "39cbe5376762eb2d36913851b35191dcec9da241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 73, "license_type": "no_license", "max_line_length": 73, "num_lines": 1, "path": "/hrwros_ws/devel/local_setup.bash", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/catkin_tools_prebuild/local_setup.bash" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 63, "blob_id": "5e63cb34839dac4000e240c1976682fa76ce397f", "content_id": "715f93c1c894f189fe4191e0b345fcc57ddc1126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 63, "license_type": "no_license", "max_line_length": 63, "num_lines": 1, "path": "/hrwros_ws/devel/env.sh", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/catkin_tools_prebuild/env.sh" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 91, "blob_id": "d2ea5078d9c9c4656979e078a155725c6f4c2a2b", "content_id": "b0bb827a113e581654dc71d06cce71dd3a92b504", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 91, "license_type": "no_license", "max_line_length": 91, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/CounterWithDelayAction.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/CounterWithDelayAction.h" }, { "alpha_fraction": 0.8444444537162781, "alphanum_fraction": 0.8444444537162781, "avg_line_length": 90, "blob_id": "cf85aa270b4d390615317704e3a2f7a062b3cb22", "content_id": "67c0319f02cb9c1e8f89719ddb0cdb1cdd7703de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 90, "license_type": "no_license", "max_line_length": 90, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_gazebo/ConveyorBeltState.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/ConveyorBeltState.h" }, { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8409090638160706, "avg_line_length": 88, "blob_id": "037abeba4d6ffdb1d260d387c7f52cdafedf33f2", "content_id": "3cd17418426191ff68f567b3380ef04caba103d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 88, "license_type": "no_license", "max_line_length": 88, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/ConvertMetresToFeet.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/ConvertMetresToFeet.h" }, { "alpha_fraction": 0.6953210234642029, "alphanum_fraction": 0.7007616758346558, "avg_line_length": 36.51020431518555, "blob_id": 
"4b5e36b7071cbe198966580e10f3cf214b9d882d", "content_id": "12377efb9625dc4cb7377430f7aba451d82cb47d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1838, "license_type": "no_license", "max_line_length": 112, "num_lines": 49, "path": "/hrwros_ws/src/hrwros/hrwros_week1/scripts/assignment2.py", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# This code has been adapted from the ROS Wiki ROS Service tutorials to the context\n# of this course.\n# (http://wiki.ros.org/ROS/Tutorials/WritingServiceClient%28python%29)\n\nimport sys\nimport rospy\nfrom hrwros_msgs.srv import ConvertMetresToFeet, ConvertMetresToFeetRequest, ConvertMetresToFeetResponse\nfrom hrwros_msgs.msg import boxHeightInformation\ndef metres_to_feet_client(x):\n rospy.loginfo(\"Requesting conversion of %4.2f m to feet\"%(x.box_height))\n # First wait for the service to become available.\n rospy.loginfo(\"Waiting for service...\")\n rospy.wait_for_service('metres_to_feet')\n ''' try:\n # Create a service proxy.\n metres_to_feet = rospy.ServiceProxy('metres_to_feet', ConvertMetresToFeet)\n\n # Call the service here.\n service_response = metres_to_feet(x)\n\n print(\"I only got here AFTER the service call was completed!\")\n\n # Return the response to the calling function.\n return service_response\n\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n '''\n return\nif __name__ == \"__main__\":\n\n # Initialize the client ROS node.\n rospy.init_node(\"metres_to_feet_client\", anonymous = False)\n\n # The distance to be converted to feet.\n rospy.Subscriber('box_height_info',boxHeightInformation,metres_to_feet_client)\n\n \n\n # Call the service client function.\n #service_response = metres_to_feet_client(dist_metres)\n\n # Process the service response and display log messages accordingly.\n # if(not service_response.success):\n # rospy.logerr(\"Conversion unsuccessful! 
Requested distance in metres should be a positive real number.\")\n #else:\n # rospy.loginfo(\"%4.2f(m) = %4.2f feet\"%(dist_metres, service_response.distance_feet))\n # rospy.loginfo(\"Conversion successful!\")\n" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 100, "blob_id": "a29211c6eaaa2d5621af4d448cb1d6b30ab52ebf", "content_id": "0fb3f951901409ae09155af8eadeb58c137e10c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 100, "license_type": "no_license", "max_line_length": 100, "num_lines": 1, "path": "/hrwros_ws/devel/share/hrwros_gazebo/cmake/hrwros_gazeboConfig.cmake", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/share/hrwros_gazebo/cmake/hrwros_gazeboConfig.cmake" }, { "alpha_fraction": 0.8600000143051147, "alphanum_fraction": 0.8600000143051147, "avg_line_length": 100, "blob_id": "a27ee84db01e778b785acfe4f5a5c273fe7ff22f", "content_id": "1ac52c1c105082986ad61e01ba6860c12f995cf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 100, "license_type": "no_license", "max_line_length": 100, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_gazebo/ConveyorBeltControlResponse.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/ConveyorBeltControlResponse.h" }, { "alpha_fraction": 0.8153846263885498, "alphanum_fraction": 0.8153846263885498, "avg_line_length": 65, "blob_id": "a4085d53d9469afed1130b4c51d863a6a10a1a98", "content_id": "5f47a75f36e4b297e260d43445f4cb2dffd96188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 65, "license_type": "no_license", "max_line_length": 65, "num_lines": 1, "path": "/hrwros_ws/devel/setup.sh", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/catkin_tools_prebuild/setup.sh" }, { "alpha_fraction": 0.8426966071128845, "alphanum_fraction": 0.8426966071128845, "avg_line_length": 89, "blob_id": "7dd3b7f6c55dcef919a03ff3c5c0bb5b741935df", "content_id": "924142c46b64b0f05b63262861f79dd8e4f4eae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 89, "license_type": "no_license", "max_line_length": 89, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/boxHeightInformation.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/boxHeightInformation.h" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 91, "blob_id": "9efea7c0b2e4da5ec58fcdb68bf5e3e11c0c8e32", "content_id": "6b2cc334dbe1670e7d5845f7cc72a0e70bf9455a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 91, "license_type": "no_license", "max_line_length": 91, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/CounterWithDelayResult.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/CounterWithDelayResult.h" }, { "alpha_fraction": 0.8205128312110901, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 78, "blob_id": "0c5627e32e53687140e1f644f60e11e1482d3645", "content_id": 
"c3d4b88f3cd0c530ea63ba676ad69ab84f9b270f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 78, "license_type": "no_license", "max_line_length": 78, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_gazebo/Model.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_gazebo/include/hrwros_gazebo/Model.h" }, { "alpha_fraction": 0.6479230523109436, "alphanum_fraction": 0.6696941256523132, "avg_line_length": 55.0823974609375, "blob_id": "0301e134a33aa5be28dd12f6aa52c6deefe69e54", "content_id": "3d1ea1f61f87a3b251a554664ff997f69f558246", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14974, "license_type": "no_license", "max_line_length": 201, "num_lines": 267, "path": "/hrwros_ws/src/hrwros_factory_behaviors/flexbe_behaviors/src/flexbe_behaviors/final_project_sm.py", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###########################################################\n# WARNING: Generated code! #\n# ************************** #\n# Manual changes may get lost if file is generated again. #\n# Only code inside the [MANUAL] tags will be kept. #\n###########################################################\n\nfrom flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger\nfrom flexbe_manipulation_states.srdf_state_to_moveit import SrdfStateToMoveit as flexbe_manipulation_states__SrdfStateToMoveit\nfrom hrwros_factory_states.compute_grasp_state import ComputeGraspState\nfrom hrwros_factory_states.control_feeder_state import ControlFeederState\nfrom hrwros_factory_states.vacuum_gripper_control_state import VacuumGripperControlState\nfrom hrwros_factory_states.locate_factory_device_state import LocateFactoryDeviceState\nfrom flexbe_states.subscriber_state import SubscriberState\nfrom hrwros_factory_states.moveit_to_joints_dyn_state import MoveitToJointsDynState as hrwros_factory_states__MoveitToJointsDynState\nfrom hrwros_factory_states.set_conveyor_power_state import SetConveyorPowerState\nfrom hrwros_factory_states.move_base_state import MoveBaseState as hrwros_factory_states__MoveBaseState\nfrom hrwros_factory_states.detect_part_camera_state import DetectPartCameraState\n# Additional imports can be added inside the following tags\n# [MANUAL_IMPORT]\nfrom geometry_msgs.msg import Pose2D\n\n# [/MANUAL_IMPORT]\n\n\n'''\nCreated on @author: you\n@author: you\n'''\nclass FinalProjectSM(Behavior):\n\t'''\n\tFinal project for the MOOC Hello (Real) World with ROS\nThe three robots in the factory move to process the parts\n\t'''\n\n\n\tdef __init__(self):\n\t\tsuper(FinalProjectSM, self).__init__()\n\t\tself.name = 'Final Project'\n\n\t\t# parameters of this behavior\n\n\t\t# references to used behaviors\n\n\t\t# Additional initialization code can be added inside the following tags\n\t\t# [MANUAL_INIT]\n\t\t\n\t\t# [/MANUAL_INIT]\n\n\t\t# Behavior comments:\n\n\n\n\tdef create(self):\n\t\tnames1 = ['robot1_shoulder_pan_joint', 'robot1_shoulder_lift_joint', 'robot1_elbow_joint', 'robot1_wrist_1_joint', 'robot1_wrist_2_joint', 'robot1_wrist_3_joint']\n\t\tpick1_group = 'robot1'\n\t\trobot1_loc = Pose2D(x=3.8, y=2.1, theta=-90.0)\n\t\tgripper1 = \"vacuum_gripper1_suction_cup\"\n\t\tpick2_group = 'robot2'\n\t\trobot2_loc = Pose2D(x=-4.3,y=-0.9,theta=0.0)\n\t\tnames2 = ['robot2_shoulder_pan_joint', 
'robot2_shoulder_lift_joint', 'robot2_elbow_joint', 'robot2_wrist_1_joint', 'robot2_wrist_2_joint', 'robot2_wrist_3_joint']\n\t\tgripper2 = \"vacuum_gripper2_suction_cup\"\n\t\t# x:120 y:87, x:594 y:171\n\t\t_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])\n\t\t_state_machine.userdata.part_pose = []\n\t\t_state_machine.userdata.robot1_loc = robot1_loc\n\t\t_state_machine.userdata.pose_turtlebot = []\n\t\t_state_machine.userdata.pick1_configuration = []\n\t\t_state_machine.userdata.place1_configuration = []\n\t\t_state_machine.userdata.speed = 100\n\t\t_state_machine.userdata.robot2_loc = robot2_loc\n\t\t_state_machine.userdata.pick2_configuration = []\n\t\t_state_machine.userdata.place2_configuration = []\n\n\t\t# Additional creation code can be added inside the following tags\n\t\t# [MANUAL_CREATE]\n\t\t\n\t\t# [/MANUAL_CREATE]\n\n\n\t\twith _state_machine:\n\t\t\t# x:30 y:30\n\t\t\tOperatableStateMachine.add('Move R1 Home',\n\t\t\t\t\t\t\t\t\t\tflexbe_manipulation_states__SrdfStateToMoveit(config_name='R1Home', move_group=pick1_group, action_topic='/move_group', robot_name=''),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'conveyor start', 'planning_failed': 'failed', 'control_failed': 'failed', 'param_error': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off, 'param_error': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'config_name': 'config_name', 'move_group': 'move_group', 'robot_name': 'robot_name', 'action_topic': 'action_topic', 'joint_values': 'joint_values', 'joint_names': 'joint_names'})\n\n\t\t\t# x:1176 y:95\n\t\t\tOperatableStateMachine.add('Compute pick',\n\t\t\t\t\t\t\t\t\t\tComputeGraspState(group=pick1_group, offset=0.0, joint_names=names1, tool_link=gripper1, rotation=3.1415),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Activate Gripper 1', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'part_pose', 'joint_values': 'pick1_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:443 y:48\n\t\t\tOperatableStateMachine.add('Start feeder',\n\t\t\t\t\t\t\t\t\t\tControlFeederState(activation=True),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'Wait for part', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:789 y:5\n\t\t\tOperatableStateMachine.add('Stop feeder',\n\t\t\t\t\t\t\t\t\t\tControlFeederState(activation=False),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'conveyor stop', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:950 y:130\n\t\t\tOperatableStateMachine.add('Activate Gripper 1',\n\t\t\t\t\t\t\t\t\t\tVacuumGripperControlState(enable=True, service_name='/gripper1/control'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move R1 to pick', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:1153 y:438\n\t\t\tOperatableStateMachine.add('Compute place Turtlebot',\n\t\t\t\t\t\t\t\t\t\tComputeGraspState(group=pick1_group, offset=0.6, joint_names=names1, tool_link=gripper1, rotation=3.1415),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move R1 to place', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'pose_turtlebot', 'joint_values': 'place1_configuration', 
'joint_names': 'joint_names'})\n\n\t\t\t# x:1150 y:372\n\t\t\tOperatableStateMachine.add('LocateTurtlebot',\n\t\t\t\t\t\t\t\t\t\tLocateFactoryDeviceState(model_name='mobile_base', output_frame_id='world'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'Compute place Turtlebot', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'pose_turtlebot'})\n\n\t\t\t# x:1129 y:236\n\t\t\tOperatableStateMachine.add('Move R1 back Home',\n\t\t\t\t\t\t\t\t\t\tflexbe_manipulation_states__SrdfStateToMoveit(config_name='R1Home', move_group=pick1_group, action_topic='/move_group', robot_name=''),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'turtle to robot1', 'planning_failed': 'failed', 'control_failed': 'failed', 'param_error': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off, 'param_error': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'config_name': 'config_name', 'move_group': 'move_group', 'robot_name': 'robot_name', 'action_topic': 'action_topic', 'joint_values': 'joint_values', 'joint_names': 'joint_names'})\n\n\t\t\t# x:77 y:382\n\t\t\tOperatableStateMachine.add('Move R2 back to Home',\n\t\t\t\t\t\t\t\t\t\tflexbe_manipulation_states__SrdfStateToMoveit(config_name='R2Home', move_group=pick2_group, action_topic='/move_group', robot_name=''),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'Move R2 to bin', 'planning_failed': 'failed', 'control_failed': 'failed', 'param_error': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off, 'param_error': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'config_name': 'config_name', 'move_group': 'move_group', 'robot_name': 'robot_name', 'action_topic': 'action_topic', 'joint_values': 'joint_values', 'joint_names': 'joint_names'})\n\n\t\t\t# x:629 y:17\n\t\t\tOperatableStateMachine.add('Wait for part',\n\t\t\t\t\t\t\t\t\t\tSubscriberState(topic='/break_beam_sensor_change', blocking=True, clear=True),\n\t\t\t\t\t\t\t\t\t\ttransitions={'received': 'Stop feeder', 'unavailable': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'message': 'message'})\n\n\t\t\t# x:1119 y:168\n\t\t\tOperatableStateMachine.add('Move R1 to pick',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveitToJointsDynState(move_group=pick1_group, offset=0.0, tool_link=gripper1, action_topic='/move_group'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'Move R1 back Home', 'planning_failed': 'failed', 'control_failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'joint_values': 'pick1_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:1145 y:504\n\t\t\tOperatableStateMachine.add('Move R1 to place',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveitToJointsDynState(move_group=pick1_group, offset=0.0, tool_link=gripper1, action_topic='/move_group'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'deactivate Gripper 1', 'planning_failed': 'failed', 'control_failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'joint_values': 'place1_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:251 
y:10\n\t\t\tOperatableStateMachine.add('conveyor start',\n\t\t\t\t\t\t\t\t\t\tSetConveyorPowerState(stop=False),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'Start feeder', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'speed': 'speed'})\n\n\t\t\t# x:957 y:12\n\t\t\tOperatableStateMachine.add('conveyor stop',\n\t\t\t\t\t\t\t\t\t\tSetConveyorPowerState(stop=True),\n\t\t\t\t\t\t\t\t\t\ttransitions={'succeeded': 'Detect Part Camera', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'succeeded': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'speed': 'speed'})\n\n\t\t\t# x:1158 y:304\n\t\t\tOperatableStateMachine.add('turtle to robot1',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveBaseState(),\n\t\t\t\t\t\t\t\t\t\ttransitions={'arrived': 'LocateTurtlebot', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'arrived': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'waypoint': 'robot1_loc'})\n\n\t\t\t# x:967 y:534\n\t\t\tOperatableStateMachine.add('deactivate Gripper 1',\n\t\t\t\t\t\t\t\t\t\tVacuumGripperControlState(enable=False, service_name='/gripper1/control'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move R1 back HomeD', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:782 y:531\n\t\t\tOperatableStateMachine.add('Move R1 back HomeD',\n\t\t\t\t\t\t\t\t\t\tflexbe_manipulation_states__SrdfStateToMoveit(config_name='R1Home', move_group=pick1_group, action_topic='/move_group', robot_name=''),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'turtle to robot2', 'planning_failed': 'failed', 'control_failed': 'failed', 'param_error': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off, 'param_error': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'config_name': 'config_name', 'move_group': 'move_group', 'robot_name': 'robot_name', 'action_topic': 'action_topic', 'joint_values': 'joint_values', 'joint_names': 'joint_names'})\n\n\t\t\t# x:599 y:526\n\t\t\tOperatableStateMachine.add('turtle to robot2',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveBaseState(),\n\t\t\t\t\t\t\t\t\t\ttransitions={'arrived': 'Detect Part Camera 2', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'arrived': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'waypoint': 'robot2_loc'})\n\n\t\t\t# x:1148 y:10\n\t\t\tOperatableStateMachine.add('Detect Part Camera',\n\t\t\t\t\t\t\t\t\t\tDetectPartCameraState(ref_frame='robot1_base', camera_topic='/hrwros/logical_camera_1', camera_frame='logical_camera_1_frame'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Compute pick', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'part_pose'})\n\n\t\t\t# x:423 y:518\n\t\t\tOperatableStateMachine.add('Detect Part Camera 2',\n\t\t\t\t\t\t\t\t\t\tDetectPartCameraState(ref_frame='robot2_base', camera_topic='/hrwros/logical_camera_2', camera_frame='logical_camera_2_frame'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Compute pick_2', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'part_pose'})\n\n\t\t\t# x:276 y:544\n\t\t\tOperatableStateMachine.add('Compute pick_2',\n\t\t\t\t\t\t\t\t\t\tComputeGraspState(group=pick2_group, offset=0.0, 
joint_names=names2, tool_link=gripper2, rotation=3.1415),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Activate Gripper 2', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'pose': 'part_pose', 'joint_values': 'pick2_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:79 y:540\n\t\t\tOperatableStateMachine.add('Activate Gripper 2',\n\t\t\t\t\t\t\t\t\t\tVacuumGripperControlState(enable=True, service_name='/gripper2/control'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move R2 to pick', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:79 y:473\n\t\t\tOperatableStateMachine.add('Move R2 to pick',\n\t\t\t\t\t\t\t\t\t\thrwros_factory_states__MoveitToJointsDynState(move_group=pick2_group, offset=0.0, tool_link=gripper2, action_topic='/move_group'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'Move R2 back to Home', 'planning_failed': 'failed', 'control_failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'joint_values': 'pick2_configuration', 'joint_names': 'joint_names'})\n\n\t\t\t# x:75 y:284\n\t\t\tOperatableStateMachine.add('Move R2 to bin',\n\t\t\t\t\t\t\t\t\t\tflexbe_manipulation_states__SrdfStateToMoveit(config_name='R2Place', move_group=pick2_group, action_topic='/move_group', robot_name=''),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'deActivate Gripper 2', 'planning_failed': 'failed', 'control_failed': 'failed', 'param_error': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off, 'param_error': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'config_name': 'config_name', 'move_group': 'move_group', 'robot_name': 'robot_name', 'action_topic': 'action_topic', 'joint_values': 'joint_values', 'joint_names': 'joint_names'})\n\n\t\t\t# x:99 y:193\n\t\t\tOperatableStateMachine.add('deActivate Gripper 2',\n\t\t\t\t\t\t\t\t\t\tVacuumGripperControlState(enable=False, service_name='/gripper2/control'),\n\t\t\t\t\t\t\t\t\t\ttransitions={'continue': 'Move R2 back to Home_2', 'failed': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})\n\n\t\t\t# x:172 y:120\n\t\t\tOperatableStateMachine.add('Move R2 back to Home_2',\n\t\t\t\t\t\t\t\t\t\tflexbe_manipulation_states__SrdfStateToMoveit(config_name='R2Home', move_group=pick2_group, action_topic='/move_group', robot_name=''),\n\t\t\t\t\t\t\t\t\t\ttransitions={'reached': 'Move R1 Home', 'planning_failed': 'failed', 'control_failed': 'failed', 'param_error': 'failed'},\n\t\t\t\t\t\t\t\t\t\tautonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off, 'param_error': Autonomy.Off},\n\t\t\t\t\t\t\t\t\t\tremapping={'config_name': 'config_name', 'move_group': 'move_group', 'robot_name': 'robot_name', 'action_topic': 'action_topic', 'joint_values': 'joint_values', 'joint_names': 'joint_names'})\n\n\n\t\treturn _state_machine\n\n\n\t# Private functions can be added inside the following tags\n\t# [MANUAL_FUNC]\n\t\n\t# [/MANUAL_FUNC]\n" }, { "alpha_fraction": 0.8556700944900513, "alphanum_fraction": 0.8556700944900513, "avg_line_length": 97, "blob_id": "3017245cf5c751535b6ad78e60733aff2d1944d8", "content_id": "17340d71fcec9e4bdb2cd98053203e71c582fbd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C", "length_bytes": 97, "license_type": "no_license", "max_line_length": 97, "num_lines": 1, "path": "/hrwros_ws/devel/include/hrwros_msgs/CounterWithDelayActionResult.h", "repo_name": "abubakrsiddq/HRWROS", "src_encoding": "UTF-8", "text": "/home/abu/hrwros_ws/devel/.private/hrwros_msgs/include/hrwros_msgs/CounterWithDelayActionResult.h" } ]
28
AldanaVallejos/Aldana-Vallejos
https://github.com/AldanaVallejos/Aldana-Vallejos
eb57cb9ce20fac4ce930bb3652ce0ff14eb92cf2
fa580db44a14cdbb75bc800d96643704cd372080
948fd11018f85f6c0e243db099880c8b41032b97
refs/heads/main
2023-07-31T06:42:27.395980
2023-03-05T23:33:12
2023-03-05T23:33:12
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.36434781551361084, "alphanum_fraction": 0.39739128947257996, "avg_line_length": 34, "blob_id": "72a07e35516a387d34787214e56e93d381fc74cf", "content_id": "421aea63a2f6782afd7b43e6b7403df97e9b5d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 88, "num_lines": 32, "path": "/Guia 2 Ej 8.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 8: Dados un mes y el año correspondiente informar cuantos días tiene el mes.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: mes, año\r\n# Salidas: 30 dias, 31 dias, 28 dias\r\n# Procesos: Ingresar mes\r\n# Ingresar año\r\n# Determinar cuantos dias tiene el mes\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nmes=0\r\naño=0\r\n\r\n# Ingreso de datos\r\naño=int(input(\"Ingrese el año: \"))\r\nmes=int(input(\"Ingrese el mes: \"))\r\n\r\nif mes==9 or mes==4 or mes==6 or mes==11:\r\n print(\"El mes tiene 30 días\")\r\nelse:\r\n if mes==1 or mes==3 or mes==5 or mes==7 or mes==8 or mes==10 or mes==12:\r\n print(\"El mes tiene 31 días\")\r\n else:\r\n print(\"El mes tiene 28 días\")" }, { "alpha_fraction": 0.3594040870666504, "alphanum_fraction": 0.3854748606681824, "avg_line_length": 39.230770111083984, "blob_id": "1bd802e19e8b68d0de95cd01b674003cf8a93673", "content_id": "325bf3533f9bde488e7778505aa6d0169ee8f453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 71, "num_lines": 26, "path": "/EJ9.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 30/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 9: Escribir un programa que utilice la función\r\n# anterior para generar una tabla de conversión\r\n# de temperaturas, desde 0 °F hasta 120 °F, de 10 en 10. 
\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: \r\n# Salidas: \r\n# Procesos: Definir una función que termine en la ecuación prevista\r\n#           Muestro por pantalla las tablas\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n\r\n# Defino la función (convierte grados Celsius a Fahrenheit)\r\ndef ecuacion(c):\r\n    return 9/5 * c + 32\r\n#Salida por pantalla\r\nprint (\"Grados Celsius // Grados Fahrenheit\")\r\nfor grados in range(0,121,10):\r\n    print (\"   \",grados,\"           \",ecuacion(grados))\r\n\r\n" }, { "alpha_fraction": 0.38727524876594543, "alphanum_fraction": 0.4038727581501007, "avg_line_length": 30.893939971923828, "blob_id": "031f7c432e507b085b7036760b26c07f8aa68161", "content_id": "1251ac38c11733fdca93f39d5bbe9a4d2621e59a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2185, "license_type": "no_license", "max_line_length": 76, "num_lines": 66, "path": "/Guia 2 EJ 14.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 08/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 14: Suponiendo que el primer día del año fue lunes,\r\n#               escribir un programa que reciba un número con el\r\n#               día del año (de 1 a 366) y devuelva el día de la\r\n#               semana que le toca. Por ejemplo: si recibe '3' debe\r\n#               devolver 'miércoles', si recibe '9' debe devolver 'martes'. \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: dia, resto\r\n# Salidas: lunes, martes, miércoles, jueves, viernes, sabado, domingo\r\n# Procesos: Ingreso el dia\r\n#           Planteo una division para sacar el resto -> dia%7\r\n#           Si resto=0 print \"domingo\"\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\ndia=0\r\nresto=0\r\n\r\n# Ingreso de dato\r\ndia=int(input(\"Ingrese el número del día: \"))\r\n#Calculo el resto de la division por 7\r\nresto=dia%7\r\n\r\nif resto==1:\r\n    # Salida por pantalla\r\n    print(\"Lunes\")\r\nelse:\r\n    if resto==2:\r\n        # Salida por pantalla\r\n        print(\"Martes\")\r\n    else:\r\n        if resto==3:\r\n            # Salida por pantalla\r\n            print(\"Miércoles\")\r\n        else:\r\n            if resto==4:\r\n                # Salida por pantalla\r\n                print(\"Jueves\")\r\n            else:\r\n                if resto==5:\r\n                    # Salida por pantalla\r\n                    print(\"Viernes\")\r\n                else:\r\n                    if resto==6:\r\n                        # Salida por pantalla\r\n                        print(\"Sábado\")\r\n                    else:\r\n                        # resto==0: el día es múltiplo de 7 y cae en domingo\r\n                        # Salida por pantalla\r\n                        print(\"Domingo\")" }, { "alpha_fraction": 0.3959670066833496, "alphanum_fraction": 0.410632461309433, "avg_line_length": 32.15625, "blob_id": "698f2a9d1eff6d1a2ef714cee0f56a72b935c6fa", "content_id": "2b91b647523fdc2a70703524641e83f8741cfd4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1099, "license_type": "no_license", "max_line_length": 87, "num_lines": 32, "path": "/G3 ej2.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": 
"UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 13/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 2: Dados N y M números naturales, informar su producto por sumas sucesivas.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: N, M\r\n# Salidas: producto\r\n# Procesos: Ingresar numero N\r\n# Ingresar numero M\r\n# Determinar que sean numeros naturales\r\n# Calcular su producto -> producto=\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nN=0\r\nM=0\r\nproducto=0\r\n\r\nN=int(input(\"Ingrese el primer número: \"))\r\nM=int(input(\"Ingrese el segundo número: \"))\r\n\r\nwhile M!=0:\r\n producto=producto+N\r\n M=M-1\r\n\r\n# Salida por pantalla\r\nprint(\"El producto entre los dos números ingresados es {0}\".format(producto))" }, { "alpha_fraction": 0.5782101154327393, "alphanum_fraction": 0.5972762703895569, "avg_line_length": 35.27536392211914, "blob_id": "768fa2468d5034149b2895e499c0fbe07e1c152e", "content_id": "8bcdf9cff7fc3bb7dd0989c8d64348dbfcc4d9db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2583, "license_type": "no_license", "max_line_length": 107, "num_lines": 69, "path": "/G3 ej4.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 15/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 4: En un torneo de fútbol participan K equipos. El torneo se juega\r\n# con el sistema de todos contra todos. Por cada partido disputado\r\n# por un equipo se dispone de la siguiente información :\r\n# a)\tNro. de equipo,\r\n# b)\tCódigo del resultado ('P'= Perdido, 'E'= Empatado, 'G'= Ganado).\r\n# Se arma un lote de datos con todos los resultados del torneo, agrupados por Nro. de equipo.\r\n# Desarrollar el programa que imprima:\r\n# 1) Por cada equipo, su número y el puntaje total que obtuvo (suma 3 si gana, y 1 si empata).\r\n# 2) Nro. de equipo que totalizó la menor cantidad de puntos. (hay solo uno)\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nK=-1\r\ncontinuar=\"S\"\r\nnro=0\r\nresultado=\"\"\r\npuntaje=0\r\npuntajetotal=0\r\npuntajeganados=0\r\npuntajeempatados=0\r\nminimo=10000000000\r\n\r\nwhile K<0: #Validaciones\r\n\ttry:\r\n\t\tK=int(input(\"Ingrese la cantidad de equipos: \")) #Ingreso de dato\r\n\t\tif K<0:\r\n\t\t\tprint(\"Error. Ingrese número mayor a 0\")\r\n\t\telse:\r\n\t\t\tbreak\r\n\texcept ValueError:\r\n\t\tprint(\"Error. 
Tenés que ingresar una cantidad de equipos\")\r\n\r\nwhile K>0:\r\n\tK=K-1\r\n\tnro=int(input(\"Ingrese el numero de equipo: \")) #Ingreso de dato\r\n\tresultado=input(\"Ingrese el resultado (G E o P): \") #Ingreso de dato\r\n\r\n\tif resultado==\"G\": #Caso de resultado=ganado\r\n\t\tpuntajeganados=puntajeganados+3\r\n\t\tprint(\"El puntaje del equipo {0} es {1}\".format(nro,puntajeganados)) #Salida por pantalla\r\n\telse:\r\n\t\tif resultado==\"E\": #Caso de resultado=empatado\r\n\t\t\tpuntajeempatados=puntajeempatados+1\r\n\t\t\tprint(\"El puntaje del equipo {0} es {1}\".format(nro,puntajeempatados)) #Salida por pantalla\r\n\t\telse:\r\n\t\t\tif resultado==\"P\": #Caso de resultado=perdido\r\n\t\t\t\tpuntaje=0\r\n\t\t\t\tprint(\"El puntaje del equipo {0} es 0\".format(nro)) #Salida por pantalla\r\n\t\t\telse:\r\n\t\t\t\tprint(\"ERROR - Ingrese un resultado válido\")\r\n\t\t\t\tresultado=input(\"Ingrese el resultado: \")\r\n\t\r\n\tif nro>=0:\r\n\t\tif nro<minimo:\r\n\t\t\tminimo=nro\r\n\r\n\tif K>0:\r\n\t\tif puntajetotal<minimo:\r\n\t\t\t minimo=puntajetotal\r\n\r\n#Salidas por pantalla\r\nprint(\"La cantidad de equipos ingresados fueron: {0}\".format(nro, puntajetotal))\r\nprint(\"El equipo que totalizó la menor cantidad de puntos fue el: {0}\".format(nro))" }, { "alpha_fraction": 0.4181002378463745, "alphanum_fraction": 0.4293193817138672, "avg_line_length": 34.86111068725586, "blob_id": "e353dd71eb90861b6b10478fd1ae4d5843d62ba3", "content_id": "59429416a80b4d48907b4c73afb2741c751b28b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1345, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/EJ14.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 02/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 14: Modificar el programa anterior para que pueda generar\r\n# fichas de un juego que puede tener números de 0 a n\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: cantidad, continuar\r\n# Salidas: cantidad\r\n# Procesos: Ingreso una cantidad de fichas\r\n# Compruebo que sea un valor entero\r\n# Muestro los números por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\ncantidad=0\r\ncontinuar=\"S\"\r\n\r\n# Creo un ciclo de repetición\r\nwhile continuar==\"S\":\r\n if continuar==\"S\":\r\n # Ingreso de dato\r\n print(\"Ingrese una cantidad de fichas \")\r\n cantidad=int(input())\r\n continuar=\"N\"\r\n else:\r\n # Muestro un mensaje de error\r\n print(\"Ingrese únicamente valores enteros\\n\")\r\nfor n in range (cantidad+1):\r\n for x in range (n,cantidad+1):\r\n #Salida por pantalla\r\n print(n,x)\r\n\r\n " }, { "alpha_fraction": 0.5219106078147888, "alphanum_fraction": 0.5376862287521362, "avg_line_length": 36.64406967163086, "blob_id": "2d6d9ca730db6b1812b06e1bc57b625cb6a0f64f", "content_id": "c9d5dc5f367c7ec43724fd4759ee709fa43ee0d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2290, "license_type": "no_license", "max_line_length": 75, "num_lines": 59, "path": "/Guia 2 Ej 6.py", "repo_name": 
"AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 6: Dadas dos fechas informar cual es la más reciente.\r\n# Determine cuales serían los datos de entrada y las \r\n# leyendas a informar de acuerdo al proceso solicitado. \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: fecha1, fecha2, fechaactual, diferencia, diferencia2, continuar\r\n# Salidas: la mas reciente\r\n# Procesos: Ingresar fechaactual\r\n# Ingresar fecha1\r\n# Ingresar fecha2\r\n# Validar que sean fechas pasadas\r\n# Validar que no sean fechas iguales\r\n# Calculo las diferencias -> diferencia=fechaactual-fecha1\r\n# diferencia2=fechaactual-fecha2\r\n# Muestro el resultado en pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nfechaactual=0\r\nfecha1=0\r\nfecha2=0\r\ncontinuar=\"S\"\r\ndiferencia=0\r\ndiferencia2=0\r\n\r\nwhile continuar==\"S\":\r\n # Ingreso de datos\r\n fechaactual=int(input(\"Ingrese la fecha actual en orden (AAAAMMDD): \"))\r\n fecha1=int(input(\"Ingrese la primera fecha en orden (AAAAMMDD): \"))\r\n fecha2=int(input(\"Ingrese la segunda fecha en orden (AAAAMMDD): \"))\r\n \r\n # Verifico que no sean fechas futuras a la actual\r\n if fecha1>fechaactual or fecha2>fechaactual:\r\n # Salida por pantalla\r\n print(\"Error - Ingrese una fecha pasada a la actual\")\r\n \r\n # Verifico que no sean fechas iguales\r\n if fecha1==fecha2:\r\n # Salida por pantalla\r\n print(\"Error - Ingrese fechas distintas\")\r\n continuar=\"N\"\r\n\r\n# Calculo la diferencia entre la fecha actual menos las fechas\r\ndiferencia=fechaactual-fecha1\r\ndiferencia2=fechaactual-fecha2\r\n\r\nif diferencia<diferencia2:\r\n # Salida por pantalla\r\n print(\"La primera fecha es la más reciente\")\r\nelse:\r\n # Salida por pantalla\r\n print(\"La segunda fecha es la más reciente\")\r\n\r\n" }, { "alpha_fraction": 0.4960739016532898, "alphanum_fraction": 0.5117782950401306, "avg_line_length": 31.581396102905273, "blob_id": "ab0f8fa83b0c39a37f2a17e098c338381e361b2e", "content_id": "b4afd8e4f0f84129b23b801901675021bdc73450", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4349, "license_type": "no_license", "max_line_length": 90, "num_lines": 129, "path": "/Guia 2 EJ 13.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 08/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 13: Escribir un algoritmo que permita encontrar:\r\n# a) El máximo o mínimo de un polinomio de segundo grado\r\n# (dados los coeficientes a, b y c), indicando si es un máximo o un mínimo. \r\n# b) Las raíces (reales) de un polinomio de segundo grado. Nota: validar\r\n# que las operaciones puedan efectuarse antes de realizarlas (no dividir\r\n# por cero, ni calcular la raíz de un número negativo). \r\n# c) La intersección de dos rectas (dadas las pendientes y ordenada\r\n# al origen de cada recta). 
Nota: validar que no sean dos rectas con\r\n#               la misma pendiente, antes de efectuar la operación.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: a, b, c, d, e, f\r\n# Salidas: v, raices, interseccion\r\n# Procesos: a) Ingresar los coeficientes a, b y c\r\n#              Calcular el valor de x -> x=-b/(2*a)\r\n#              Calcular el valor de y -> y=c-(b**2)/(4*a)\r\n#              Calcular el vértice -> v=[x, y]\r\n#              Mostrar tiene un minimo (si a es positiva) en v=[x, y]\r\n#              Mostrar tiene un máximo (si a es negativa) en v=[x, y]\r\n#\r\n#           b) Ingresar los coeficientes a, b y c\r\n#              Calcular r1=b**2-4*a*c\r\n#              Verificar que a no sea 0 y que r1 no sea negativo\r\n#              Calcular x1=(-b+sqrt(r1))/(2*a)\r\n#              Calcular x2=(-b-sqrt(r1))/(2*a)\r\n#              raices=[x1, x2]\r\n#              Mostrar (\"Las raices del polinomio son\", (raices))\r\n#\r\n#           c) Ingresar los coeficientes a, b y c\r\n#              Ingresar los coeficientes d, e y f\r\n#              Calculo el determinante -> determinante=a*e - b*d\r\n#              Calculo los sistemas de ecuacion -> x=(c*e - b*f) / determinante\r\n#                                                  y= (a*f - c*d) / determinante\r\n#              Mostrar la intersección por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\n# Calcular [x1, x2]=(-b+-sqrt(b**2-4*a*c))/(2*a)\r\na=0\r\nb=0\r\nc=0\r\nv=0\r\nx1=0\r\nx2=0\r\nr1=0\r\nd=0\r\ne=0\r\nf=0\r\ndeterminante=0\r\n\r\n#a)\r\n# Ingreso de datos\r\na=int(input(\"Ingrese el primer coeficiente (a): \"))\r\nb=int(input(\"Ingrese el segundo coeficiente (b): \"))\r\nc=int(input(\"Ingrese el tercer coeficiente (c): \"))\r\n\r\n# Calculo el valor de x\r\nx=-b/(2*a)\r\n\r\n# Calculo el valor de y\r\ny=c-(b**2)/(4*a)\r\n\r\n# Vértice\r\nv=[x, y]\r\n\r\n# Determino si es un punto maximo o minimo\r\nif a<0:\r\n    print(\"Tiene un máximo en\", v)\r\nelse:\r\n    if a>0:\r\n        print(\"Tiene un minimo en\", v)\r\n\r\n#b)\r\n# Ingreso de datos\r\na=int(input(\"Ingrese el primer coeficiente (a): \"))\r\nb=int(input(\"Ingrese el segundo coeficiente (b): \"))\r\nc=int(input(\"Ingrese el tercer coeficiente (c): \"))\r\n\r\n# Realizo formula de bhaskara\r\nr1=b**2-4*a*c\r\n\r\n# Verifico que a no sea 0 y que el discriminante no sea negativo\r\nif a==0 or r1<0:\r\n    print(\"ERROR - INGRESE OTROS NUMEROS\")\r\nelse:\r\n    # Calculo x1=(-b+sqrt(r1))/(2*a) y x2=(-b-sqrt(r1))/(2*a)\r\n    import math\r\n    x1=(-b+math.sqrt(r1))/(2*a)\r\n    x2=(-b-math.sqrt(r1))/(2*a)\r\n\r\n    print(\"Las raices del polinomio son\", (x1,x2))\r\n\r\n#c)\r\n# Ingreso de datos\r\na=int(input(\"Ingrese el primer coeficiente (a): \"))\r\nb=int(input(\"Ingrese el segundo coeficiente (b): \"))\r\nc=int(input(\"Ingrese el tercer coeficiente (c): \"))\r\nd=int(input(\"Ingrese el coeficiente (d): \"))\r\ne=int(input(\"Ingrese el coeficiente (e): \"))\r\nf=int(input(\"Ingrese el coeficiente (f): \"))\r\n\r\ndef interseccion(a, b, c, d, e, f):\r\n    # Calculo el determinante\r\n    determinante=a*e - b*d\r\n\r\n    if determinante!=0:\r\n        # Calculo los sistemas de ecuacion\r\n        x=(c*e - b*f) / determinante\r\n        y= (a*f - c*d) / determinante\r\n        return x,y\r\n    else:\r\n        # Salida por pantalla\r\n        return \"ERROR INGRESE OTROS NÚMEROS\"\r\n\r\n# Salida por pantalla\r\nprint(interseccion(a, b, c, d, e, f))" }, { "alpha_fraction": 0.3210010826587677, "alphanum_fraction": 0.36670294404029846, "avg_line_length": 39.681819915771484, "blob_id": 
"8b7d7d8aa76750451ebb56abd71ad6e8b19d5d2f", "content_id": "63a643adec3822559df7e220382f8af75d5090fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 927, "license_type": "no_license", "max_line_length": 77, "num_lines": 22, "path": "/ej5.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 5: Escribir un programa que realice la siguiente\r\n# operación aritmética (3+22.5)2(3+22.5)2 \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: operacion\r\n# Salidas: resultado\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\noperacion=\"(3+22.5)2(3+22.5)2\"\r\nresultado=(3+22*5)*2*(3+22*5)*2\r\n\r\n#Salida por pantalla\r\nprint(\"Programa para realizar la operación aritmética:{0}\".format(operacion))\r\nprint(\"Resultado:{0}\".format(resultado))\r\n\r\n" }, { "alpha_fraction": 0.3880341947078705, "alphanum_fraction": 0.41025641560554504, "avg_line_length": 31.02857208251953, "blob_id": "265a4bc0a7db4dac32cd5c5c59e05e85637895e0", "content_id": "4520f0e016f3ea9caa8992d8e2fa9e7a540e1d33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 67, "num_lines": 35, "path": "/EJ10.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 01/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 10: Escribir un programa que imprima todos los números\r\n# pares entre dos números que se le pidan al usuario.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: numero1, numero2\r\n# Salidas: pares\r\n# Procesos: Ingresar numero1\r\n# Ingresar numero2\r\n# Crear un bucle for\r\n# Mostrar los pares por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumero1=0\r\nnumero2=0\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el primer número\")\r\nnumero1=int(input())\r\n\r\nprint(\"Ingrese el segundo número\")\r\nnumero2=int(input())\r\n\r\n#Creo un bucle for\r\nfor pares in range(numero1,numero2+1):\r\n if pares%2==0:\r\n #Salida por pantalla\r\n print (pares)\r\n\r\n " }, { "alpha_fraction": 0.4682495892047882, "alphanum_fraction": 0.493097722530365, "avg_line_length": 32.150943756103516, "blob_id": "01d923cabcf2023fb7c14bd351e4e0e560d35e1a", "content_id": "b7d2b6429aaf5ac3c4feeabc15275984f043ee19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1830, "license_type": "no_license", "max_line_length": 183, "num_lines": 53, "path": "/EJ5 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": 
"#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 29/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 5: Implementar algoritmos que resuelvan los siguientes problemas:\r\n# a) Dados dos números, imprimir la suma, resta, división y\r\n# multiplicación de ambos. \r\n# b) Dado un número entero n, imprimir su tabla de multiplicar.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: a) numero1 , numero2\r\n# b) n\r\n#\r\n# Salidas: a) suma, resta, división, multiplicación\r\n# b) tabla\r\n#\r\n# Procesos: 1) Ingresar numero1\r\n# 2) Ingresar numero2\r\n# 3) Calcular la suma, resta, división y multiplicación\r\n# 4) Mostrar por pantalla\r\n# 5) Ingresar un número entero\r\n# 6) Mostrar su tabla de multiplicar\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumero1=0.00\r\nnumero2=0.00\r\nn=0\r\n\r\n#Declaración de constantes\r\ndesde=1\r\nhasta=10\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el primer número\")\r\nnumero1=float(input())\r\n\r\nprint(\"Ingrese el segundo número\")\r\nnumero2=float(input())\r\n\r\n#Salida por pantalla\r\nprint(\"La suma entre\",numero1, \"y\",numero2, \"es\", (numero1+numero2), \"la resta es\",(numero1-numero2), \"la división es\", (numero1/numero2), \"y la multiplicación es\", (numero1*numero2))\r\n\r\n#Ingreso de dato\r\nprint(\"Ingrese un nímero entero\")\r\nn=int(input())\r\n\r\n#Calculo la tabla de multiplicar\r\nfor f in range(desde, hasta + 1):\r\n\tprint(f'{n} x {f} = {n * f}')\r\n\t" }, { "alpha_fraction": 0.4555555582046509, "alphanum_fraction": 0.4748148024082184, "avg_line_length": 37.764705657958984, "blob_id": "2e5930bfcb19ede7a27aa1a64b7fd1765d566322", "content_id": "c308f965e8b7f78d0611baff7c414d5ea7923754", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1364, "license_type": "no_license", "max_line_length": 90, "num_lines": 34, "path": "/Guia 2 Ej 5.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 5: Dados dos valores enteros y distintos, emitir\r\n# una leyenda apropiada que informe cuál es el mayor entre ellos.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: numero1, numero2\r\n# Salidas: mayor\r\n# Procesos: Ingresar numero1\r\n# Ingresar numero2\r\n# Determinar cuál es mayor mediante el uso de un if else\r\n# Mostrar el resultado por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumero1=0\r\nnumero2=0\r\n\r\n# Ingreso de datos\r\nnumero1=int(input(\"Ingrese el primer número: \"))\r\nnumero2=int(input(\"Ingrese el segundo número: \"))\r\n\r\n# Determino cuál es el número mayor\r\nif numero1>numero2:\r\n # Salida por pantalla\r\n print(\"El número más grande entre los dos valores ingresados es: 
{0}\".format(numero1))\r\nelse:\r\n # Salida por pantalla\r\n print(\"El número más grande entre los dos valores ingresados es: {0}\".format(numero2))" }, { "alpha_fraction": 0.5640411972999573, "alphanum_fraction": 0.5955097675323486, "avg_line_length": 27.21505355834961, "blob_id": "6c9e3f1a54afda1265dfd08415f712cdc3a036f6", "content_id": "b529930a008ce85aa37f38aff5c88b80e9782e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5483, "license_type": "no_license", "max_line_length": 92, "num_lines": 186, "path": "/EJ2 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 29/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 2: Implementar algoritmos que permitan:\r\n# a) Calcular el perímetro de un rectángulo dada su base y su altura.\r\n# b) Calcular el área de un rectángulo dada su base y su altura.\r\n# c) Calcular el área de un rectángulo (alineado con los\r\n# ejes x e y) dadas sus coordenadas x1, x2, y1, y2.\r\n# d) Calcular el perímetro de un círculo dado su radio.\r\n# e) Calcular el área de un círculo dado su radio.\r\n# f) Calcular el volumen de una esfera dado su radio.\r\n# g) Dados los catetos de un triángulo rectángulo, calcular su hipotenusa.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: base, altura\r\n# coorx1 , coorx2 , coory1 , coory2\r\n# radio , pi\r\n# cateto1 , cateto2\r\n# \r\n# Salidas: p_rectangulo\r\n# a_rectangulo\r\n# a_rectangulo_coor\r\n# p_circulo\r\n# a_circulo\r\n# volumen\r\n# hipotenusa\r\n#\r\n# Procesos: 1) Ingresar base \r\n# 2) Ingresar altura \r\n# 3) Calcular el perímetro -> p_rectangulo = 2*base+2*altura\r\n#\r\n# 4) Ingresar base\r\n# 5) Ingresar altura\r\n# 6) Calcular el área -> a_rectangulo = base * altura\r\n#\r\n# 7) Ingresar coordenada x1\r\n# 8) Ingresar coordenada x2\r\n# 9) Ingresar coordenada y1\r\n# 10) Ingresar coordenada y2\r\n# 11) Calcular el área de un rectángulo (alineado con los\r\n# ejes x e y) -> a_rectangulo_coor=[coorx1*coorx2,coory1*coory2]\r\n#\r\n# 12) Ingrese el radio\r\n# 13) Calcular el perímetro del circulo. 
-> p_circulo = 2*pi*radio\r\n# \r\n# 14) Ingrese el radio\r\n# 15) Calcular el área de un círculo -> a_circulo = pi*(radio)**2\r\n#\r\n# 16) Ingrese el radio\r\n# 17) Calcular el volumen de una esfera -> volumen = 4/3*pi*(radio)**3\r\n#\r\n# 18) Ingrese el cateto1\r\n# 19) Ingrese el cateto2\r\n# 20) Calcular la hipotenusa -> hipotenusa**2 = cateto1**2 + cateto2**2 \r\n#\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\np_rectangulo=0.00\r\nbase=0.00\r\naltura=0.00\r\na_rectangulo=0.00\r\na_rectangulo_x1=0.00\r\na_rectangulo_y1=0.00\r\na_rectangulo_x2=0.00\r\na_rectangulo_y2=0.00\r\ncoorx1=0.00\r\ncoorx2=0.00\r\ncoory1=0.00\r\ncoory2=0.00\r\nradio=0.00\r\np_circulo=0.00\r\na_circulo=0.00\r\nvolumen=0.00\r\ncateto1=0.00\r\ncateto2=0.00\r\nhipotenusa=0.00\r\n\r\n# Declaración de constantes\r\npi=3.1416\r\n\r\n# A) ALGORITMO QUE CALCULE EL PERÍMETRO DE UN RECTÁNGULO DADA SU BASE Y SU ALTURA\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el valor de la base\")\r\nbase=float(input())\r\n\r\nprint(\"Ingrese el valor de la altura\")\r\naltura=float(input())\r\n\r\n#Calculo el perímetro\r\np_rectangulo=2*base+2*altura\r\n\r\n#Salida por pantalla\r\nprint(\"El perimetro del rectángulo con base \",base,\"y altura \",altura, \" es \", p_rectangulo)\r\n\r\n# B) ALGORITMO QUE CALCULE EL ÁREA DE UN RECTÁNGULO DADA SU BASE Y SU ALTURA\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el valor de la base\")\r\nbase=float(input())\r\n\r\nprint(\"Ingrese el valor de la altura\")\r\naltura=float(input())\r\n\r\n#Calculo el área\r\na_rectangulo=base*altura\r\n\r\n#Salida por pantalla\r\nprint(\"El área de un rectángulo con base\",base,\"y altura \",altura,\" es \", a_rectangulo)\r\n\r\n# C) ALGORITMO QUE CALCULE EL ÁREA DE UN RECTANGULO DADAS SUS COORDENADAS\r\n\r\n# Ingreso de datos\r\nprint(\"Ingrese el valor de la coordenada x1\")\r\ncoorx1=float(input())\r\n\r\nprint(\"Ingrese el valor de la coordenada y1\")\r\ncoory1=float(input())\r\n\r\nprint(\"Ingrese el valor de la coordenada x2\")\r\ncoorx2=float(input())\r\n\r\nprint(\"Ingrese el valor de la coordenada y2\")\r\ncoory2=float(input())\r\n\r\n#Calculo el área\r\na_rectangulo_coor=[coorx1*coorx2, coory1*coory2]\r\n\r\n#Salida por pantalla\r\nprint(\"El área de el rectángulo dadas sus coordenadas es \", a_rectangulo_coor)\r\n\r\n# d) Calcular el perímetro de un círculo dado su radio.\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el valor del radio\")\r\nradio=float(input())\r\n\r\n#Calculo el perímetro\r\np_circulo=2*pi*radio\r\n\r\n#Salida por pantalla\r\nprint(\"El perímetro de el circulo es \", p_circulo)\r\n\r\n# e) Calcular el área de un círculo dado su radio.\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el radio del circulo\")\r\nbase=float(input())\r\n\r\n#Calculo el área\r\na_circulo=pi*(radio)**2\r\n\r\n#Salida por pantalla\r\nprint(\"El área del circulo es \", a_circulo)\r\n\r\n# f) Calcular el volumen de una esfera dado su radio.\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese el radio de la esfera\")\r\nradio=float(input())\r\n\r\n#Calculo el volumen\r\nvolumen=4/3*pi*(radio)**3\r\n\r\n#Salida por pantalla\r\nprint(\"El volumen de la esfera es \", volumen)\r\n\r\n# G) Dados los catetos de un triángulo rectángulo, calcular su hipotenusa.\r\n\r\n#Ingreso de datos\r\nprint(\"el primer cateto del triángulo rectángulo\")\r\ncateto1=float(input())\r\n\r\nprint(\"Ingrese el segundo cateto\")\r\ncateto2=float(input())\r\n\r\n#Calculo la 
hipotenusa\r\nhipotenusa=cateto1**2 + cateto2**2\r\n\r\n#Salida por pantalla\r\nprint(\"El valor de la hipotenusa es \", pow(hipotenusa,2))\r\n" }, { "alpha_fraction": 0.27590513229370117, "alphanum_fraction": 0.29338327050209045, "avg_line_length": 39.6315803527832, "blob_id": "871be8b4da6c16a7ceb8971bdfd6b4c13eb8357e", "content_id": "c39d440c67e795f97f2a7e530e2f973ac87822c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 76, "num_lines": 19, "path": "/EJ13.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 02/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 13: Escribir un programa que imprima por pantalla\r\n# todas las fichas de dominó, de una por línea y sin repetir. \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: \r\n# Salidas: Fichas del dominó\r\n# Procesos: Crear un for in\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\nfor n in range (7):\r\n for f in range (0,7):\r\n print(n,f)\r\n\r\n " }, { "alpha_fraction": 0.443857342004776, "alphanum_fraction": 0.45970937609672546, "avg_line_length": 35.849998474121094, "blob_id": "9fd69baba3050c40d429ce1f5f206f7bb44cfebc", "content_id": "f99666507a35c7c598b944150e15d68cd8f1f34f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1523, "license_type": "no_license", "max_line_length": 74, "num_lines": 40, "path": "/ej6.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 6: Escribir un programa que pida al usuario su peso\r\n# (en kg) y estatura (en metros), calcule el índice\r\n# de masa corporal y lo almacene en una variable,\r\n# y muestre por pantalla la frase “Tu indice de masa\r\n# corportal es <imc>” donde <imc> es el indice de\r\n# masa corporal calculado redondeado con dos decimales. 
\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: peso, estatura\r\n# Salidas: imc\r\n# Procesos: Ingresar peso\r\n#           Ingresar estatura\r\n#           Calcular indice de masa corporal -> imc= peso/(estatura)**2\r\n#           Redondear con dos decimales -> round(imc,2)\r\n#           Mostrar imc\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\npeso=0.00\r\nestatura=0.00\r\nimc=0.00\r\n\r\n#Ingreso de datos\r\nprint(\"Ingrese su peso en kg\")\r\npeso=float(input())\r\nprint(\"Ingrese su estatura en m\")\r\nestatura=float(input())\r\n\r\n#Calculo indice de masa corporal\r\nimc=(peso)/(estatura**2)\r\n\r\n#Salida por pantalla\r\nprint(\"Tu indice de masa corporal es {0}\".format(round(imc,2)))\r\n" }, { "alpha_fraction": 0.39823660254478455, "alphanum_fraction": 0.42174872756004333, "avg_line_length": 34.83783721923828, "blob_id": "3cb0cb4fa098f1687d3625739616fff3589dea45", "content_id": "d1adc1b8ec1c0c2859ee386426b562a3c4605f3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 92, "num_lines": 37, "path": "/Guia 2 EJ 10.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 10: Se ingresa una edad, mostrar por pantalla alguna de las siguientes leyendas:\r\n#\t ‘menor’ si la edad es menor o igual a 12 \r\n#\t ‘cadete’ si la edad está comprendida entre 13 y 18\r\n#\t ‘juvenil’ si la edad es mayor que 18 y no supera los 26\r\n#\t ‘mayor’ en el caso que no cumpla ninguna de las condiciones anteriores\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: edad\r\n# Salidas: menor, mayor, cadete, juvenil\r\n# Procesos: Ingresar una edad\r\n#           Determinar cuál leyenda es -> utilización del if\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nedad=0\r\n\r\n# Ingreso de dato\r\nedad=int(input(\"Ingrese la edad: \"))\r\n\r\n# Creo un condicional\r\nif edad<=12:\r\n    print(\"Es menor\")\r\nelse:\r\n    if 13<=edad<=18:\r\n        print(\"Es cadete\")\r\n    else:\r\n        if 18<edad<=26:\r\n            print(\"Es juvenil\")\r\n        else:\r\n            print(\"Es mayor\")" }, { "alpha_fraction": 0.47749391198158264, "alphanum_fraction": 0.49635037779808044, "avg_line_length": 34.57777786254883, "blob_id": "0d6c95982f5fb4244df5cfbbb6d04fd73e0f4fb0", "content_id": "b55e759c719b2c3dfeb82b97f63b673170b5bd48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1657, "license_type": "no_license", "max_line_length": 83, "num_lines": 45, "path": "/Guia 2 Ej 4.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 4: A partir de un valor entero ingresado por 
teclado, se pide informar:\r\n# a)\tLa quinta parte de dicho valor\r\n# b)\tEl resto de la división por 5\r\n# c)\tLa séptima parte del resultado del punto a)\r\n\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: cantidad\r\n# Salidas: division\r\n# Procesos: Ingresar el valor\r\n# Calcular la quinta parte del valor -> division=cantidad/5\r\n# Calcular el resto de la división por 5 -> cantidad%5\r\n# Calcular la séptima parte del resultado del punto a) -> division/7\r\n# Mostrar los resultados por teclado\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\ncantidad=0\r\ndivision=0\r\nresto=0\r\nparte7=0\r\n\r\n# Ingreso de dato\r\ncantidad=int(input(\"Ingrese el número: \"))\r\n\r\n# Calcular la quinta parte\r\ndivision=cantidad/5\r\n\r\n# Calcular el resto de la división por 5\r\nresto=cantidad%5\r\n\r\n# Calcular la séptima parte\r\nparte7=division/7\r\n\r\n# Salida por pantalla\r\nprint(\"a) La quinta parte de {0}\".format(cantidad),\"es: {0}\".format(division))\r\nprint(\"b) El resto de la división por 5 es: {0}\".format(resto))\r\nprint(\"c) La séptima parte del resultado del punto a) es: {0}\".format(parte7))" }, { "alpha_fraction": 0.44032615423202515, "alphanum_fraction": 0.4529280960559845, "avg_line_length": 32.589744567871094, "blob_id": "3f2ee342ae1fecf013d1d87538915e9c47b111a8", "content_id": "4764b74c71d13616300000948737ce746c0b198c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1357, "license_type": "no_license", "max_line_length": 88, "num_lines": 39, "path": "/G3 ej5.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 15/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 5: Ingresar e informar valores, mientras que el valor \r\n# ingresado no sea negativo. Informar la cantidad de valores ingresados. \r\n#\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: valor, continuar\r\n# Salidas: cantidad\r\n# Procesos: Pedir al usuario ingresar numeros\r\n# Contar los numeros positivos\r\n# Mostrar los positivos por pantalla \r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nvalor=0.00\r\ncantidad=0\r\ncontinuar=\"S\"\r\n\r\nwhile continuar==\"S\":\r\n continuar=\"N\"\r\n # Ingreso de dato\r\n valor=float(input(\"Ingrese un número: \"))\r\n \r\n #Utilizo un contador para contar solo los números positivos\r\n if valor>0:\r\n cantidad=cantidad+1\r\n \r\n # Ingreso de dato\r\n print(\"Desea ingresar otro número? 
S/N\")\r\n continuar=input()\r\n\r\n# Salida por pantalla\r\nprint(\"Usted ingresó {0}\".format(cantidad),\"valores positivos\")\r\n" }, { "alpha_fraction": 0.4754200279712677, "alphanum_fraction": 0.4853764772415161, "avg_line_length": 28.903846740722656, "blob_id": "c86eb8019fa155985df8d7c7360cde8a732a0ba5", "content_id": "3a3f94a975d5b162beb3023e75b4d45cb433eedc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1617, "license_type": "no_license", "max_line_length": 78, "num_lines": 52, "path": "/G4 EJ 3.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 15/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 3: Dado un conjunto de Nombres y Fechas de nacimientos (AAAAMMDD),\r\n# que finaliza con un Nombre = ‘FIN’, informar el nombre de la \r\n# persona con mayor edad y el de la más joven.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: continuar, nombresyfechas, contador\r\n# Salidas: mayor, menor\r\n# Procesos: Ingresar un nombre\r\n# Ingresar la fecha\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\ncontinuar=\"S\"\r\nnombresyfechas={}\r\ncontador=0\r\nmayor=0\r\nmenor=0\r\n\r\nimport datetime\r\nwhile continuar==\"S\":\r\n # Ingreso de dato\r\n\tnombre=input(\"Ingrese un nombre: \")\r\n\tif nombre==\"fin\" or nombre==\"FIN\":\r\n\t\tcontinuar=\"N\"\r\n\telse:\r\n\t\tnombresyfechas[nombre]=0\r\n # Ingreso de dato\r\n\t\tf=input(\"Ingrese fecha de nacimiento formato (ddmmaaaa) : \")\r\n\t\tfecha=datetime.datetime.strptime(f, '%d%m%Y')\r\n\t\tif contador==0:\r\n\t\t\tfechamayor=fecha\r\n\t\t\tfechamenor=fecha\r\n\t\telse:\r\n\t\t\tif fecha < fechamayor:\r\n\t\t\t\tmayor= nombre\r\n\r\n\t\t\tif fecha > fechamenor:\r\n\t\t\t\tmenor= nombre\r\n\r\n\t\tnombresyfechas[nombre]= fecha\r\n\tcontador+=1\r\n\r\n# Salida por pantalla\r\nprint(\"El que tiene mayor edad es: \",mayor)\r\nprint(\"El más joven es: \",menor)\r\n" }, { "alpha_fraction": 0.4068504571914673, "alphanum_fraction": 0.4385964870452881, "avg_line_length": 30.351350784301758, "blob_id": "3d6ac72082983c1dcc3a873ef37430d5dd84fea3", "content_id": "5f78a1ac83ddc75213b1ed9eccdd74e5934fe60b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1204, "license_type": "no_license", "max_line_length": 83, "num_lines": 37, "path": "/G3 ej1.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 13/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 1: Informar los primeros 100 números naturales y su sumatoria.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: \r\n# Salidas: numeros, suma\r\n# Procesos: Muestro los 100 numeros naturales\r\n# Calcular la sumatoria entre ellos\r\n# Mostrar la sumatoria en pantalla\r\n#***********************************************************\r\n# D I S E Ñ 
O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumeros=1\r\nsuma=0\r\n\r\n# Salida por pantalla\r\nprint(\"Los primeros 100 números naturales\")\r\n\r\nwhile numeros>0 and numeros<=100:\r\n # Salida por pantalla\r\n print(numeros, end=\" \")\r\n # Contador\r\n numeros=numeros+1\r\n \r\n\r\n# Calculo la sumatoria\r\nsuma=(((1+100))/2)*100\r\n\r\n# Salida por pantalla\r\nprint(end=\"\\n\")\r\nprint(\"La sumatoria entre los primeros 100 números naturales es: {0}\".format(suma))\r\n" }, { "alpha_fraction": 0.4277879297733307, "alphanum_fraction": 0.44180378317832947, "avg_line_length": 28.425926208496094, "blob_id": "706ce0969de59ae2bc513088b67367ae72c61a5f", "content_id": "257cee41d0bdfa4f8a32096f8ebaca802477eeb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1650, "license_type": "no_license", "max_line_length": 77, "num_lines": 54, "path": "/Guia 2 EJ 11.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 06/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 11: Escribir un algoritmo que resuelvan los siguientes problemas:\r\n# a) Dado un número entero n, indicar si es par o no.\r\n# b) Dado un número entero n, indicar si es primo o no. \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: numero, division, v\r\n# Salidas: par, impar, primo, no primo\r\n# Procesos: a) Ingresar un numero entero\r\n# Determinar si es par o no -> n%2\r\n# Mostrar por pantalla\r\n# b) Ingresar un numero entero\r\n# Determinar si es primo o no -> def es_primo(num, n=2)\r\n# Mostrar por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumero=0\r\ndivision=0.00\r\nv=0\r\n\r\n# Ingreso de dato\r\nnumero=int(input(\"Ingrese un número entero: \"))\r\n\r\n# a) ¿Es par o impar?\r\ndivision=numero%2\r\n\r\nif division==0:\r\n print(\"Es par\")\r\nelse:\r\n print(\"Es impar\")\r\n\r\n# b) ¿Es primo o no?\r\n\r\n# Ingreso de dato\r\nv=int(input(\"Ingrese el numero: \"))\r\n\r\ndef es_primo(num, n=2):\r\n if n >= num:\r\n # Salida por pantalla\r\n return\"Es primo\"\r\n elif num % n != 0:\r\n return es_primo(num, n + 1)\r\n else:\r\n # Salida por pantalla\r\n return\"No es primo\"\r\n\r\nprint(es_primo(v))" }, { "alpha_fraction": 0.5762897729873657, "alphanum_fraction": 0.5938529372215271, "avg_line_length": 47.69091033935547, "blob_id": "451b0da86a593fabdcc80fc9e80e206ed86aba50", "content_id": "be4213442a41bfe032bf5ffd2ccbbffbf9d089ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2753, "license_type": "no_license", "max_line_length": 123, "num_lines": 55, "path": "/ej8.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 8: Imagina que acabas de abrir una nueva cuenta\r\n# de ahorros que te ofrece el 4% de interés al\r\n# año. 
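# A minimal sketch of the compound-interest update described here, assuming the
# 4% annual rate and a hypothetical opening deposit of 1000.00:
saldo = 1000.00                       # illustrative starting balance
for anio in range(1, 4):              # first, second and third year
    saldo = round(saldo * 1.04, 2)    # credit 4% interest to the balance
    print("Ahorros tras el año", anio, ":", saldo)   # 1040.0, 1081.6, 1124.86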
Estos ahorros debido a intereses, que\r\n# no se cobran hasta finales de año, se te añaden\r\n# al balance final de tu cuenta de ahorros.\r\n# Escribir un programa que comience leyendo la\r\n# cantidad de dinero depositada en la cuenta de\r\n# ahorros, introducida por el usuario. Después\r\n# el programa debe calcular y mostrar por pantalla\r\n# la cantidad de ahorros tras el primer, segundo\r\n# y tercer años. Redondear cada cantidad a dos decimales.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: ahorro_inicial, interes\r\n#\r\n# Salidas: primer_ahorro, segundo_ahorro, tercer_ahorro\r\n#\r\n# Procesos: Ingresar el monto que deposito en la cuenta de ahorros \r\n# Calcular la cantidad de ahorros tras el primer año -> primer_ahorro=(ahorro_inicial*interes/100)+ahorro_inicial\r\n# Calcular la cantidad de ahorros tras el segundo año -> segundo_ahorro=(primer_ahorro*interes/100)+primer_ahorro\r\n# Calcular la cantidad de ahorros tras el tercer año -> tercer_ahorro=(segundo_ahorro*interes/100)+segundo_ahorro\r\n# Mostrar por pantalla los calculos redondeados a dos decimales\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nahorro_inicial=0.00\r\nprimer_ahorro=0.00\r\nsegundo_ahorro=0.00\r\ntercer_ahorro=0.00\r\n\r\n# Declaración constantes\r\ninteres=4\r\n\r\n# Ingreso de datos\r\nprint(\"Ingrese la cantidad de dinero depositado en la cuenta de ahorros\")\r\nahorro_inicial=float(input())\r\n\r\n# Calculo cantidad de ahorros tras el primer año\r\nprimer_ahorro=(ahorro_inicial*interes/100)+ahorro_inicial\r\n# Calculo cantidad de ahorros tras el segundo año\r\nsegundo_ahorro=(primer_ahorro*interes/100)+primer_ahorro\r\n# Calculo cantidad de ahorros tras el tercer año\r\ntercer_ahorro=(segundo_ahorro*interes/100)+segundo_ahorro\r\n\r\n# Salida por pantalla\r\nprint(\"La cantidad de ahorros tras el primer año fue de: {0}\".format(round(primer_ahorro,2)))\r\nprint(\"La cantidad de ahorros tras el segundo año fue de: {0}\".format(round(segundo_ahorro,2)))\r\nprint(\"La cantidad de ahorros tras el tercer año fue de: {0}\".format(round(tercer_ahorro,2)))\r\n" }, { "alpha_fraction": 0.38130465149879456, "alphanum_fraction": 0.41761937737464905, "avg_line_length": 31.795454025268555, "blob_id": "6dc8835bbb27fffc066029022913a9693bb7328e", "content_id": "0f2ed40575e216ee13b4e23199f323d0a82d656a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1503, "license_type": "no_license", "max_line_length": 87, "num_lines": 44, "path": "/Guia 2 EJ 15.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 08/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 15: Escribir un programa que reciba como entrada un entero\r\n# representando un año (por ejemplo 751, 1999, o 2158), y\r\n# muestre por pantalla el mismo año escrito en números romanos.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: año\r\n# Salidas: enterosaromanos\r\n# Procesos: Ingreso el año\r\n# Creo una función de conversión mediante listas\r\n# 
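# The conversion function below is a greedy algorithm: it walks the two lists
# from the largest denomination down, subtracting each value while it still
# fits. A worked example (not in the original file): 1999 -> M (resto 999)
# -> CM (resto 99) -> XC (resto 9) -> IX (resto 0), giving "MCMXCIX".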
Muestro el resultado en pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\naño=0\r\n\r\n# Ingreso de dato\r\naño=int(input(\"Ingrese el año: \"))\r\n\r\n# Creo una función\r\ndef enterosaromanos(entero):\r\n numeros = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\r\n numerales = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']\r\n\r\n numeral = ''\r\n i = 0\r\n\r\n while entero > 0:\r\n for _ in range(entero // numeros[i]):\r\n numeral += numerales[i]\r\n entero -= numeros[i]\r\n\r\n i += 1\r\n \r\n return numeral\r\n\r\n# Salida por pantalla\r\nprint(enterosaromanos(año))\r\n" }, { "alpha_fraction": 0.30595237016677856, "alphanum_fraction": 0.31785714626312256, "avg_line_length": 36.181819915771484, "blob_id": "64cc3cad8071ffa7567c135d2a6c196bf70b2d82", "content_id": "743c67fc76c3bff327ce38ff98f98c2bbe877cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 846, "license_type": "no_license", "max_line_length": 61, "num_lines": 22, "path": "/ej2.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 2: Escribir un programa que almacene la cadena\r\n# ¡Hola Mundo! en una variable y que muestre por\r\n# pantalla el contenido de la variable.\r\n# \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: texto\r\n# Salidas: texto\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\ntexto=\"¡Hola Mundo!\"\r\n\r\n# Muestro el texto por pantalla\r\nprint (texto)\r\n" }, { "alpha_fraction": 0.4384615421295166, "alphanum_fraction": 0.45897436141967773, "avg_line_length": 33.45454406738281, "blob_id": "b920304cd84391ec784329f3950355ef0556f6ad", "content_id": "928f12f8ca836db0372c103e54b45ef601a13178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1572, "license_type": "no_license", "max_line_length": 85, "num_lines": 44, "path": "/ej7.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 7: Escribir un programa que pida al usuario dos\r\n# números enteros y muestre por pantalla\r\n# “la division <n> entre <m> da un cociente <c>\r\n# y un resto <r>” donde <n> y <m> son los números\r\n# introducidos por el usuario, y <c> y <r> son el\r\n# cociente y el resto de la división entera\r\n# respectivamente.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: nro1, nro2\r\n# Salidas: cociente, resto\r\n# Procesos: Ingreso nro1\r\n# Ingreso nro2\r\n# Calculo el cociente -> cociente=nro1/nro2\r\n# Calculo el resto -> resto=nro1%nro2\r\n# Muestro por pantalla el cociente y 
resto\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnro1=0\r\nnro2=0\r\ncociente=0\r\nresto=0\r\n\r\n# Ingreso de datos\r\nprint(\"Ingrese un numero entero\")\r\nnro1=int(input())\r\nprint(\"Ingrese otro numero entero\")\r\nnro2=int(input())\r\n\r\n#Calculo el cociente de la división entera\r\ncociente=(nro1//nro2)\r\n#Calculo el resto\r\nresto=(nro1%nro2)\r\n\r\n#Salida por pantalla\r\nprint(\"La división\",nro1,\"entre\",nro2,\"da un cociente:\",cociente,\"y un resto:\",resto)\r\n" },
{ "alpha_fraction": 0.3642578125, "alphanum_fraction": 0.375, "avg_line_length": 35.925926208496094, "blob_id": "4bec097ee7a52bc3c9e6891a8426fc4772c2aa8d", "content_id": "c7a58de7828619e77fdb6c5ec699d79e1fad87be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 61, "num_lines": 27, "path": "/ej3.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 3: Escribir un programa que pregunte el nombre\r\n# del usuario en la consola y, después de que el\r\n# usuario lo introduzca, muestre por pantalla la\r\n# cadena ¡Hola <nombre>!, donde <nombre> es el\r\n# nombre que el usuario haya introducido. \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: nombre\r\n# Salidas: nombre\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnombre=\"\"\r\n\r\n# Ingreso de dato\r\nprint (\"Introduzca su nombre\")\r\nnombre=input()\r\n\r\n# Salida por pantalla\r\nprint (\"Hola {0}\".format(nombre))\r\n" },
{ "alpha_fraction": 0.40638482570648193, "alphanum_fraction": 0.42364105582237244, "avg_line_length": 36.43333435058594, "blob_id": "d3d028375e5cded543cd89228bbfe3f362e90ee3", "content_id": "ff5079d3fa67be6b6267cc9c6fe44c9b2d936df1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1168, "license_type": "no_license", "max_line_length": 86, "num_lines": 30, "path": "/EJ6 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 29/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 6: Escribir un programa que le pida una palabra al usuario,\r\n# para luego imprimirla 1000 veces, en una única línea, \r\n# con espacios intermedios. 
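# A minimal sketch of the print/end idiom that the hint below points at; the
# word "hola" is illustrative, and range(1000) gives exactly 1000 repetitions:
for _ in range(1000):
    print("hola", end=" ")   # end=" " keeps every copy on one line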
Ayuda: Investigar acerca del parámetro \r\n# end de la función print\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: palabra, continuar\r\n# Salidas: palabra\r\n# Procesos: Ingresar una palabra\r\n# Mostrar la repetición de esa palabra en una linea con espacios intermedios\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\npalabra=\"\"\r\ncontador=1\r\n\r\n#Ingreso de dato\r\nprint(\"Escriba una palabra\")\r\npalabra=input()\r\n\r\nwhile (contador)<1000:\r\n print((palabra),end=\" \")\r\n contador=contador+1\r\n\r\n " }, { "alpha_fraction": 0.4783673584461212, "alphanum_fraction": 0.5289795994758606, "avg_line_length": 40.24137878417969, "blob_id": "b04913e87bc82e8e4cb1addec59f010559e78e5e", "content_id": "917c5d14a8184e2e2a4af25a79626a79abe9ecfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2465, "license_type": "no_license", "max_line_length": 102, "num_lines": 58, "path": "/G3 ej6.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 15/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 6: Se ingresa un conjunto de valores reales, cada uno de los cuales\r\n# representan el sueldo de un empleado, excepto el último valor que\r\n# es cero e indica el fin del conjunto. Se pide desarrollar un programa\r\n# que determine e informe:\r\n# a)\tCuántos empleados ganan menos $1.520.\r\n# b)\tCuántos ganan $1.520 o más pero menos de $2.780.\r\n# c)\tCuántos ganan $2.780 o más pero menos de $5.999.\r\n# d)\tCuántos ganan $5.999 o más.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: sueldo, continuar\r\n# Salidas: sueldoa, sueldob, sueldoc, sueldod\r\n# Procesos: Ingresar sueldos\r\n# contar los sueldo menores a 1520 -> sueldoa\r\n# contar los sueldos mayores que 1520 y menores que 2700 -> sueldob\r\n# Contar los sueldos mayores que 2780 y menores que 5999 -> sueldoc\r\n# Contar los sueldos mayores que 5999 -> sueldod\r\n# Parar el programa cuando se ingrese 0\r\n# Mostrar los resultados por pantalla \r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nsueldo=0.00\r\ncontinuar=\"S\"\r\nsueldoa=0\r\nsueldob=0\r\nsueldoc=0\r\nsueldod=0\r\n\r\nwhile continuar==\"S\":\r\n continuar==\"N\"\r\n # Ingreso de dato\r\n sueldo=float(input(\"Ingrese el sueldo de un empleado: \"))\r\n\r\n if sueldo<1520 and sueldo>=1:\r\n sueldoa=sueldoa+1\r\n elif sueldo>=1520 and sueldo<2780:\r\n sueldob=sueldob+1\r\n elif sueldo>=2780 and sueldo<5999:\r\n sueldoc=sueldoc+1\r\n elif sueldo>=5999:\r\n sueldod=sueldod+1\r\n elif sueldo==0:\r\n break\r\n\r\n\r\n# Salida por pantalla\r\nprint(\"La cantidad de empleados que ganan menos de $1.520 es: {0}\".format(sueldoa))\r\nprint(\"La cantidad de empleados que ganan más de $1.520 pero menos de $2.700 es: {0}\".format(sueldob))\r\nprint(\"La cantidad de empleados que ganan más de $2.700 pero menos de $5.999 es: {0}\".format(sueldoc))\r\nprint(\"La 
cantidad de empleados que ganan más de $5.999 es: {0}\".format(sueldod))\r\n" }, { "alpha_fraction": 0.2505263090133667, "alphanum_fraction": 0.27157893776893616, "avg_line_length": 41, "blob_id": "e96dfd5381e345af787f562950c1b2384618761a", "content_id": "457becd83a2d18a0110e38bdfc7fcc7133cc7f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/ej1.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 1: Escribir un programa que muestre por pantalla\r\n# la cadena ¡Hola Mundo!\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\nprint (\"¡Hola Mundo!\")\r\n\r\n" }, { "alpha_fraction": 0.3544575870037079, "alphanum_fraction": 0.37056928873062134, "avg_line_length": 36.875, "blob_id": "45639e9894eda129ec570807026fcf73f69eb359", "content_id": "db9cb6fc089d57561d2f29c777a6dbb42f938615", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 936, "license_type": "no_license", "max_line_length": 65, "num_lines": 24, "path": "/Guia 2 Ej 12.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 06/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 12: Escribir un algoritmo, que devuelva el valor\r\n# absoluto de cualquier valor que reciba. 
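# Besides the built-in abs() used below, the same result can be written as a
# conditional expression; a minimal sketch with an illustrative value:
num = -7.5                            # hypothetical input
absoluto = num if num >= 0 else -num  # flip the sign only when negative
print(absoluto)                       # -> 7.5, same as abs(num)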
\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: num\r\n# Salidas: abs\r\n# Procesos: Ingresar un valor cualquiera\r\n# Mostrar por pantalla el absoluto del número ingresado\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnum=0.00\r\n\r\nnum=float(input(\"Ingrese un valor: \"))\r\n\r\n# Salida por pantalla\r\nprint(\"El absoluto de {0}\".format(num),\"es: \",abs(num))" }, { "alpha_fraction": 0.4300554096698761, "alphanum_fraction": 0.4591412842273712, "avg_line_length": 33.26829147338867, "blob_id": "9cd0a340a710d17a74bd61af950f0e27149acb1d", "content_id": "e9d6ecdea0e0b1e0eb3be76883360c16f8c43126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 86, "num_lines": 41, "path": "/G3 ej3.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 13/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 3: Dados 50 números enteros, informar el promedio \r\n# de los mayores que 100 y la suma de los menores que –10.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: numeros\r\n# Salidas: promedio, suma\r\n# Procesos: Ingresar los numeros hasta 50\r\n# calcular el promedio\r\n# calcular la suma\r\n# Mostrar el promedio y la suma por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumeros=1\r\npromedio=0\r\nsuma=0\r\nnaturales=0\r\nrpromedio=0\r\n\r\nfor i in range(50):\r\n # Salida por pantalla\r\n naturales=int(input(f\"Ingrese el valor {numeros} de los 50 numeros a ingresar: \"))\r\n numeros+=1\r\n if naturales>=100:\r\n # Calculo el promedio\r\n promedio=naturales%100\r\n rpromedio+=naturales\r\n elif naturales<=-10:\r\n # Calculo la suma\r\n suma+=naturales\r\n\r\n# Salida por pantalla \r\nprint (f\"El promedio de los numeros mayores a 100 es: {rpromedio}\")\r\nprint (f\"La suma de los numeros menores a -10 es: {suma}\")" }, { "alpha_fraction": 0.5107675194740295, "alphanum_fraction": 0.5450027585029602, "avg_line_length": 30.96363639831543, "blob_id": "a3a3d34c80e4821aec9084f93182ffc24d9132c2", "content_id": "d1e71d7f397acd098356fa932827b54de135bd57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 116, "num_lines": 55, "path": "/G4 EJ 4.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 16/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 4: Dado un conjunto de valores, que finaliza con un valor nulo, determinar e imprimir (si hubo valores):\r\n# a) El valor máximo negativo\r\n# b) El valor mínimo positivo\r\n# c) El valor mínimo dentro del rango -17.3 y 26.9\r\n# d) El promedio de todos los 
valores.\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnull=None\r\ncontinuar=\"S\"\r\nmaximo_n=0\r\nvalores=0\r\nminimo=100000000000\r\ncantidad=0\r\npromedio=0.00\r\ncontador=0\r\nacumulador=0\r\nminimorango=0\r\n\r\nwhile True:\r\n try:\r\n valores=int(input(\"Ingrese un valor numerico, se cancelara el ingreso con (null): \"))\r\n except ValueError:\r\n null=True\r\n break\r\n \r\n \r\n if valores>-17.3 and valores<26.9: #c) El valor mínimo dentro del rango -17.3 y 26.9\r\n if valores<minimorango:\r\n minimorango=valores\r\n \r\n if valores<0: #Valor maximo negativo\r\n if valores<maximo_n:\r\n maximo_n=valores\r\n \r\n if valores>0: #valor minimo positivo\r\n if valores<minimo:\r\n minimo=valores\r\n \r\n acumulador=acumulador+valores \r\n contador+=1\r\n\r\n promedio=acumulador/contador #promedio de todos los valores\r\n\r\n#Salidas por pantalla\r\nprint(\"El valor máximo negativo es {0}\".format(maximo_n))\r\nprint(\"El valor mínimo positivo es {0}\".format(minimo))\r\nprint(\"El valor mínimo dentro del rango -17.3 y 26.9 es {0}\".format(minimorango))\r\nprint(\"El promedio de todos los valores es {0}\".format(promedio))" }, { "alpha_fraction": 0.38684210181236267, "alphanum_fraction": 0.41228070855140686, "avg_line_length": 33.625, "blob_id": "1979e41523cfa6dd8b3aab8866b1d3cc9592345b", "content_id": "075cbc4546bf71d48a248740e1091b33d578de55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 73, "num_lines": 32, "path": "/EJ8 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 29/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 8: Escribir una función que convierta un valor dado\r\n# en grados Fahrenheit a grados Celsius. 
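# Note that the formula quoted below, F=(9/5)*C+32, maps Celsius to Fahrenheit;
# the code solves it for C, i.e. C=(F-32)*(5/9). A quick check with an
# illustrative value: F=212 gives C=(212-32)*(5/9)=100.0, the boiling point.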
Recordar que\r\n# la fórmula para la conversión es: F=(9/5)*C+32\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: F\r\n# Salidas: cel\r\n# Procesos: Ingresar grados fahrenheit\r\n# Calcular la conversión a grados Celcius -> (F − 32)*(5/9) = C\r\n# Mostrar por pantalla el resultado\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nF=0.00\r\ncel=0.00\r\n\r\n#Ingreso de dato\r\nprint(\"Ingrese los grados Fahrenheit\")\r\nF=float(input())\r\n\r\n#Calculo la conversión\r\ncel=(F-32)*(5/9)\r\n\r\n#Salida por pantalla\r\nprint(\"En grados Celsius seria {0}\".format(cel))\r\n" }, { "alpha_fraction": 0.4039109945297241, "alphanum_fraction": 0.4248145520687103, "avg_line_length": 32.44186019897461, "blob_id": "1e6cd6f272029c38bd7be3315b10e51ea079ae06", "content_id": "9f7a379440f2d1d3ea238b04f4eaa3c59ff32bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1495, "license_type": "no_license", "max_line_length": 85, "num_lines": 43, "path": "/EJ11.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 02/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 11: Escribir un programa que le pregunte al usuario un número n\r\n# e imprima los primeros n números triangulares, junto \r\n# con su índice. Los números triangulares se obtienen mediante\r\n# la suma de los números naturales desde 1 hasta n. 
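# The closed form used below, i*(i+1)/2, equals the running sum 1+2+...+i.
# A quick check (not in the original file): i=5 -> 5*6/2 = 15 = 1+2+3+4+5.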
Es decir, si se\r\n# piden los primeros 5 números triangulares, el programa debe imprimir:\r\n# 1 - 1 \r\n# 2 - 3 \r\n# 3 - 6 \r\n# 4 - 10 \r\n# 5 - 15 \r\n#\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: n\r\n# Salidas: resultado\r\n# Procesos: Ingresar el numero\r\n# Calcular los numeros triangulares\r\n# Mostrar por pantalla el resultado\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nn=0\r\nresultado=0\r\n\r\n#Ingreso de dato\r\nprint(\"Ingrese un número\")\r\nn= int(input())\r\n \r\n#Calculo los números triangulares\r\nresultado={}\r\nfor i in range(1,n+1):\r\n\tresultado[i]=(i*(i+1)/2)\r\n \r\nfor i in resultado:\r\n #Salida por pantalla\r\n\tprint(i,resultado[i])\r\n\r\n" }, { "alpha_fraction": 0.375, "alphanum_fraction": 0.3841911852359772, "avg_line_length": 36.85714340209961, "blob_id": "66ee5ad87366abb903c9731877db4586f37620f0", "content_id": "3a1654c2026acdd474970fe996aebbc249a34cda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1101, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/ej4.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº0\r\n# Fecha: 26/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 4: Escribi un programa que pregunte el nombre\r\n# del usuario en la consola y después de que\r\n# el usuario lo introduzca muestre por pantalla\r\n# “<nombre> tiene <n> letras”, donde <nombre> es\r\n# el nombre de usuario en mayúsculas y <n> es el\r\n# número de letras que tiene el nombre. \r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: nombre\r\n# Salidas: nombre\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnombre=\"\"\r\n\r\n#Ingreso de dato\r\nprint(\"¿Cuál es su nombre?\")\r\nnombre=input()\r\n\r\n#Salida por pantalla\r\nprint((nombre),\"tiene\",(len(nombre)),\"letras.\")\r\n" }, { "alpha_fraction": 0.5150449275970459, "alphanum_fraction": 0.5338022708892822, "avg_line_length": 34.04225540161133, "blob_id": "c240bfedecd78529fdc4f1e38de20554fb65175e", "content_id": "4d281db8029980f837f636b880031009d0cf401a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2584, "license_type": "no_license", "max_line_length": 116, "num_lines": 71, "path": "/Guia 2 EJ 9.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 9: Dado el siguiente enunciado, estrategia y representación\r\n# gráfica especifique los datos de entrada, de salida y la\r\n# codificación en Python. Enunciado: Dados dos números, mostrar \r\n# un menú con opciones de sumar, restar o multiplicar dichos \r\n# números. 
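# A compact alternative to the if-chain used below is a dictionary dispatch;
# a minimal sketch with hypothetical names, not the original design:
operaciones = {"A": lambda x, y: x + y,
               "B": lambda x, y: x - y,
               "C": lambda x, y: x * y}
# e.g. operaciones["A"](2.0, 3.0) -> 5.0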
Solicite elegir una opción.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: numero1, numero2, eleccion, continuar\r\n# Salidas: suma, resta, multiplicacion\r\n# Procesos: Ingresar numero1\r\n# Ingresar numero2\r\n# Muestro un menú\r\n# Determino la opción elegida\r\n# Muestro la salida por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnumero1=0.00\r\nnumero2=0.00\r\nsuma=0.00\r\nresta=0.00\r\nmultiplicacion=0.00\r\neleccion=\"\"\r\ncontinuar=\"S\"\r\n\r\n#Ingreso de dato\r\nnumero1=float(input(\"Ingrese el número 1: \"))\r\nnumero2=float(input(\"Ingrese el número 2: \"))\r\n\r\n# Creo un ciclo de repetición while\r\nwhile continuar==\"S\":\r\n # Creo un menú de opciones\r\n print(\" SELECCIONE EL OPERADOR QUE DESEE\")\r\n print(\" A) Sumar\")\r\n print(\" B) Restar\")\r\n print(\" C) Multiplicar\")\r\n print(\" D) Finalizar y Salir\")\r\n # Ingreso de dato\r\n eleccion=input()\r\n\r\n #Determino la opción elegida OPCION A) SUMA\r\n if eleccion==\"A\":\r\n # Calculo la suma\r\n suma=numero1+numero2\r\n # Salida por pantalla\r\n print(\"La suma entre los dos números es: {0}\".format(suma))\r\n \r\n #Determino la opción elegida OPCION B) RESTA\r\n if eleccion==\"B\":\r\n # Calculo la resta\r\n resta=numero1-numero2\r\n # Salida por pantalla\r\n print(\"La diferencia entre {0}\".format(numero1),\"menos {0}\".format(numero2),\"es igual a: {0}\".format(resta))\r\n\r\n #Determino la opción elegida OPCION C) MULTIPLICACION\r\n if eleccion==\"C\":\r\n # Calculo la multiplicación\r\n multiplicacion=numero1*numero2\r\n # Salida por pantalla\r\n print(\"La multiplicación entre los dos números da como resultado: {0}\".format(multiplicacion))\r\n\r\n # Opcioón D) SALIR Y FINALIZAR\r\n if eleccion==\"D\":\r\n continuar=\"N\"\r\n" }, { "alpha_fraction": 0.26173707842826843, "alphanum_fraction": 0.2828638553619385, "avg_line_length": 40.54999923706055, "blob_id": "ef7f0b14ed6325d8299828d75ac543a3066e8726", "content_id": "4a1e332b404219cd55508ff76f70a2efeb52a9f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 100, "num_lines": 20, "path": "/EJ3 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 29/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 3: Mostrar el resultado de ejecutar estos bloques de código en el intérprete de python: \r\n# >>> for i in range(5): ... print(i * i) b) \r\n# >>> for i in range(2, 6): ... 
print(i, 2 ** i) \r\n\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: \r\n# Salidas: \r\n# Procesos: \r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\nfor i in range(5): print(i * i)\r\nfor i in range(2, 6): print(i, 2 ** i) \r\n" }, { "alpha_fraction": 0.5835072994232178, "alphanum_fraction": 0.600904643535614, "avg_line_length": 37.91666793823242, "blob_id": "be16ab036382a2e372ab021e43331b76e8efcf54", "content_id": "a3bbda557f8533317631e8019d061db889e640f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2890, "license_type": "no_license", "max_line_length": 152, "num_lines": 72, "path": "/G4 ej 5.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº4\r\n# Fecha: 24/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 5: Se dispone de un lote de valores enteros positivos que finaliza\r\n# con un número negativo. El lote está dividido en sublotes por\r\n# medio de valores cero. Desarrollar un programa que determine e informe:\r\n# a)\tpor cada sublote el promedio de valores -> ()+()+()/3 LISTO\r\n# b)\tel total de sublotes procesados LISTO\r\n# c)\tel valor máximo del conjunto, indicando en que sublote se encontró y la posición relativa del mismo dentro del sublote LISTO\r\n# d)\tvalor mínimo de cada sublote\r\n# Nota: el lote puede estar vacío (primer valor negativo), o puede haber uno, varios o todos los sublotes vacíos (ceros consecutivos)\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nlotevalores=0\r\ncontinuar=\"S\"\r\ntotalsublotes=0\r\ndivisor=0\r\nacumulador=0\r\npromedio=0.00\r\nmaximoconjunto=0\r\npos_max_sublote=0\r\nposicion=0\r\nsublote=0\r\nminimo=100000000\r\nnumeros=[]\r\nminimosublote=0\r\n\r\nwhile continuar==\"S\":\r\n try: #Validacion\r\n lotevalores=int(input(\"Ingrese los valores del lote (finaliza con un numero negativo): \")) #Ingreso de dato\r\n except ValueError:\r\n print(\"Error. 
Tiene que ingresar un numero\")\r\n lotevalores=int(input(\"Ingrese los valores del lote (finaliza con un numero negativo): \"))\r\n\r\n if lotevalores<0:\r\n continuar=\"N\"\r\n else:\r\n numeros.append(lotevalores)\r\n if lotevalores!=0 and lotevalores>0:\r\n divisor+=1\r\n\r\n if lotevalores==0: # b)total de sublotes procesados\r\n totalsublotes+=1\r\n\r\n if lotevalores<minimo and lotevalores!=0 and lotevalores>0:\r\n minimo=lotevalores\r\n \r\n if lotevalores>0:\r\n acumulador=acumulador+lotevalores\r\n\r\n promedio=acumulador/divisor # a)promedio de valores\r\n\r\n for i in numeros:\r\n if i>maximoconjunto: # c.1) Valor máximo del conjunto\r\n maximoconjunto=i\r\n sublote=totalsublotes+1 # c.2) en que sublote se encontró\r\n posicion=numeros.index(i)\r\n \r\n if totalsublotes>0: #minimo del sublote\r\n if totalsublotes==0:\r\n if lotevalores<minimo:\r\n minimo=lotevalores\r\n\r\n#Salidas por pantalla\r\nprint(\"El promedio de los valores ingresados es {0}\".format(promedio))\r\nprint(\"El total de sublotes procesados es {0}\".format(totalsublotes))\r\nprint(\"El valor máximo del conjunto es %d, se encontró en el sublote %d, y su posicion dentro del sublote es %d\" %(maximoconjunto, sublote, posicion+1))\r\nprint(\"El valor minimo del sublote es {0}\".format(minimo))\r\n" }, { "alpha_fraction": 0.37835052609443665, "alphanum_fraction": 0.3907216489315033, "avg_line_length": 32.57143020629883, "blob_id": "758c9b7d4f280f167c080486839a660bfa9a577e", "content_id": "b295995c50fee39de9ca78faf918eba14dd3d699", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 979, "license_type": "no_license", "max_line_length": 100, "num_lines": 28, "path": "/EJ4 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 29/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 4: Implementar un algoritmo que, dado un número entero 𝑛, permita calcular su factorial.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: n\r\n# Salidas: factorial\r\n# Procesos: Ingresar un numero entero \r\n# Calcular su factorial\r\n# Mostrar por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nn=0\r\nfactorial=0\r\n\r\n#Ingreso de dato\r\nprint(\"Ingrese un número entero\")\r\nn=int(input())\r\n\r\n#Salida por pantalla\r\nfrom math import factorial\r\nprint(\"El factorial es\", (factorial(n)))\r\n\r\n" }, { "alpha_fraction": 0.4269954562187195, "alphanum_fraction": 0.4548994302749634, "avg_line_length": 33.88372039794922, "blob_id": "64bcf9e98b2890e427be0c2883bb579cabdaa7d8", "content_id": "078e219f993605b4ce81b59559095218702c97d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1560, "license_type": "no_license", "max_line_length": 94, "num_lines": 43, "path": "/Guia 2 Ej 7.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 7: 
Dado un triángulo representado por sus lados L1, L2, L3,\r\n# determinar e imprimir una leyenda según sea: equilátero, isósceles o escálenos.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: L1, L2, L3\r\n# Salidas: isósceles, equilátero, escaleno\r\n# Procesos: Ingresar el primer lado\r\n# Ingresar el segundo lado\r\n# Ingresar el tercer lado\r\n# Determino si: L1=L2=L2 -> equilátero\r\n# L1=L2!=L3 -> isósceles\r\n# L1!=L2!=L3 -> escaleno\r\n# Muestro el resultado por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nL1=0\r\nL2=0\r\nL3=0\r\n\r\n# Ingrese los lados\r\nL1=int(input(\"Ingrese el primer lado: \"))\r\nL2=int(input(\"Ingrese el segundo lado: \"))\r\nL3=int(input(\"Ingrese el tercer lado: \"))\r\n\r\n#Determino la opcion elegida mediante condicionales anidados\r\nif L1==L2!=L3:\r\n\t\tprint(\"Es un triángulo isósceles\")\r\nelse:\r\n\t\tif L1!=L2!=L3:\r\n\t\t\tprint(\"Es un triángulo escaleno\")\r\n\t\telse:\r\n\t\t\tif L1==L2==L3:\r\n\t\t\t\tprint(\"Es un triángulo equilátero\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Valor inválido\")" }, { "alpha_fraction": 0.4549861550331116, "alphanum_fraction": 0.47783932089805603, "avg_line_length": 30.81818199157715, "blob_id": "ccb5d7efab61854603112e05af6f03f8eb6f6901", "content_id": "315f5aab3efed88441e5980acf30ab14dacf165d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1453, "license_type": "no_license", "max_line_length": 82, "num_lines": 44, "path": "/EJ1 (1).py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 28/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 1: Escribir un programa que pregunte al usuario:\r\n# a) su nombre, y luego lo salude. b) dos números, y\r\n# luego muestre el producto. 
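# A one-line sketch of the product step, using illustrative literals in place
# of the input() calls that the program below reads:
print("El producto entre los dos números ingresados es {0} ".format(2.5 * 4.0))   # -> 10.0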
\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: nombre, numero1, numero2\r\n# Salidas: nombre, producto\r\n# Procesos: Ingresar nombre\r\n# Ingresar numero1\r\n# Ingresar numero2\r\n# Calcular el producto entre los dos numeros -> producto=numero1*numero2\r\n# Mostrar producto\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnombre=\"\"\r\nnumero1=0.00\r\nnumero2=0.00\r\nproducto=0.00\r\n\r\n#Ingreso de dato\r\nprint(\"¿Cuál es su nombre?\")\r\nnombre=input()\r\n\r\n#Salida por pantalla\r\nprint(\"Hola {0}\".format(nombre))\r\n\r\n#Ingreso de dato\r\nprint(\"Ingrese dos números\")\r\nnumero1=float(input())\r\nnumero2=float(input())\r\n\r\n#Calculo el producto entre los dos numeros ingresados\r\nproducto=numero1*numero2\r\n\r\n#Salida por pantalla\r\nprint(\"El producto entre los dos números ingresados es {0} \".format(producto))\r\n" }, { "alpha_fraction": 0.4481636583805084, "alphanum_fraction": 0.48303115367889404, "avg_line_length": 41.8979606628418, "blob_id": "5f1e65d39979694812722bfb4de492bf035ede3f", "content_id": "0e7764122c56d0715511c091b7197fa7d4b9039a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2163, "license_type": "no_license", "max_line_length": 112, "num_lines": 49, "path": "/Guia 2 EJ 2.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 2: Dado el siguiente enunciado y su representación gráfica \r\n# especifique los datos de entrada, de salida, estrategia, \r\n# seguimiento y codificación. Enunciado: Dado un número real\r\n# que representa el importe de una compra informar las \r\n# posibles formas de pago, según la siguiente tabla:\r\n# 1 cuota de $................. \r\n# 2 cuotas de $................. total $................. ( 5% de recargo)\r\n# 6 cuotas de $................. total $................. 
( 40% de recargo)\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: importe\r\n# Salidas: cuota2, cuota6\r\n# Procesos: Ingresar importe de la compra\r\n# Calcular las 2 cuotas y 6 cuotas + los recargos\r\n# Muestro un menú de opciones en pantalla\r\n# Determinar la opción elegida y terminar el proceso\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nimporte=0.00\r\ncuota2=0.00\r\ncuota6=0.00\r\ntotal2c=0.00\r\ntotal6c=0.00\r\neleccion=0\r\n\r\n#Ingreso de dato\r\nimporte=float(input(\"Ingrese el importe de la compra: \"))\r\n\r\n# Calculo las 2 cuotas con recargo de 5%\r\ncuota2=importe/2\r\ntotal2c=importe+(5/100*importe)\r\n\r\n# Calculo las 6 cuotas con recargo de 40%\r\ncuota6=importe/6\r\ntotal6c=importe+(40/100*importe)\r\n\r\n#Realizo un menú de opciones\r\nprint(\"Seleccione la forma de pago que desee\")\r\nprint(\" 1 cuota de ${0}\".format(importe),\"--------------------------------------> INGRESE UNA A\")\r\nprint(\" 2 cuotas de ${0}\".format(cuota2),\"total ${0}\".format(total2c),\" (5% de recargo) ------> INGRESE UNA B\")\r\nprint(\" 6 cuotas de ${0}\".format(cuota6),\"total ${0}\".format(total6c),\" (40% de recargo) ------> INGRESE UNA C\")\r\n" }, { "alpha_fraction": 0.7455089688301086, "alphanum_fraction": 0.7634730339050293, "avg_line_length": 65.80000305175781, "blob_id": "36ff8be331b6efc16e6ac94dcf12367ec31aeb50", "content_id": "de79fc67bd95fc059ebf6f81fd6866fb69f5ed4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 340, "license_type": "no_license", "max_line_length": 145, "num_lines": 5, "path": "/README.md", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "# Guias-0-al-4\n\nAlgunos archivos llevan escrito \"GUIA\" o \"G\" seguido del numero de la guía correspondiente. 
Los que no, son los ejercicios de las guías de 0 y 1.\nPara diferenciarlos y saber cual es cual, los ejercicios de la guía 0 empiezan con \"ej\" todo en minúscula.\nLos ejercicios de la guía 1 empiezan con \"EJ\" todo en mayúscula.\n" }, { "alpha_fraction": 0.3271334767341614, "alphanum_fraction": 0.34682711958885193, "avg_line_length": 31.851852416992188, "blob_id": "52cf421b1eab14715cfc1258a1df434bb4564912", "content_id": "aa2d2b92d28412cab43b38f9d3cbea5b87c792b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/G4 EJ 1.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 15/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 1: Dados 10 valores informar el mayor\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: maximo\r\n# Salidas: mayor\r\n# Procesos: Ingresar 10 valores\r\n# Informar el mayor por un for in\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nmayor = 0\r\nmaximo = 10\r\n \r\nfor i in range(maximo):\r\n valor = int(input(\"Ingrese un valor: \"))\r\n if valor > mayor:\r\n mayor = valor\r\n\r\n# Salida por pantalla\r\nprint(\"El mayor es: {0}\".format(mayor))\r\n" }, { "alpha_fraction": 0.45928338170051575, "alphanum_fraction": 0.471009761095047, "avg_line_length": 28.13725471496582, "blob_id": "0f2233bb4e45c68b67d592c62a9b0befda4ed96a", "content_id": "7c4f5738a1f6ee60e1262a826ee7bd773256b078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 75, "num_lines": 51, "path": "/G4 EJ 2.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 15/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 2: Dados N valores informar el mayor, el menor\r\n# y en que posición del conjunto fueron ingresados.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: continuar, nro, numeros, contador\r\n# Salidas: nromayor\r\n# Procesos: Ingresar numeros\r\n# calcular el mayor\r\n# calcular el menor\r\n# Mostrarlos por pantalla con posicion\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nnromayor=0\r\ncontinuar=\"S\"\r\ncontador=0\r\nnro=0\r\nnumeros=[]\r\n\r\nwhile continuar==\"S\":\r\n # Ingreso de dato\r\n\tnro=(int(input(\"Ingrese un numero: \")))\r\n\tif nro==0:\r\n\t\tcontinuar=\"N\"\r\n\telse:\r\n\t\tnumeros.append(nro)\r\n\tcontador=contador+1\r\n\r\nfor i in numeros:\r\n\tif i > nromayor:\r\n # Calculo el mayor\r\n\t\tnromayor=i\r\n\t\tposnromayor= numeros.index(i)\r\n\r\nminimo=numeros[0]\r\nfor a in numeros:\r\n\tif a < minimo:\r\n # Calculo el menor\r\n\t\tminimo = 
a\r\n\t\tposnromenos= numeros.index(a)\r\n\r\n# Salida por pantalla\r\nprint(\"El numero mayor es %d con posicion en %d\" %(nromayor,posnromayor+1))\r\nprint(\"El numero menor es %d con posicion en %d\" %(minimo, posnromenos+1))" }, { "alpha_fraction": 0.4795321524143219, "alphanum_fraction": 0.5029239654541016, "avg_line_length": 31.032258987426758, "blob_id": "ae3a9dcf073ee714e29878add81b1220d3c7ef88", "content_id": "d1d04408c95e6f5c8b485a49c7347cfd88a0ce7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 103, "num_lines": 31, "path": "/G3 ej7.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº3\r\n# Fecha: 23/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 7: Dado un valor M determinar y emitir un listado con\r\n# los M primeros múltiplos de 3 que no lo sean de 5,\r\n# dentro del conjunto de los números naturales.\r\n#***********************************************************\r\n# Declaración de variables\r\nM=0\r\ncontador=0\r\nnumero=0\r\n\r\n\r\n# Ingreso de dato\r\ntry: #validación\r\n M=int(input(\"Ingrese el valor: \"))\r\nexcept ValueError:\r\n print(\"Error. Ingrese un número\")\r\n M=int(input(\"Ingrese el valor: \"))\r\n\r\nprint(\"Los primeros {0} multiplos de 3 que no son multiplos de 5 son: \".format(M)) #Salida por pantalla\r\nwhile contador<M:\r\n if numero%3==0 and numero%5!=0:\r\n multiplo=numero\r\n # Incremento el contador\r\n contador+=1\r\n print(numero) #Listado de los multiplos\r\n \r\n numero+=1\r\n\r\n" }, { "alpha_fraction": 0.4997239112854004, "alphanum_fraction": 0.5096631646156311, "avg_line_length": 32.20754623413086, "blob_id": "29523771714d7e948f109834f36ce549f5d4abc1", "content_id": "59875bf9bc7d601cbf45e13242cb1540de7730b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 65, "num_lines": 53, "path": "/Guia 2 EJ 1.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 1: A partir de dos valores enteros A y B, informar\r\n# la suma, la diferencia (A menos B), y el producto.\r\n# Estrategia:\r\n#\t Solicitar e ingresar datos por teclado\r\n#\t Calcular suma e informar por monitor\r\n#\t Calcular diferencia e informar por monitor\r\n#\t Calcular producto e informar por monitor\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: A, B\r\n# Salidas: suma, resta, producto\r\n# Procesos: Ingresar numero A\r\n# Ingresar numero B\r\n# Calcular la suma y mostrar en pantalla\r\n# Calcular la resta y mostrar en pantalla\r\n# Calcular el producto y mostrar por pantalla\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\nA=0\r\nB=0\r\nsuma=0\r\nresta=0\r\nproducto=0\r\n\r\n# Ingreso de datos\r\nA=int(input(\"Ingrese el primer número entero: \"))\r\nB=int(input(\"Ingrese el 
segundo número entero: \"))\r\n\r\n# Calculo la suma entre los dos valores\r\nsuma=A+B\r\n\r\n# Muestro el resultado en pantalla\r\nprint(\"El resultado de la suma es: {0}\".format(suma))\r\n\r\n# Calculo la diferencia entre los dos valores\r\nresta=A-B\r\n\r\n# Muestro el resultado en pantalla\r\nprint(\"El resultado de la resta es: {0}\".format(resta))\r\n\r\n# Calculo el producto entre los dos valores\r\nproducto=A*B\r\n\r\n# Muestro el resultado en pantalla\r\nprint(\"El resultado del producto es: {0}\".format(producto))" }, { "alpha_fraction": 0.5727167725563049, "alphanum_fraction": 0.5921387076377869, "avg_line_length": 43.07291793823242, "blob_id": "f595df47584d89cd56a0742858aa3636488a6f0d", "content_id": "a78b14411743ba3385f002c743203e76e0fb4f36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4351, "license_type": "no_license", "max_line_length": 137, "num_lines": 96, "path": "/G4 EJ 6.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº4\r\n# Fecha: 24/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 6: Dada una serie de M pares {color, número} que corresponden a los tiros de una ruleta. Se pide informar:\r\n# a)\tcuántas veces salió el número cero y el número anterior a cada cero\r\n# b)\tcuántas veces seguidas llegó a repetirse el color negro\r\n# c)\tcuántas veces seguidas llegó a repetirse el mismo número y cuál fue\r\n# d)\tel mayor número de veces seguidas que salieron alternados el rojo y el negro\r\n# e)\tel mayor número de veces seguidas que se negó la segunda docenas\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\n\r\n##Negro son los numeros cuya suma de digitos es par (Las excepciones son\r\n##el 10 y el 28 ambos negros)\r\n##Rojo es numero impar\r\n##La ruleta se divide en 3 docenas: del 0 al 12, 12 al 24 y del 24 al 36.\r\n##Declaracion de variables\r\ncantidad_ceros=0\r\ncantidad_negro=0\r\nveces_almacenadas=0\r\nveces_seguidas=0\r\nrepeticiones_almacenadas=0\r\nveces_alternadas=0\r\ncantidad_rojos=0\r\nnumero_repetido=0\r\nrepeticiones=0\r\nnegardocena=0\r\nveces_alter_almacenadas=0\r\ndocena_almacenada=0\r\npar=0\r\n\r\nimport random\r\nfrom typing import ValuesView\r\ntry: #Validaciones\r\n tiradas=int(input(\"Ingrese cuantas veces desea tirar la ruleta: \")) #Ingreso de dato\r\nexcept ValueError:\r\n print(\"Error. Ingrese un número\")\r\n tiradas=int(input(\"Ingrese cuantas veces desea tirar la ruleta: \"))\r\nif tiradas < 0 or tiradas>36: #Validación\r\n print(\"Error. 
Tiene que ingresar un valor mayor a 0 y menor que 36\")\r\n tiradas=int(input(\"Ingrese cuantas veces desea tirar la ruleta: \"))\r\nelse:\r\n for i in range(0,tiradas):\r\n tirada = random.randrange(36)\r\n #Formula de pares\r\n tiradapar = tirada\r\n while tiradapar != 0:\r\n par+=tiradapar%10\r\n tiradapar=tiradapar//10\r\n if tirada == 0: #Veces que salió el numero cero y el numero anterior a cada cero\r\n cantidad_ceros+=1\r\n print(\"El numero anterior al cero es:{0} y se repitio {1} ceros\".format(numero_anterior,cantidad_ceros)) #Salida por pantalla\r\n else:\r\n numero_anterior = tirada\r\n if tirada == 10 or tirada == 28: #Veces seguidas que se llegó a repetir el color negro\r\n cantidad_negro+=1\r\n veces_seguidas+=1\r\n else:\r\n if par%2 == 0:\r\n cantidad_negro+=1\r\n veces_seguidas+=1\r\n else:\r\n cantidad_rojos+=1\r\n if veces_seguidas > 1:\r\n if veces_seguidas > veces_almacenadas:\r\n veces_almacenadas = veces_seguidas\r\n else:\r\n veces_alternadas+=1\r\n if veces_alternadas > veces_alter_almacenadas:\r\n veces_alter_almacenadas = veces_alternadas\r\n veces_seguidas = 0\r\n\r\n if tirada == numero_repetido: #Veces seguidas que llegó a repetirse el mismo numero\r\n repeticiones+=1\r\n print(\"Se repitió el número: {0}\".format(numero_repetido)) #Salida por pantalla\r\n else:\r\n if repeticiones > repeticiones_almacenadas:\r\n repeticiones_almacenadas = repeticiones\r\n repeticiones = 0\r\n numero_repetido = tirada\r\n if tirada < 12 or tirada > 24: #Veces seguidas que se negó la segunda docena de la ruleta\r\n negardocena+=1\r\n else:\r\n if negardocena > docena_almacenada:\r\n docena_almacenada = negardocena\r\n negardocena=0\r\n\r\n#Salidas por pantalla\r\nprint(\"La cantidad de veces seguidas que se repitio el mismo numero fueron: {0}\".format(repeticiones_almacenadas))\r\nprint(\"La cantidad de veces seguidas que se repitio el color negro fue: {0}\".format(veces_almacenadas))\r\nprint(\"La cantidad de veces seguidas que se alterno entre el rojo y el negro: {0}\".format(veces_alter_almacenadas))\r\nprint(\"La cantidad de veces seguidas que se nego la segunda docena de la ruleta: {0}\".format(docena_almacenada))" }, { "alpha_fraction": 0.47933492064476013, "alphanum_fraction": 0.489311158657074, "avg_line_length": 34.894737243652344, "blob_id": "0d53888789f85099d7b5d18dd02c78c3f47fe450", "content_id": "1f0e8dfa13073d7daf4d444a68f1d42aa69bd9e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2117, "license_type": "no_license", "max_line_length": 77, "num_lines": 57, "path": "/EJ12.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº1\r\n# Fecha: 02/08/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 12: Escribir un programa que tome una cantidad n de valores\r\n# ingresados por el usuario, a cada uno le calcule el factorial\r\n# (lo realizado en el ejercicio 1.4) e imprima el resultado\r\n# junto con el número de orden correspondiente.\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: cantidad, num, factorial, continuar\r\n# Salidas: cantidad, num, factorial\r\n# Procesos: Ingreso la cantidad de numeros a calcular\r\n# Identifico si son números enteros\r\n# Ingreso los numeros\r\n# Identifico si son números 
enteros\r\n# Calculo los factoriales\r\n# Muestro por pantalla la cantidad, el numero y su factorial\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\ncantidad=0\r\nnum=0\r\ncontinuar=\"S\"\r\nfactorial=0\r\n\r\n#Creo un ciclo de repetición\r\nwhile continuar==\"S\":\r\n if continuar==\"S\":\r\n #Ingreso de dato\r\n print(\"Ingrese una cantidad de números \")\r\n cantidad=int(input())\r\n continuar=\"N\"\r\n else:\r\n #Muestro un mensaje de error\r\n print(\"Ingrese solo valores numéricos enteros\\n\")\r\n\r\nfor n in range(1,cantidad+1):\r\n continuar=\"S\"\r\n while continuar==\"S\":\r\n if continuar==\"S\":\r\n #Ingreso de dato\r\n print(\"Ingrese un número para calcular su factorial: \")\r\n num=int(input())\r\n continuar=\"N\"\r\n else:\r\n #Muestro mensaje de error\r\n print(\"Ingrese solo valores numéricos enteros\\n\")\r\n #Calculo el factorial\r\n factorial = 1\r\n for x in range(2,num+1):\r\n factorial *= x\r\n #Salida por pantalla\r\n print(n,num,factorial)\r\n\r\n" }, { "alpha_fraction": 0.39514732360839844, "alphanum_fraction": 0.40727901458740234, "avg_line_length": 34.125, "blob_id": "1ef00919830764f3e23ac8c312b7c17e5a491bfd", "content_id": "e0ae4374f8c4e79bd2a3cfcc813ecee5f91e7b9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 74, "num_lines": 32, "path": "/Guia 2 EJ 3.py", "repo_name": "AldanaVallejos/Aldana-Vallejos", "src_encoding": "UTF-8", "text": "#***********************************************************\r\n# Guia de ejercitación nº2\r\n# Fecha: 04/09/2021\r\n# Autor: Aldana Vallejos\r\n#***********************************************************\r\n# EJERCICIO 3: Dada una terna de números naturales que representan al día,\r\n# al mes y al año de una determinada fecha informarla \r\n# como un solo número natural de 8 dígitos (AAAAMMDD).\r\n#***********************************************************\r\n# A N A L I S I S\r\n#***********************************************************\r\n# Entradas: año, dia, mes\r\n# Salidas: añomesdia\r\n# Procesos: Ingresar el año\r\n# Ingresar el mes\r\n# Ingresar el dia\r\n# Unir los valores con str\r\n#***********************************************************\r\n# D I S E Ñ O\r\n#***********************************************************\r\n# Declaración de variables\r\naño=0\r\ndia=0\r\nmes=0\r\n\r\n# Ingreso de datos\r\naño=int(input(\"Ingrese el año: \"))\r\nmes=int(input(\"Ingrese el mes: \"))\r\ndia=int(input(\"Ingrese el dia: \"))\r\n\r\n# Salida por pantalla\r\nprint(\"Usted ingresó el \"+str(año)+str(mes)+str(dia))" } ]
50
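The exercise files in the record above read numbers with bare int(input(...)) calls and track the running maximum starting from mayor = 0, a pattern that crashes on non-numeric input and misses an all-negative maximum. A minimal defensive sketch of both patterns; the helper name read_int and the prompt strings are illustrative, not from the repo:

```python
def read_int(prompt):
    """Keep prompting until the user types a valid integer."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("Error: please enter a whole number")

def max_min_with_positions(values):
    """Return (max, max_pos, min, min_pos); positions are 1-based."""
    if not values:
        raise ValueError("need at least one value")
    max_pos = max(range(len(values)), key=lambda i: values[i])
    min_pos = min(range(len(values)), key=lambda i: values[i])
    return values[max_pos], max_pos + 1, values[min_pos], min_pos + 1

if __name__ == "__main__":
    nums = []
    while True:
        n = read_int("Enter a number (0 to stop): ")
        if n == 0:
            break
        nums.append(n)
    if nums:
        hi, hi_pos, lo, lo_pos = max_min_with_positions(nums)
        print("Max %d at position %d; min %d at position %d" % (hi, hi_pos, lo, lo_pos))
```

Starting from the first element (or using max/min over indices, as here) rather than from a sentinel of 0 is what keeps the result correct when every input is negative.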
subshna/TopGear_Project
https://github.com/subshna/TopGear_Project
4ae5bf548b61aca937ce24bd02675bf81e5fe92e
94561509b179bb3298c729c9d155ae8731a79916
2d193e9bbf3fb5b55519348cab011bb7a61d66e1
refs/heads/master
2020-06-14T22:13:41.321798
2019-07-04T00:10:56
2019-07-04T00:10:56
195,141,491
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.602366030216217, "alphanum_fraction": 0.621294379234314, "avg_line_length": 39.1452522277832, "blob_id": "2c3b38f3d40ed15c58a33678f689c434e7355901", "content_id": "720e7787e86375132b55e7ba7e2f94639f8d4390", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7185, "license_type": "no_license", "max_line_length": 112, "num_lines": 179, "path": "/Excel_TestCases/MS_Excel_001_Format_Existing_File.py", "repo_name": "subshna/TopGear_Project", "src_encoding": "UTF-8", "text": "import win32com.client as win32\nfrom win32com.client import constants\nimport win32gui, win32con, win32api\nimport win32print as printer\nfrom zipfile import ZipFile\nimport pyautogui as imageGrab\nimport logging, time\nimport os, datetime\n\n# Check if folder exist or not\nlstfolder_check = ['resources', 'screenshot_tc1', 'reports']\nfor fldr in lstfolder_check:\n if not os.path.exists('..\\\\' + fldr):\n os.makedirs('..\\\\' + fldr)\n\n# folder paths\nscreenshot_path = '..\\\\screenshot_tc1\\\\'\nresources_path = '..\\\\resources\\\\'\nreport_path = '..\\\\reports\\\\'\n\n\n# Create a logger file\ndef logInfoError(log, msg):\n timestamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S')\n logName = 'report'+timestamp+'.log'\n logging.basicConfig(filename=report_path+logName,\n format='%(asctime)s %(message)s',\n filemode='w')\n logger = logging.getLogger(logName)\n logger.setLevel(logging.DEBUG)\n if log == 'error':\n logger.error(msg)\n else:\n logger.info(msg)\n\n# Create the Excel Application\ntry:\n xlApp = win32.gencache.EnsureDispatch('Excel.Application')\n logInfoError('info', 'Step1-Excel Application Invoked Successfully')\nexcept Exception as e:\n logInfoError('error', e)\n raise e\n\n# Function to Connect to ALM QC and Navigate to Particular folder\n# and Download the file to resources folder path\ndef qcConnect_Donwloadfile(qcServer, qcUser, qcPassword, qcDomain,\n qcProject, qcTC_Folder, TestCase_Name):\n # Connect to qc Server\n try:\n qcConn = win32.Dispatch('TDApiOle80.TDConnection.1')\n qcConn.InitConnectionEx(qcServer)\n qcConn.Login(qcUser, qcPassword)\n qcConn.Connect(qcDomain, qcProject)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_Connect_QCServer.png')\n logInfoError('info', 'Step2-Connect to QC Successfully')\n\n # Download file attached to test cases\n TreeObj = qcConn.TreeManager\n folder = TreeObj.NodeByPath(qcTC_Folder)\n testList = folder.FindTests(TestCase_Name)\n if (len(testList)) == 0:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_No_TC_Found.png')\n logInfoError('error', 'No TC found in folder-{}'.format(qcTC_Folder))\n else:\n for tst in range(len(testList)):\n teststorage = testList[tst].ExtendedStorage\n teststorage.ClientPath = resources_path + testList[tst].name\n teststorage.Load('', True)\n time.sleep(5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_Downloadfile.png')\n logInfoError('info', 'Stpe2-Completed Download')\n except Exception as e:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_QCServer_Error.png')\n logInfoError('error', 'Could not Connect to QC Server-{}'.format(e))\n\n# Function to extract the Zip file and Save the unzip file to resource path\ndef extract_ZipFile(resources_path):\n try:\n filelist = os.listdir(resources_path)\n for file in filelist:\n if file.startswith('Basic_Test'):\n with ZipFile(resources_path + file, 'r') as zip:\n zip.printdir()\n zip.extractall(resources_path)\n os.startfile(resources_path)\n time.sleep(1)\n im = 
imageGrab.grab()\n im.save(screenshot_path + 'Step2_Unzip_file.png')\n logInfoError('info', 'Step2-Unzip file {} successfully'.format(zip.infolist()[0].filename))\n return (zip.infolist()[0].filename)\n except Exception as e:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_File Not found.png')\n logInfoError('error','Step2-Could not Unzip file {}'.format(e))\n raise e\n\n# Function to Type formula and copy the same and paste to alternate cells\ndef xl_type_CopyPaste(unzipfileName, xlpath):\n # Type cell with value and copy paste the value to different cells\n try:\n path = os.getcwd().replace('\\'', '\\\\') + '\\\\'\n wb = xlApp.Workbooks.Open(path + xlpath + unzipfileName)\n ws = wb.Worksheets(1)\n xlApp.Visible = True\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step1_Validate_Application.png')\n hwnd = win32gui.GetForegroundWindow()\n win32gui.ShowWindow(hwnd, win32con.SW_MAXIMIZE)\n ws.Cells(28, 3).Value = '=COUNTA(C8:C24)'\n wb.Save()\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step3to4_Type_Formula.png')\n logInfoError('info','Step3to4-Type formula to Cell C28 Successfully')\n time.sleep(1)\n wCol = 'E'\n for col in range(0, 5):\n ws.Range('C28').Copy()\n ws.Range(wCol + '28').PasteSpecial(Paste=constants.xlPasteValues)\n wCol = chr(ord(wCol) + 2)\n wb.Save()\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step5_Copy_Cell_Paste.png')\n logInfoError('info','Step5-Copy C28, Paste Cell to Alternate Cells')\n return(wb, ws)\n except Exception as e:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step5_Copy_Cell_Paste_Error.png')\n logInfoError('error','Step5-Could not Open Excel {}'.format(e))\n raise e\n\n# Function to format all the Copied cells\ndef xl_format_Cells(wb, ws):\n try:\n y = 'C'\n for j in range(0, 6):\n ws.Range(y + '28').HorizontalAlignment = 3\n strCol = '%02x%02x%02x' % (0, 165, 255)\n ws.Range(y + '28').Interior.Color = int(strCol, 16)\n for id in range(7, 13):\n ws.Range(y + '28').Borders(id).LineStyle = 1\n ws.Range(y + '28').Borders(id).Weight = 2\n y = chr(ord(y) + 2)\n wb.Save()\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step6to9_Format_Cells.png')\n logInfoError('info','Step6to9-Format Cells and Highlight')\n except Exception as e:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step8to9_Couldnot_Format_Cells.png')\n logInfoError('error','Step6to9-Could not format the cells')\n raise e\n\n# Function to Print and Close the Xl file\ndef print_file_close(wb, flpath, flname):\n try:\n Cur_printer = printer.GetDefaultPrinter()\n print(Cur_printer)\n printer.SetDefaultPrinter(Cur_printer)\n wb.Save()\n wb.Close()\n win32api.ShellExecute(0, 'print', flpath+flname, Cur_printer, ',', 0)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step10to11_Print_Close_File.png')\n logInfoError('info','Step10to11-Successfully printed file {} on printer {}'.format(flname, Cur_printer))\n except Exception as e:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step10to11_Print_error.png')\n logInfoError('error','Step10to11-Could not print file {} on printer {}'.format(flname, Cur_printer))\n raise e\n\nif __name__ == '__main__':\n unzipfilename = extract_ZipFile(resources_path)\n xlws = xl_type_CopyPaste(unzipfilename, resources_path)\n xl_format_Cells(xlws[0], xlws[1])\n print_file_close(xlws[0], resources_path, unzipfilename)" }, { "alpha_fraction": 0.6222850680351257, "alphanum_fraction": 0.6371040940284729, "avg_line_length": 39.186363220214844, "blob_id": "d8cf2c9f4f0e9db0915669c17b417444db960912", "content_id": 
"94f00561872454841510fa3cc30a0268a84c9ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8840, "license_type": "no_license", "max_line_length": 107, "num_lines": 220, "path": "/Excel_TestCases/MS_Excel_002_Insert_Pivot_Table.py", "repo_name": "subshna/TopGear_Project", "src_encoding": "UTF-8", "text": "import win32com.client as win32\nfrom win32com.client import constants\nimport win32gui, win32con\nfrom zipfile import ZipFile\nimport pyautogui as imageGrab\nimport logging, time\nimport os, datetime\n\n# Check if folder exist or not\nlstfolder_check = ['resources', 'screenshot_tc2', 'reports']\nfor fldr in lstfolder_check:\n if not os.path.exists('..\\\\' + fldr):\n os.makedirs('..\\\\' + fldr)\n\n# folder paths\nscreenshot_path = '..\\\\screenshot_tc2\\\\'\nresources_path = '..\\\\resources\\\\'\nreport_path = '..\\\\reports\\\\'\n\n# Create a logger file\ndef logInfoError(log, msg):\n timestamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S')\n logName = 'report'+timestamp+'.log'\n logging.basicConfig(filename=report_path+logName,\n format='%(asctime)s %(message)s',\n filemode='w')\n logger = logging.getLogger(logName)\n logger.setLevel(logging.DEBUG)\n if log == 'error':\n logger.error(msg)\n else:\n logger.info(msg)\n\n# Create the Excel Application\ntry:\n xlApp = win32.gencache.EnsureDispatch('Excel.Application')\n logInfoError('info', 'Step1-Excel Application Invoked Successfully')\nexcept Exception as e:\n logInfoError('error', e)\n raise e\n\n# Function to Connect to ALM QC and Navigate to Particular folder\n# and Download the file to resources folder path\ndef qcConnect_Donwloadfile(qcServer, qcUser, qcPassword, qcDomain,\n qcProject, qcTC_Folder, TestCase_Name):\n # Connect to qc Server\n try:\n qcConn = win32.Dispatch('TDApiOle80.TDConnection.1')\n qcConn.InitConnectionEx(qcServer)\n qcConn.Login(qcUser, qcPassword)\n qcConn.Connect(qcDomain, qcProject)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_Connect_QCServer.png')\n logInfoError('info', 'Step2-Connect to QC Successfully')\n\n # Download file attached to test cases\n TreeObj = qcConn.TreeManager\n folder = TreeObj.NodeByPath(qcTC_Folder)\n testList = folder.FindTests(TestCase_Name)\n if (len(testList)) == 0:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_No_TC_Found.png')\n logInfoError('error', 'No TC found in folder-{}'.format(qcTC_Folder))\n else:\n for tst in range(len(testList)):\n teststorage = testList[tst].ExtendedStorage\n teststorage.ClientPath = resources_path + testList[tst].name\n teststorage.Load('', True)\n time.sleep(5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_Downloadfile.png')\n logInfoError('info', 'Stpe2-Completed Download')\n except Exception as e:\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_QCServer_Error.png')\n logInfoError('error', 'Could not Connect to QC Server-{}'.format(e))\n\n# Function to extract the Zip file and Save the unzip file to resource path\ndef extract_ZipFile(resources_path):\n try:\n filelist = os.listdir(resources_path)\n for file in filelist:\n if file.startswith('Adobe'):\n with ZipFile(resources_path + file, 'r') as zip:\n #zip.printdir()\n zip.extractall(resources_path)\n os.startfile(resources_path)\n time.sleep(1)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step2_Unzip_file.png')\n logInfoError('info', 'Step2-Unzip file {} successfully'.format(zip.infolist()[0].filename))\n return (zip.infolist()[0].filename)\n except Exception as e:\n im = 
imageGrab.grab()\n im.save(screenshot_path + 'Step2_File Not found.png')\n logInfoError('error','Step2-Could not Unzip file {}'.format(e))\n raise e\n\n# Function open the file\ndef validate_xl(unzipfileName, xlpath):\n # Type cell with value and copy paste the value to different cells\n try:\n path = os.getcwd().replace('\\'', '\\\\') + '\\\\'\n wb = xlApp.Workbooks.Open(path + xlpath + unzipfileName)\n xlApp.Visible = True\n hwnd = win32gui.GetForegroundWindow()\n win32gui.ShowWindow(hwnd, win32con.SW_MAXIMIZE)\n time.sleep(1)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step3_Open_Data_File.png')\n logInfoError('info', 'Step3-File Opened Successfully')\n\n # Select the Source Range\n ws = wb.Sheets('Sheet1')\n PivotRangeSrc = ws.UsedRange\n PivotRangeSrc.Select()\n return (wb, PivotRangeSrc)\n except Exception as e:\n logInfoError('error', 'Step3-Data file Not Found {}'.format(e))\n raise e\n\n# Function to create the Pivot table based on the source and destination Range\ndef create_PivotTable(wb, PivotRangeSrc, PivotTblName, stepNo):\n try:\n # Select destination Range\n PivotSht = wb.Worksheets.Add()\n PivotRangeDest = PivotSht.Range('A1')\n PivotTableName = PivotTblName\n except Exception as e:\n logInfoError('error', 'Select Destination Error {}'.format(e))\n raise e\n\n # Create Pivot table\n try:\n PivotCache = wb.PivotCaches().Create(SourceType=constants.xlDatabase, SourceData=PivotRangeSrc)\n PivotTable = PivotCache.CreatePivotTable(TableDestination=PivotRangeDest, TableName=PivotTableName)\n time.sleep(0.5)\n im = imageGrab.grab()\n im.save(screenshot_path + stepNo+'_Highlight_All_Column.png')\n logInfoError('info', stepNo+'-Highlighted all columns')\n return (PivotTable, PivotSht)\n except Exception as e:\n logInfoError('error', stepNo+'-Range Selection error {}'.format(e))\n raise e\n\n# Function to Drag the Columns and Sort\ndef select_PivotFields_Sort(PivotTable, *argv):\n try:\n PivotTable.PivotFields(argv[0]).Orientation = argv[2]\n DataField = PivotTable.AddDataField(PivotTable.PivotFields(argv[1]))\n DataField.NumberFormat = '##0.00'\n time.sleep(0.5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step6_Pivot_Table_Loaded.png')\n logInfoError('info', 'Step6-Pivot table should loaded as per selection')\n\n # Sort field Hostname descending order\n PivotTable.PivotFields(argv[0]).AutoSort(constants.xlAscending, DataField)\n time.sleep(0.5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step7_Pivot_Table_Sorted.png')\n logInfoError('info', 'Step7-The pivot table should load with sorting')\n except Exception as e:\n logInfoError('error', 'Step6to7-Pivot loading and sorting error {}'.format(e))\n raise e\n\n# Function to Drag the Columns, Create chart, and Close the file\ndef select_PivotFields_Chart(PivotTable, PivotSht, wb, *argv):\n try:\n PivotTable.PivotFields(argv[0]).Orientation = argv[3]\n PivotTable.PivotFields(argv[0]).Position = 1\n PivotTable.PivotFields(argv[1]).Orientation = argv[3]\n PivotTable.PivotFields(argv[0]).Position = 2\n time.sleep(0.5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step10_Columns_to_Axis.png')\n logInfoError('info', 'Step7-Columns are dragged successfully to Axis')\n\n DataField = PivotTable.AddDataField(PivotTable.PivotFields(argv[2]))\n DataField.NumberFormat = '##0.00'\n time.sleep(0.5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step11_Columns_to_Axis.png')\n logInfoError('info', 'Step11-Columns are dragged successfully to Axis')\n\n # Create Chart\n chart = PivotSht.Shapes.AddChart2(201)\n 
time.sleep(0.5)\n im = imageGrab.grab()\n im.save(screenshot_path + 'Step12_Pivot_Chart.png')\n logInfoError('info', 'Step12-Pivot chart should be loaded')\n wb.Save()\n wb.Close()\n except Exception as e:\n logInfoError('error', 'Step10tp12-Columns and Chart not created {}'.format(e))\n raise e\n\n\n# Call the main function\nif __name__ == '__main__':\n PivotFields = {1 : 'UserName',\n 2 : 'Host Name',\n 3 : 'User Region',\n 4 : 'Count of Host Name'\n }\n PivotOrient = {1 : constants.xlRowField,\n 2 : constants.xlPageField,\n 3 : constants.xlColumnField,\n }\n unzipfilename = extract_ZipFile(resources_path)\n xlwb_Src = validate_xl(unzipfilename, resources_path)\n\n # Create first pivot table\n PivotTbl = create_PivotTable(xlwb_Src[0], xlwb_Src[1], 'PivotTable1','Step4to5')\n select_PivotFields_Sort(PivotTbl[0], PivotFields[1],PivotFields[2], PivotOrient[1])\n\n # Create Second Pivot table\n PivotTbl = create_PivotTable(xlwb_Src[0], xlwb_Src[1], 'PivotTable2', 'Step8to9')\n select_PivotFields_Chart(PivotTbl[0], PivotTbl[1], xlwb_Src[0], PivotFields[1],\n PivotFields[3], PivotFields[2], PivotOrient[2])" } ]
2
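A side note on the logging setup shared by both Excel scripts above: logging.basicConfig only configures the root logger the first time it runs, so calling it inside logInfoError with a fresh timestamped filename on every message leaves all later output in the first file created. A sketch of the configure-once pattern the scripts appear to want; the directory, logger name, and format are assumptions, not the repo's code:

```python
import datetime
import logging
import os

REPORT_DIR = os.path.join("..", "reports")  # illustrative, mirrors the scripts' layout

def make_logger(name="excel_automation"):
    """Build one timestamped file handler up front; reuse the logger everywhere."""
    os.makedirs(REPORT_DIR, exist_ok=True)
    logger = logging.getLogger(name)
    if not logger.handlers:  # guard against adding duplicate handlers on re-import
        stamp = datetime.datetime.today().strftime("%Y%m%d%H%M%S")
        handler = logging.FileHandler(os.path.join(REPORT_DIR, "report%s.log" % stamp))
        handler.setFormatter(logging.Formatter("%(asctime)s %(message)s", "%H:%M:%S"))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
    return logger

logger = make_logger()
logger.info("Step1-Excel Application Invoked Successfully")
logger.error("Step2-Could not connect to QC Server")
```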
Kashyap90/Assignment-7
https://github.com/Kashyap90/Assignment-7
b0a4d96b036d6ea0eb70938934f2a93ebc668ec4
6eed1654b8a312c684f29503cae88a0383c74cab
21070541c2a581e4579a46bedd8535c3b5b0fa3b
refs/heads/master
2020-03-27T20:34:40.268038
2018-09-02T11:22:41
2018-09-02T11:22:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5914826393127441, "alphanum_fraction": 0.6671923995018005, "avg_line_length": 17.58823585510254, "blob_id": "76b3dd127048a9dbd49de600c6d2e12181fa8933", "content_id": "3c50b3abbb6c065dfeb36d23f352364d039f9139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 59, "num_lines": 34, "path": "/Numpy 2 Assignement.py", "repo_name": "Kashyap90/Assignment-7", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\n# The Moving Avereage Sequence with 7 values\n\nimport numpy as np\n\ndataset = [25, 35, 45, 55, 65, 75, 85]\n\ndef movingaverage(values, window):\n weights = np.repeat(7.0, window)/window\n smas = np.convolve(values,weights,'valid')\n return smas\n\nprint(movingaverage(dataset,4))\n\n\n# In[2]:\n\n\n# Moving Average in an array over a window, window=3\n\nimport numpy as np\n\ndataset = [3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150]\n\ndef movingaverage(values, window):\n weights = np.repeat(1.0, window)/window\n smas = np.convolve(values,weights,'valid')\n return smas\n\nprint(movingaverage(dataset,3))\n\n" } ]
1
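np.convolve with uniform weights is one of several equivalent ways to compute the rolling mean used in the assignment above; the weights have to sum to 1 for the output to be a true mean. A quick cross-check against a cumulative-sum formulation, reusing the assignment's second dataset (the function names are illustrative):

```python
import numpy as np

def moving_average_conv(values, window):
    # Uniform weights that sum to 1 give the mean of each window.
    return np.convolve(values, np.ones(window) / window, 'valid')

def moving_average_cumsum(values, window):
    # Same result via running sums; O(n) regardless of window size.
    c = np.cumsum(np.insert(np.asarray(values, dtype=float), 0, 0.0))
    return (c[window:] - c[:-window]) / window

data = [3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150]
assert np.allclose(moving_average_conv(data, 3), moving_average_cumsum(data, 3))
print(moving_average_conv(data, 3))
```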
ysaquib/WhatsApp-ChatAnalyzer
https://github.com/ysaquib/WhatsApp-ChatAnalyzer
470c95caa6350a444338d975dba324971baa242f
dec1dfe0f8903b8e01c0cadead6065fdd187fc33
e46dced3d11b020b9fa81aae0e384e45bcd251fe
refs/heads/master
2020-12-06T10:48:29.344778
2020-11-08T11:15:14
2020-11-08T11:15:14
232,444,707
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7654867172241211, "alphanum_fraction": 0.7743362784385681, "avg_line_length": 41.25, "blob_id": "be5eb2032e7c63e39d2e793a6cba78e9f4d91e19", "content_id": "675e8ab3b17c2fbb1d52fa04c00f643f51e1a816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 678, "license_type": "no_license", "max_line_length": 153, "num_lines": 16, "path": "/README.md", "repo_name": "ysaquib/WhatsApp-ChatAnalyzer", "src_encoding": "UTF-8", "text": "Project: WhatsApp Chat Analyzer\n\nAuthor: Yusuf Saquib\n\nDate Created: 30 April 2019\n\nLibraries Used:\nxlsxwriter for Excel writing \nRe for pattern matching\n\nThis is a simple Python script that goes through your exported whatsapp texts (only works for Android at the moment) and exports data\ninto an excel file. Data such as \"Most Used Words\", \"Most Used Emojis\", \"Total Messages\" and \"Number of Texts Sent per Time of Day\"\nare all displayed in the excel file allowing the user to create a graph of the data.\n\nThis was made over a few days of on and off work after final exams. I made this mainly because I was curious to see the words and emoji I used most with \nsome close friends.\n\n\n" }, { "alpha_fraction": 0.4994249641895294, "alphanum_fraction": 0.5172513127326965, "avg_line_length": 32.09803771972656, "blob_id": "d0ce62d9afb42aa1f44f13765240cd86a2917603", "content_id": "caf158f4c2d4e3f25846d5d2dfc906742e6a3b6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10434, "license_type": "no_license", "max_line_length": 114, "num_lines": 306, "path": "/ChatAnalyzer.py", "repo_name": "ysaquib/WhatsApp-ChatAnalyzer", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport re\r\nimport xlsxwriter as xlw\r\n\r\n#replace texts.txt with your file\r\nf = open(\"texts.txt\",\"r\",encoding=\"utf-8\")\r\n\r\nSENDER = \"Yusuf\" # You\r\nRECEIVER = \"Person\" # Person you are chatting with\r\n\r\n## DONT TOUCH THIS\r\n\r\nwb = xlw.Workbook(\"chat_analysis.xlsx\")\r\nws = wb.add_worksheet(\"Chat Data Analysis\")\r\n\r\nMSG_RCVD = 0\r\nMSG_SENT = 0\r\nIMG_RCVD = 0\r\nIMG_SENT = 0\r\nPREV_DAY = \"\"\r\nCURR_DAY = \"\"\r\nTOTAL_CHARS_SENT = 0\r\nTOTAL_CHARS_RCVD = 0\r\n\r\nTOTAL_WORDS_SENT = 0\r\nTOTAL_WORDS_RCVD = 0\r\n\r\nTOTAL_EMOJI_SENT = 0\r\nTOTAL_EMOJI_RCVD = 0\r\n\r\nCURR_MSNGR = \"\"\r\n\r\nMSG_RCVD_TODAY = 0\r\nMSG_SENT_TODAY = 0\r\n\r\n\r\ndef analyze(message, word_dict, emoji_dict, time_dict):\r\n\r\n # DECLARE GLOBALS -- DONT TOUCH\r\n global PREV_DAY\r\n global CURR_DAY\r\n global MSG_RCVD\r\n global MSG_RCVD_TODAY\r\n global MSG_SENT\r\n global MSG_SENT_TODAY\r\n global IMG_SENT\r\n global IMG_RCVD\r\n global TOTAL_CHARS_SENT\r\n global TOTAL_CHARS_RCVD\r\n global TOTAL_WORDS_SENT\r\n global TOTAL_WORDS_RCVD\r\n global TOTAL_EMOJI_SENT\r\n global TOTAL_EMOJI_RCVD\r\n global SENDER\r\n global RECEIVER\r\n global CURR_MSNGR\r\n\r\n\r\n ## IF THE DAY IS CURRENTLY ON GOING AND IT IS AT THE END OF THE FILE, THEN I NEED TO MAKE IT ANOTHER DAY!!!\r\n \r\n\r\n if re.match(\"\\d+/\\d+/\\d+,\", message):\r\n CURR_DAY = message.split(\",\")[0]\r\n if PREV_DAY == \"\":\r\n PREV_DAY = CURR_DAY\r\n if PREV_DAY != CURR_DAY:\r\n print (\" Messages Sent on \" + PREV_DAY + \": \" + str(MSG_SENT_TODAY))\r\n print (\"Messages Received on \" + PREV_DAY + \": \" + str(MSG_RCVD_TODAY) + \"\\n\")\r\n PREV_DAY = CURR_DAY\r\n MSG_SENT_TODAY = 0\r\n MSG_RCVD_TODAY = 0\r\n if \" - \" + SENDER + \": <Media omitted>\" in message:\r\n 
IMG_SENT += 1\r\n if \" - \" + RECEIVER + \": <Media omitted>\" in message:\r\n IMG_RCVD += 1\r\n\r\n str_msg = \"\"\r\n str_time = \"\"\r\n hour = \"\"\r\n str_emoji_regex = \"[\\U0001F100-\\U0001F7EC]\" #Encompasses All Emoji Unicode\r\n if \" - \" + SENDER + \":\" in message:\r\n MSG_SENT += 1\r\n MSG_SENT_TODAY += 1\r\n CURR_MSNGR = SENDER\r\n TOTAL_CHARS_SENT += len(message.split(\":\")[1])\r\n str_msg = message.split(SENDER + \":\")[1].strip()\r\n str_time = (message.split(\",\")[1].strip()).split(\"- \" + SENDER)[0].strip()\r\n if str_msg == \"<Media omitted>\": str_msg = \"\"\r\n TOTAL_WORDS_SENT += len(re.findall(\"[^\\w*'*\\w]\", str_msg))\r\n TOTAL_EMOJI_SENT += len(re.findall(str_emoji_regex, str_msg))\r\n time = \"\".join(re.findall(\"[\\d\\d:\\d\\d]\", str_time))\r\n hour = time.split(\":\")[0]\r\n\r\n elif \" - \" + RECEIVER + \":\" in message:\r\n MSG_RCVD += 1\r\n MSG_RCVD_TODAY += 1\r\n CURR_MSNGR = RECEIVER\r\n TOTAL_CHARS_RCVD += len(message.split(\":\")[1])\r\n str_msg = message.split(RECEIVER + \":\")[1].strip()\r\n str_time = (message.split(\",\")[1].strip()).split(\"- \" + RECEIVER)[0].strip()\r\n if str_msg == \"<Media omitted>\": str_msg = \"\"\r\n TOTAL_WORDS_RCVD += len(re.findall(\"[^\\w*'*\\w]\", str_msg))\r\n TOTAL_EMOJI_RCVD += len(re.findall(str_emoji_regex, str_msg))\r\n time = \"\".join(re.findall(\"[\\d\\d:\\d\\d]\", str_time))\r\n hour = time.split(\":\")[0]\r\n\r\n else:\r\n if CURR_MSNGR == SENDER:\r\n TOTAL_CHARS_SENT += len(message)\r\n str_msg = message\r\n str_time = \"\"\r\n if str_msg == \"<Media omitted>\": str_msg = \"\"\r\n TOTAL_WORDS_SENT += len(re.findall(\"[^\\w*'*\\w]\", str_msg))\r\n TOTAL_EMOJI_SENT += len(re.findall(str_emoji_regex, str_msg))\r\n\r\n elif CURR_MSNGR == RECEIVER:\r\n TOTAL_CHARS_RCVD += len(message)\r\n str_msg = message\r\n str_time = \"\"\r\n if str_msg == \"<Media omitted>\": str_msg = \"\"\r\n TOTAL_WORDS_RCVD += len(re.findall(\"[^\\w*'*\\w]\", str_msg))\r\n TOTAL_EMOJI_RCVD += len(re.findall(str_emoji_regex, str_msg))\r\n\r\n wordList = re.sub(\"[^\\w*'*\\w]\", \" \", str_msg).split()\r\n emojiList = re.findall(str_emoji_regex, str_msg)\r\n\r\n inds = [i for i, x in enumerate(wordList) if (x == \"ll\" or x ==\"re\" or x==\"ve\")]\r\n for j in inds:\r\n if j - 1 >= 0:\r\n wordList[j] = str(wordList[j-1]) + str(wordList[j])\r\n j -= 1\r\n\r\n for word in wordList:\r\n if word.lower().strip() not in word_dict:\r\n word_dict[word.lower().strip()] = 1\r\n else:\r\n word_dict[word.lower().strip()] += 1\r\n\r\n if hour not in time_dict:\r\n if CURR_MSNGR == SENDER: time_dict[hour] = [1, 0, 1]\r\n else: time_dict[hour] = [0, 1, 1]\r\n else:\r\n if CURR_MSNGR == SENDER:\r\n time_dict[hour][0] += 1\r\n time_dict[hour][2] += 1\r\n else: \r\n time_dict[hour][1] += 1\r\n time_dict[hour][2] += 1\r\n\r\n for emoji in emojiList:\r\n if emoji not in emoji_dict:\r\n emoji_dict[emoji] = 1\r\n else:\r\n emoji_dict[emoji] += 1\r\n\r\ndef fileAnalyzer(file):\r\n file.readline()\r\n string = file.readline()\r\n if \"Messages to this chat and calls are now secured with end-to-end encryption. 
Tap for more info.\" in string:\r\n string = file.readline()\r\n word_dict = {}\r\n time_dict = {}\r\n emoji_dict = {}\r\n while(string != \"\"):\r\n analyze(string, word_dict, emoji_dict, time_dict)\r\n string = file.readline()\r\n #print (word_dict)\r\n del time_dict[\"\"]\r\n words_sorted = sorted(word_dict.items(), reverse = True, key = lambda x: x[1])\r\n emojis_sorted = sorted(emoji_dict.items(), reverse = True, key = lambda z: z[1])\r\n\r\n f.close()\r\n print (\" Total Messages Sent: \" + str(MSG_SENT))\r\n print (\"Total Messages Received: \" + str(MSG_RCVD) + \"\\n\")\r\n\r\n print (\" Total Messages: \" + str(MSG_RCVD + MSG_SENT) + \"\\n\")\r\n\r\n print (\" Total Images Sent: \" + str(IMG_SENT))\r\n print (\" Total Images Received: \" + str(IMG_RCVD) + \"\\n\")\r\n\r\n print (\" Total Images: \" + str(IMG_RCVD + IMG_SENT) + \"\\n\")\r\n\r\n print (\" Total Chars Sent: \" + str(TOTAL_CHARS_SENT))\r\n print (\" Total Chars Received: \" + str(TOTAL_CHARS_RCVD) + \"\\n\")\r\n\r\n print (\" Total Characters: \" + str(TOTAL_CHARS_RCVD + TOTAL_CHARS_SENT) + \"\\n\")\r\n\r\n print (\" Total Words Sent: \" + str(TOTAL_WORDS_SENT))\r\n print (\" Total Words Received: \" + str(TOTAL_WORDS_RCVD) + \"\\n\")\r\n\r\n print (\" Total Words: \" + str(TOTAL_WORDS_RCVD + TOTAL_WORDS_SENT) + \"\\n\")\r\n\r\n print (\" Avg Sent Msg Length: \" + str(TOTAL_CHARS_SENT//MSG_SENT) + \" Chars\")\r\n print (\"Avg Received Msg Length: \" + str(TOTAL_CHARS_RCVD//MSG_RCVD) + \" Chars\" + \"\\n\")\r\n\r\n print (\"100 Most Used Words: \")\r\n words100 = []\r\n for i in range(100):\r\n if not words_sorted[i][0].isdigit():\r\n print (\" \" + str(i+1) + \". \" + words_sorted[i][0] + \" - \" + str(words_sorted[i][1]))\r\n words100.append((words_sorted[i][0], words_sorted[i][1]))\r\n print (\"\\n\")\r\n\r\n print (\" Total Emojis Sent: \" + str(TOTAL_EMOJI_SENT))\r\n print (\" Total Emojies Received: \" + str(TOTAL_EMOJI_RCVD) + \"\\n\")\r\n\r\n print (\" Total Emojies: \" + str(TOTAL_EMOJI_SENT+TOTAL_EMOJI_RCVD) + \"\\n\")\r\n \r\n print (\"Most Used Emojis: \")\r\n emojis_all = []\r\n for j in range(len(emojis_sorted)):\r\n print (\" \" + str(j+1)+\". 
\" + emojis_sorted[j][0] + \" - \" + str(emojis_sorted[j][1]))\r\n emojis_all.append((emojis_sorted[j][0], emojis_sorted[j][1]))\r\n print (\"\\n\")\r\n list_times = [\"00\",\"01\",\"02\",\"03\",\"04\",\"05\",\"06\",\"07\",\"08\",\"09\",\"10\",\"11\",\r\n \"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\"]\r\n print (\" Message Freq per Hour: | SENT | RECEIVED | TOTAL \")\r\n msg_freq_hr = []\r\n for hr in list_times:\r\n print (\" \"+hr+\":00 - \"+hr+\":59 :: \" + str(time_dict[hr][0]) \r\n + \" | \" + str(time_dict[hr][1]) \r\n + \" | \" + str(time_dict[hr][2]))\r\n msg_freq_hr.append(\r\n (\"\"+hr+\":00 - \"+hr+\":59\",\r\n int(time_dict[hr][0]),\r\n int(time_dict[hr][1]),\r\n int(time_dict[hr][2]))\r\n )\r\n \r\n\r\n analysis_list = [\r\n (\"Total Messages Sent\", int(MSG_SENT)),\r\n (\"Total Messages Received\", int(MSG_RCVD)),\r\n (\"Total Messages\", int(MSG_RCVD + MSG_SENT)),\r\n (\"\",\"\"),\r\n (\"Total Images Sent\", int(IMG_SENT)),\r\n (\"Total Images Received\", int(IMG_RCVD)),\r\n (\"Total Images\", int(IMG_RCVD + IMG_SENT)),\r\n (\"\",\"\"),\r\n (\"Total Chars Sent\", int(TOTAL_CHARS_SENT)),\r\n (\"Total Chars Received\", int(TOTAL_CHARS_RCVD)),\r\n (\"Total Characters\", int(TOTAL_CHARS_RCVD + TOTAL_CHARS_SENT)),\r\n (\"\",\"\"),\r\n (\"Total Words Sent\", int(TOTAL_WORDS_SENT)),\r\n (\"Total Words Received\", int(TOTAL_WORDS_RCVD)),\r\n (\"Total Words\", int(TOTAL_WORDS_RCVD + TOTAL_WORDS_SENT)),\r\n (\"\",\"\"),\r\n (\"Avg Sent Msg Length\", int(TOTAL_CHARS_SENT//MSG_SENT)),\r\n (\"Avg Received Msg Length\", int(TOTAL_CHARS_RCVD//MSG_RCVD)),\r\n (\"\",\"\"),\r\n (\"Total Emojis Sent\", int(TOTAL_EMOJI_SENT)),\r\n (\"Total Emojies Received\", int(TOTAL_EMOJI_RCVD)),\r\n (\"Total Emojies\", int(TOTAL_EMOJI_SENT+TOTAL_EMOJI_RCVD))\r\n ]\r\n\r\n ## Write the data to the excel file\r\n row = 0\r\n col = 0\r\n for analysis, number in analysis_list:\r\n ws.write(row, col, analysis)\r\n ws.write(row, col+1, number)\r\n row += 1\r\n\r\n row = 0\r\n col = 3\r\n ws.write(row, col, \"Most Used Words\")\r\n ws.write(row, col+1, \"Frequency\")\r\n\r\n row = 1\r\n col = 3\r\n for word, freq in words100:\r\n ws.write(row, col, word)\r\n ws.write(row, col+1, freq)\r\n row += 1\r\n\r\n row = 0\r\n col = 6\r\n ws.write(row, col, \"Most Used Emojis\")\r\n ws.write(row, col+1, \"Frequency\")\r\n\r\n row = 1\r\n col = 6\r\n for emoji, freq in emojis_all:\r\n ws.write(row, col, emoji)\r\n ws.write(row, col+1, freq)\r\n row += 1\r\n\r\n row = 0\r\n col = 9\r\n ws.write(row, col, \"Messages Per Hour\")\r\n ws.write(row, col+1, \"Sent\")\r\n ws.write(row, col+2, \"Received\")\r\n ws.write(row, col+3, \"Total\")\r\n\r\n row = 1\r\n col = 9\r\n for hour, sent, recv, total in msg_freq_hr:\r\n ws.write(row, col, hour)\r\n ws.write(row, col+1, sent)\r\n ws.write(row, col+2, recv)\r\n ws.write(row, col+3, total)\r\n row += 1\r\n\r\n wb.close()\r\n\r\nfileAnalyzer(f)\r\n" } ]
2
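One caveat about the counters in analyze above: len(re.findall("[^\w*'*\w]", str_msg)) matches a character class of separator characters, so TOTAL_WORDS_SENT/RCVD effectively count delimiters rather than words. A minimal sketch that counts words and emoji directly; the word regex and the emoji range (borrowed from the script's single character class) are illustrative, not the author's:

```python
import re

WORD_RE = re.compile(r"[\w']+")
EMOJI_RE = re.compile(u"[\U0001F100-\U0001F7EC]")  # rough range, as in the script

def count_tokens(message):
    """Return (word_count, emoji_count) for one message body."""
    return len(WORD_RE.findall(message)), len(EMOJI_RE.findall(message))

print(count_tokens(u"can't stop won't stop \U0001F600"))  # -> (4, 1)
```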
majian7654/resnet-in-tensorflow
https://github.com/majian7654/resnet-in-tensorflow
bfd1c64dd054a850e62151c003555e5501a77f76
ad676fec48d71eb72eff6f956ed059b38919905d
511215f8c0ddbfced62bfcf097e68dfac77ffd57
refs/heads/master
2020-04-02T16:22:08.924790
2018-10-30T01:54:44
2018-10-30T01:54:44
154,609,399
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6181684136390686, "alphanum_fraction": 0.6395863890647888, "avg_line_length": 41.3125, "blob_id": "aa89365a11bba9ff584c684e8b7e5c1689365df6", "content_id": "3a5a4e79e4980cbf0112514298001113c3c5b119", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1354, "license_type": "permissive", "max_line_length": 92, "num_lines": 32, "path": "/single_demo.py", "repo_name": "majian7654/resnet-in-tensorflow", "src_encoding": "UTF-8", "text": "#!/Users/majian/anaconda/bin/python\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom resnet import *\n\nif __name__=='__main__':\n #build graph\n name_op = tf.placeholder(dtype = tf.string)\n image_contents = tf.read_file(name_op)\n image = tf.image.decode_jpeg(image_contents, channels = 3)\n image = tf.image.resize_images(image, (FLAGS.img_height, FLAGS.img_width))#need to focus\n image = tf.image.per_image_standardization(image)\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image,(-1,FLAGS.img_height, FLAGS.img_width,3))#need to focus\n logits_op = inference(image, reuse=False, is_train=False)\n prob_op = tf.nn.softmax(logits_op)\n #input image\n name = os.path.join('./dataset/model_test/','0a6b4bea357bad4e3943da9780f5856a.jpg')\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state('./logs_test_110')\n if ckpt and ckpt.model_checkpoint_path:\n saver = tf.train.Saver()\n saver.restore(sess,ckpt.model_checkpoint_path)\n logits = sess.run(logits_op, feed_dict={name_op:name})\n print('logits:\\n', logits)\n prob = sess.run(prob_op ,feed_dict = {name_op:name})[0,:]\n label, prob = np.argmax(prob), np.max(prob)\n print(label,prob)\n else:\n print('no checkpoint found!!!')\n" }, { "alpha_fraction": 0.7201257944107056, "alphanum_fraction": 0.7342767119407654, "avg_line_length": 20.931034088134766, "blob_id": "3d4627edfb8962e8da288a0606609d48651da094", "content_id": "90924e403d5dacd5452b16190a6e950f02d200c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 636, "license_type": "permissive", "max_line_length": 128, "num_lines": 29, "path": "/README.md", "repo_name": "majian7654/resnet-in-tensorflow", "src_encoding": "UTF-8", "text": "# ResNet in Tensorflow\n\nthis repo is an implement of resent!!!\n\nthe origin repo is https://github.com/wenxinxu/resnet-in-tensorflow\n\ni modify some part of the repo:\n\n1. modify batch_norm:\n\n use tf.layer.batch_norm, and this is more convenient for using batchnorm\n\n in single test\n\n2. add single_demo.py:\n\n Single_demo.py is for testing a single picture\n\n3. Add multi_demo.py:\n\n multi_demo.py is for batch test!!!\n\n4. modify resnet.py:\n\n Make the code is for resnet34\n\n5. modify cifar10_train.py:\n\n Modify the check if there is checkpoint! if there is , restore and continue training instead of using the hyper_parameters!!! 
" }, { "alpha_fraction": 0.6013612151145935, "alphanum_fraction": 0.6198347210884094, "avg_line_length": 39.33333206176758, "blob_id": "b73d77f0c9ed949a737a4c5c4e3776f723206887", "content_id": "479b0e6f9aab25c3feb482e70da941b29610791a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2057, "license_type": "permissive", "max_line_length": 170, "num_lines": 51, "path": "/mulit_demo.py", "repo_name": "majian7654/resnet-in-tensorflow", "src_encoding": "UTF-8", "text": "#!/Users/majian/anaconda/bin/python\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom resnet import *\nimport json\n\ndef multi_test(label_path = './dataset'):\n data_dict = {}\n labels =[]\n label_path = os.path.join(label_path,'ai_challenger_pdr2018_validationset_20180905/AgriculturalDisease_validationset/AgriculturalDisease_validation_annotations.json')\n with open(label_path, 'r') as f:\n label_list = json.load(f)\n for image in label_list:\n data_dict[image['image_id']] = int(image['disease_class'])\n \n total_num = 0\n acc_num = 0\n #build graph\n name_op = tf.placeholder(dtype = tf.string)\n image_contents = tf.read_file(name_op)\n image = tf.image.decode_jpeg(image_contents, channels = 3)\n image = tf.image.resize_images(image, (FLAGS.img_height, FLAGS.img_width))#need to focus\n image = tf.image.per_image_standardization(image)\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image,(-1,FLAGS.img_height, FLAGS.img_width,3))#need to focus\n logits_op = inference(image, reuse=False, is_train=False)\n prob_op = tf.nn.softmax(logits_op)\n \n #input image\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state('./logs_test_110')\n if ckpt and ckpt.model_checkpoint_path:\n saver = tf.train.Saver()\n saver.restore(sess,ckpt.model_checkpoint_path)\n for name, label in data_dict.items():\n name = os.path.join('./dataset/ai_challenger_pdr2018_validationset_20180905/AgriculturalDisease_validationset/images', name)\n logits = sess.run(logits_op, feed_dict={name_op:name})\n prob = sess.run(prob_op ,feed_dict = {name_op:name})[0,:]\n pred, prob = np.argmax(prob), np.max(prob)\n total_num += 1\n if pred == label:\n acc_num +=1\n print(pred) \n print('acc:',acc_num / total_num)\n else:\n print('no checkpoint found!!!')\n\nif __name__=='__main__':\n multi_test()\n" }, { "alpha_fraction": 0.5534164905548096, "alphanum_fraction": 0.5688651204109192, "avg_line_length": 41.715736389160156, "blob_id": "471210b0c035712f857415b10e1976fe619b13fb", "content_id": "0e8e7304c016284e65b8448bde6a310d02594fe3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8415, "license_type": "permissive", "max_line_length": 129, "num_lines": 197, "path": "/input_data.py", "repo_name": "majian7654/resnet-in-tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nimport os\nimport json\nimport hyper_parameters\n\ndef read_cifar10(data_dir, is_train, batch_size, shuffle):\n \"\"\"\n Read cifar10 data\n :param data_dir: data directory\n :param is_train: input train data or test data\n :param batch_size: batch size\n :param shuffle: whether shuffle the data\n :return: label: 1D tensor, [batch_size, n_classes], one-hot coding, tf.int32\n images: 4D tensor, [batch_size, width, height, 3], tf.float32\n \"\"\"\n\n img_width = 32\n img_height = 32\n img_channel = 3\n label_bytes = 1\n image_bytes = img_width * img_height * img_channel\n\n with 
tf.name_scope('input'):\n\n data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')\n\n if is_train:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % ii) for ii in np.arange(1, 6)]\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n\n filename_queue = tf.train.input_producer(filenames)\n reader = tf.FixedLengthRecordReader(label_bytes + image_bytes)\n key, value = reader.read(filename_queue)\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n label = tf.slice(record_bytes, [0], [label_bytes])\n label = tf.cast(label, tf.int32)\n\n image_raw = tf.slice(record_bytes, [label_bytes], [image_bytes])\n image_raw = tf.reshape(image_raw, [img_channel, img_height, img_width])\n image = tf.transpose(image_raw, (1, 2, 0)) # convert D/H/W -> H/W/D\n image = tf.cast(image, tf.float32)\n\n # normalization: (x - mean) / var\n image = tf.image.per_image_standardization(image)\n\n # tf.train.shuffle_batch() Args:\n #\n # tensors: The list or dictionary of tensors to enqueue.\n # batch_size: The new batch size pulled from the queue.\n # capacity: An integer. The maximum number of elements in the queue.\n # min_after_dequeue: Minimum number elements in the queue after a dequeue,\n # used to ensure a level of mixing of elements.\n # num_threads: The number of threads enqueuing tensor_list.\n if shuffle:\n images, label_batch = tf.train.shuffle_batch([image, label],\n batch_size=batch_size,\n capacity=20000,\n min_after_dequeue=3000,\n num_threads=64)\n else:\n images, label_batch = tf.train.batch([image, label],\n batch_size=batch_size,\n capacity=2000,\n num_threads=64)\n # one-hot coding\n return images, label_batch\n\ndef read_armyData(file_list, is_train, batch_size, shuffle):\n \"\"\"\n Read ArmyData\n :param file_list: a text containing names of images,\n such as :[['./dataset/deq.jpg',4],['./dataset/qwe.jpg', 6]]\n :param is_train: input train data or test data\n :param batch_size: batch size\n :param shuffle: whether shuffle the data\n :return: label: 1D tensor, [batch_size, n_classes], one-hot coding, tf.int32\n images: 4D tensor, [batch_size, width, height, 3], tf.float32\n \"\"\"\n img_width = 3500\n img_height = 2400\n img_channel = 3\n names = np.loadtxt(file_list, dtype=bytes,unpack = True, usecols = (0)).astype(str)\n #get file name\n namelist = []\n for name in names:\n name = os.path.join(config.dataPath,name)\n namelist.append(name)\n labels = np.loadtxt(file_list,unpack = True,usecols = (1))\n \n imgName = tf.cast(namelist, tf.string)\n labels = tf.cast(labels,tf.int32)\n input_queue = tf.train.slice_input_producer([imgName, labels])\n label = input_queue[1]\n image_contents = tf.read_file(input_queue[0])\n image = tf.image.decode_jpeg(image_contents,channels = 3)\n \n #need to focus, this is where to consider\n image = tf.image.resize_images(image, (img_height,img_width))\n image = tf.cast(image, tf.float32)\n image = tf.image.per_image_standardization(image)\n if shuffle:\n images, label_batch = tf.train.shuffle_batch([image, label],\n batch_size=batch_size,\n capacity=20000,\n min_after_dequeue=3000,\n num_threads=64)\n else:\n images, label_batch = tf.train.batch([image, label],\n batch_size=batch_size,\n capacity=2000,\n num_threads=64)\n # one-hot coding\n n_classes = config.N_CLASSES\n label_batch = tf.one_hot(label_batch, depth=n_classes)\n label_batch = tf.cast(label_batch, dtype=tf.int32)\n label_batch = tf.reshape(label_batch, [batch_size, n_classes])\n\n return images, label_batch\n\n\ndef read_plantData(label_path = './dataset', is_train=True, 
batch_size=32, shuffle=True):\n data_dict = {}\n labels =[]\n if is_train:#train data\n label_path = os.path.join(label_path,'AgriculturalDisease_trainingset/AgriculturalDisease_train_annotations.json')\n else:#val data\n label_path = os.path.join(label_path,'AgriculturalDisease_validationset/AgriculturalDisease_validation_annotations.json')\n\n with open(label_path, 'r') as f:\n label_list = json.load(f)\n for image in label_list:\n data_dict[image['image_id']] = int(image['disease_class'])\n\n names = data_dict.keys()\n for _ in data_dict.values():\n labels.extend([_])\n\n namelist = []\n for name in names:\n name = os.path.join(os.path.dirname(label_path),'images',name)\n namelist.append(name)\n \n print(namelist)\n imgName = tf.cast(namelist, tf.string)\n labels = tf.cast(labels,tf.int32)\n input_queue = tf.train.slice_input_producer([imgName, labels])\n label = input_queue[1]\n image_contents = tf.read_file(input_queue[0])\n image = tf.image.decode_jpeg(image_contents,channels = 3)\n \n #need to focus, this is where to consider\n image = tf.image.resize_image_with_crop_or_pad(image, hyper_parameters.FLAGS.img_height, hyper_parameters.FLAGS.img_width)\n image = tf.cast(image, tf.float32)\n image = tf.image.per_image_standardization(image)\n if shuffle:\n images, label_batch = tf.train.shuffle_batch([image, label],\n batch_size=batch_size,\n capacity=20000,\n min_after_dequeue=3000,\n num_threads=64)\n else:\n images, label_batch = tf.train.batch([image, label],\n batch_size=batch_size,\n capacity=2000,\n num_threads=64)\n # one-hot coding\n# n_classes = config.N_CLASSES\n# label_batch = tf.one_hot(label_batch, depth=n_classes)\n# label_batch = tf.cast(label_batch, dtype=tf.int32)\n# label_batch = tf.reshape(label_batch, [batch_size, n_classes])\n#\n return images, label_batch\n\nif __name__=='__main__':\n images_op, label_batch_op = read_plantData(is_train = False,batch_size = 3)\n sess = tf.Session()\n coord = tf.train.Coordinator();\n threads = tf.train.start_queue_runners(sess, coord = coord)\n image,label_batch = sess.run([images_op,label_batch_op])\n print(label_batch)\n\n \n #images_op, label_batch_op = read_cifar10(data_dir='./cifar10_data', is_train=True, batch_size=32, shuffle=True)\n #sess = tf.Session()\n #coord = tf.train.Coordinator();\n #threads = tf.train.start_queue_runners(sess, coord = coord)\n #image,label_batch = sess.run([images_op,label_batch_op])\n #print(image)\n #images, label_batch = read_armyData('./dataset/armydata/filelist.txt',True, batch_size = 32, shuffle = False)\n #sess = tf.Session()\n #coord = tf.train.Coordinator();\n #threads = tf.train.start_queue_runners(sess, coord = coord)\n #label_batch = sess.run(label_batch)\n #print(label_batch)\n" }, { "alpha_fraction": 0.6124837398529053, "alphanum_fraction": 0.6371911764144897, "avg_line_length": 31.04166603088379, "blob_id": "53b08214406f67911c73c52e39d875ae99b41242", "content_id": "f93b8ae41b22e4bf2cd38ccb2f447f64e446b62a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 769, "license_type": "permissive", "max_line_length": 155, "num_lines": 24, "path": "/json2csv.py", "repo_name": "majian7654/resnet-in-tensorflow", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\n\ndef json2csv(jsonPath):\n with open(jsonPath, 'r') as f:\n label_list = json.load(f)\n print(type(label_list[0]))\n #for label in label_list:\n # print(label)\n\n\n\ndef tmp():\n a = ['one','two','three']\n b = [1,2,3]\n english_column = pd.Series(a, 
name='english')\n number_column = pd.Series(b, name='number')\n predictions = pd.concat([english_column, number_column], axis=1)\n #another way to handle\n #save = pd.DataFrame({'english':a,'number':b})\n predictions.to_csv('b.txt',index=False,sep=',')\nif __name__=='__main__':\n tmp()\n #json2csv(jsonPath = './dataset/ai_challenger_pdr2018_trainingset_20180905/AgriculturalDisease_trainingset/AgriculturalDisease_train_annotations.json')\n" } ]
5
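Point 5 of the README above describes a restore-or-start-fresh branch around tf.train.get_checkpoint_state. A minimal TF1-style sketch of that check; the directory name copies the repo's demo scripts, and the function itself is an assumption for illustration, not code from the repo:

```python
import tensorflow as tf

def restore_or_init(sess, saver, ckpt_dir='./logs_test_110'):
    """Resume from the latest checkpoint if one exists, else initialize variables."""
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('restored from %s' % ckpt.model_checkpoint_path)
        return True
    sess.run(tf.global_variables_initializer())
    print('no checkpoint found in %s, training from scratch' % ckpt_dir)
    return False
```

Run once after graph construction and before the training loop, this makes resuming idempotent: the same command either continues from the last saved step or starts clean.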
nidubaba/duhan
https://github.com/nidubaba/duhan
d04c99d3ca70a5e9af27e5f581d3a56c3c056a22
c3ff47d5554d6b4ccb198326098880ff49ecf33a
a00847b2d79fd8571817b613efaf4e7066c0164d
refs/heads/master
2023-04-23T08:30:00.257389
2021-04-23T12:34:00
2021-04-23T12:34:00
359,744,068
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6545454263687134, "alphanum_fraction": 0.7454545497894287, "avg_line_length": 16.33333396911621, "blob_id": "f4ea2092bed3c67a9da72e47f0ca5b0f8d27b386", "content_id": "b83ea12cbdc9a968c09a563c735cb0df58dc9dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 26, "num_lines": 3, "path": "/day3/随机数.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "import random\r\nadc=random.randint(50,150)\r\nprint(adc)\r\n" }, { "alpha_fraction": 0.42748090624809265, "alphanum_fraction": 0.4656488597393036, "avg_line_length": 14.375, "blob_id": "50a6f609d355bb23ea8d6aa5980f811d43a02f1e", "content_id": "8156393e83f5446d5af087e88fa5ed365c742376", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/day3/变量合法.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "\r\n'''\r\n\r\n标识符\t是否合法\t标识符\t是否合法\r\nchar\t合法\t Cy%ty\t 不合法\r\nOax_li\t合法\t $123\t 不合法\r\nfLul\t合法\t 3_3 \t 不合法\r\nBYTE\t合法\t T_T\t 合法\r\n'''" }, { "alpha_fraction": 0.5016393661499023, "alphanum_fraction": 0.5180327892303467, "avg_line_length": 18.33333396911621, "blob_id": "360caca9a98c795c7f5ed79aa2dd632b45ac2c3b", "content_id": "4b03c56c0ba7d1b9d58e291fabd63bb7cd1078e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/day3/密码.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "name=\"root\"\r\nmima=\"admin\"\r\ncount=0\r\nwhile True:\r\n name1=input(\"请输入您的用户名\")\r\n mima=input(\"请输入您的密码\")\r\n count=count+1\r\n if count==3:\r\n print(\"账户被冻结\")\r\n break\r\n if mima==\"admin\"and name1==\"root\":\r\n print(\"登录成功\")\r\n break\r\n else:\r\n print(\"登录失败,用户名或密码错误\")\r\n" }, { "alpha_fraction": 0.4642857015132904, "alphanum_fraction": 0.5153061151504517, "avg_line_length": 18.736841201782227, "blob_id": "e1b374e7831164d5028619d4a8631daed08280e8", "content_id": "823fbfc9cd3e79f2e0a859f9330a459f7fe073b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/day3/猜字.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "import random\r\ndu=random.randint(0,101)\r\ncount=0\r\njinbi=10000\r\nwhile True:\r\n count=count+1\r\n jinbi=jinbi-500\r\n du1=input(\"请输入您要猜的数字:\")\r\n du1=int(du1)\r\n if count==7:\r\n print(\"账户冻结,退出系统\")\r\n break\r\n if du1>du:\r\n print(\"大了\")\r\n elif du1<du:\r\n print(\"小了\")\r\n else:\r\n print(\"回答正确!!!\",\"du\",\"你本次猜了\",count,\"次!!!\",\"剩下\",jinbi,\"金币\")\r\n break" }, { "alpha_fraction": 0.4032633900642395, "alphanum_fraction": 0.41258740425109863, "avg_line_length": 19.61111068725586, "blob_id": "1d987eee8128879a0e0c1ef681fff3332a4a8ede", "content_id": "d03ea2d544b16f55607c1f0ea8d254c5bb7d5d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 58, "num_lines": 18, "path": "/day3/三角形.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "b=input(\"请输入边\")\r\na=input(\"请输入边\")\r\nc=input(\"请输入边\")\r\na=int(a)\r\nb=int(b)\r\nc=int(c)\r\nif a+c>b and b+c>a and a+b>c:\r\n if a==b==c:\r\n 
print(\"等边三角形\")\r\n elif a==b or a==c or c==b:\r\n print(\"等腰三角形\")\r\n elif a*a+b*b==c*c or +c**2 == a**2 or c*c+a**2==b**2:\r\n print(\"直角三角形\")\r\n elif a+b>c or a+c>b or b+c>a:\r\n print(\"构成三角形+++++\")\r\n\r\nelse:\r\n print(\"不构成三角形\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.4663212299346924, "alphanum_fraction": 0.49740931391716003, "avg_line_length": 13.75, "blob_id": "f9b8a02c6c48684576edf5fc8eb316915c6dc4df", "content_id": "065aa453eebb83fc13be22d6f87812652a760ac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 23, "num_lines": 12, "path": "/day3/task3.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "\r\ncount=0\r\nmaxx=0\r\nsum=0\r\nwhile count<10:\r\n count =count+1\r\n a = input(\"请输入一个数\")\r\n a=int(a)\r\n sum=sum+a\r\n b=sum/count,\r\n if a>maxx:\r\n maxx=a\r\n print(sum,b,maxx)\r\n\r\n" }, { "alpha_fraction": 0.47580644488334656, "alphanum_fraction": 0.47580644488334656, "avg_line_length": 20, "blob_id": "1be9d9e1d1bb5c73ed8c93b7ddcc018f1502d729", "content_id": "7aa1e226ef556c2d4ed9bf4294dec4173b8f5473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/个人信息.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "name = input(\"请输入您的姓名:\")\r\nage=input(\"请输入您的年龄\")\r\nsex=input(\"请输入您的性别\")\r\nsg=input(\"请输入您的身高\")\r\nsfzhm=input(\"请输入您的身份证号码\")\r\naddress=input(\"请输入您的居住地址\")\r\ninfo=''' \r\n-----------------个人信息--------------\r\n 您的姓名:%s,\r\n 您的年龄:%s,\r\n 您的性别:%s,\r\n 您的身高: %s,\r\n 您的身份证号码:%s,\r\n 您的居住地址: %s\r\n -------------------------------------\r\n'''\r\nprint(info % (name,age,sex,sg,sfzhm,address) )" }, { "alpha_fraction": 0.26966291666030884, "alphanum_fraction": 0.2977527976036072, "avg_line_length": 11.076923370361328, "blob_id": "513f49881e6909fa01e07420a92ad922c6677776", "content_id": "486d0d7df11800e90bf5a39c4b86740a6cbf2a1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 18, "num_lines": 13, "path": "/day3/两数调换.py", "repo_name": "nidubaba/duhan", "src_encoding": "UTF-8", "text": "A=56\r\nB=78\r\nc=0\r\nwhile True:\r\n a=input(\"+\")\r\n if a==\"+\":\r\n c=A\r\n A=B\r\n B=c\r\n print(A,B)\r\n break\r\n else:\r\n print(\"`\")\r\n\r\n\r\n\r\n\r\n" } ]
8
gyom/draw
https://github.com/gyom/draw
e82d258377b439f952349998eb5678ff6500ed5b
64fc786c0be99347c1eabd36cf072f24c9cb6d89
0731d2076581eb5531135efb25944a209ce933d0
refs/heads/master
2018-05-29T23:46:00.876488
2015-09-11T17:34:25
2015-09-11T17:34:25
41,319,256
0
0
null
2015-08-24T18:14:14
2015-08-22T00:16:30
2015-07-18T13:22:37
null
[ { "alpha_fraction": 0.5871609449386597, "alphanum_fraction": 0.6127994656562805, "avg_line_length": 43.404396057128906, "blob_id": "e26250a88db5fcebacbcbcde023fb30716ee0149", "content_id": "3531312f55ed1ff59670eb18e850e443a14440e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20204, "license_type": "permissive", "max_line_length": 425, "num_lines": 455, "path": "/legion-train-draw.py", "repo_name": "gyom/draw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom __future__ import division, print_function\n\nimport logging\nimport numpy as np\n\nFORMAT = '[%(asctime)s] %(name)-15s %(message)s'\nDATEFMT = \"%H:%M:%S\"\nlogging.basicConfig(format=FORMAT, datefmt=DATEFMT, level=logging.INFO)\n\n\n####################################################\n# Make sure that you get different values for the\n# random seeds when running Blocks.\n# This goes against the way that Blocks is designed\n# at the current time, but it's necessary when\n# having multiple workers.\nimport os\nif os.environ.has_key('MOAB_JOBARRAYINDEX'):\n np.random.seed(int(os.environ['MOAB_JOBARRAYINDEX']))\n print(\"Using MOAB_JOBARRAYINDEX to np.random.seed(%d).\" % int(os.environ['MOAB_JOBARRAYINDEX']))\nelse:\n print(\"Not on helios, or no MOAB_JOBARRAYINDEX present to seed.\")\n\ns0 = np.random.randint(low=0, high=100000)\ns1 = np.random.randint(low=0, high=100000)\nimport blocks\nimport blocks.config\nimport fuel\nblocks.config.config.default_seed = s0\nfuel.config.default_seed = s1\n####################################################\n\n\nimport os\nimport theano\nimport theano.tensor as T\nimport fuel\nimport ipdb\nimport time\nimport cPickle as pickle\n\n#import blocks.extras\n\nfrom argparse import ArgumentParser\nfrom theano import tensor\n\nfrom fuel.streams import DataStream\nfrom fuel.schemes import SequentialScheme, ShuffledScheme\nfrom fuel.transformers import Flatten\n\nfrom blocks.algorithms import GradientDescent, CompositeRule, StepClipping, RMSProp, Adam\nfrom blocks.bricks import Tanh, Identity\nfrom blocks.bricks.cost import BinaryCrossEntropy\nfrom blocks.bricks.recurrent import SimpleRecurrent, LSTM\nfrom blocks.initialization import Constant, IsotropicGaussian, Orthogonal \nfrom blocks.filter import VariableFilter\nfrom blocks.graph import ComputationGraph\nfrom blocks.roles import PARAMETER\nfrom blocks.monitoring import aggregation\nfrom blocks.extensions import FinishAfter, Timing, Printing, ProgressBar\nfrom blocks.extensions.saveload import Checkpoint\nfrom blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring\n#from blocks.extras.extensions.plot import Plot\nfrom blocks.main_loop import MainLoop\nfrom blocks.model import Model\n\n\nfrom legion.blocks_extensions import SharedParamsAutoSync, SharedParamsRateLimited\nfrom legion.blocks_extensions import Timestamp, StopAfterTimeElapsed\n\n\nimport draw.datasets as datasets\nfrom draw.draw import *\nfrom draw.samplecheckpoint import SampleCheckpoint\nfrom draw.partsonlycheckpoint import PartsOnlyCheckpoint\n\n\n#----------------------------------------------------------------------------\n\ndef main(name, dataset, epochs, batch_size, learning_rate, \n attention, n_iter, enc_dim, dec_dim, z_dim, oldmodel,\n max_total_duration, results_root_dir, nosync,\n adam_args_json,\n rmsprop_args_json):\n\n image_size, channels, data_train, data_valid, data_test = datasets.get_data(dataset)\n\n # Sequential scheme as originally implemented.\n #train_stream 
= Flatten(DataStream.default_stream(data_train, iteration_scheme=SequentialScheme(data_train.num_examples, batch_size)))\n #valid_stream = Flatten(DataStream.default_stream(data_valid, iteration_scheme=SequentialScheme(data_valid.num_examples, batch_size)))\n #test_stream = Flatten(DataStream.default_stream(data_test, iteration_scheme=SequentialScheme(data_test.num_examples, batch_size)))\n\n # Shuffled version makes more sense for distributed training.\n train_stream = Flatten(DataStream.default_stream(data_train, iteration_scheme=ShuffledScheme(data_train.num_examples, batch_size)))\n valid_stream = Flatten(DataStream.default_stream(data_valid, iteration_scheme=ShuffledScheme(data_valid.num_examples, batch_size)))\n test_stream = Flatten(DataStream.default_stream(data_test, iteration_scheme=ShuffledScheme(data_test.num_examples, batch_size)))\n\n if name is None:\n name = dataset\n\n img_height, img_width = image_size\n x_dim = channels * img_height * img_width\n\n rnninits = {\n #'weights_init': Orthogonal(),\n 'weights_init': IsotropicGaussian(0.01),\n 'biases_init': Constant(0.),\n }\n inits = {\n #'weights_init': Orthogonal(),\n 'weights_init': IsotropicGaussian(0.01),\n 'biases_init': Constant(0.),\n }\n\n # Configure attention mechanism\n if attention != \"\":\n read_N, write_N = attention.split(',')\n \n read_N = int(read_N)\n write_N = int(write_N)\n read_dim = 2 * channels * read_N ** 2\n\n reader = AttentionReader(x_dim=x_dim, dec_dim=dec_dim,\n channels=channels, width=img_width, height=img_height,\n N=read_N, **inits)\n writer = AttentionWriter(input_dim=dec_dim, output_dim=x_dim,\n channels=channels, width=img_width, height=img_height,\n N=write_N, **inits)\n attention_tag = \"r%d-w%d\" % (read_N, write_N)\n else:\n read_dim = 2*x_dim\n\n reader = Reader(x_dim=x_dim, dec_dim=dec_dim, **inits)\n writer = Writer(input_dim=dec_dim, output_dim=x_dim, **inits)\n\n attention_tag = \"full\"\n\n #----------------------------------------------------------------------\n\n if name is None:\n name = dataset\n\n if os.environ.has_key('MOAB_JOBARRAYINDEX'):\n name = name + (\"-%0.3d\" % int(os.environ['MOAB_JOBARRAYINDEX']))\n\n # Learning rate\n def lr_tag(value):\n \"\"\" Convert a float into a short tag-usable string representation. 
E.g.:\n 0.1 -> 11\n 0.01 -> 12\n 0.001 -> 13\n 0.005 -> 53\n \"\"\"\n exp = np.floor(np.log10(value))\n leading = (\"%e\"%value)[0]\n return \"%s%d\" % (leading, -exp)\n\n lr_str = lr_tag(learning_rate)\n\n subdir = name + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\");\n\n if results_root_dir is not None:\n subdir = os.path.join(subdir, results_root_dir)\n\n longname = \"%s-%s-t%d-enc%d-dec%d-z%d-lr%s\" % (dataset, attention_tag, n_iter, enc_dim, dec_dim, z_dim, lr_str)\n pickle_file = subdir + \"/\" + longname + \".pkl\"\n\n print(\"\\nRunning experiment %s\" % longname)\n print(\" dataset: %s\" % dataset)\n print(\" subdirectory: %s\" % subdir)\n print(\" learning rate: %g\" % learning_rate)\n print(\" attention: %s\" % attention)\n print(\" n_iterations: %d\" % n_iter)\n print(\" encoder dimension: %d\" % enc_dim)\n print(\" z dimension: %d\" % z_dim)\n print(\" decoder dimension: %d\" % dec_dim)\n print(\" batch size: %d\" % batch_size)\n print(\" epochs: %d\" % epochs)\n print(\" max_total_duration: %d\" % max_total_duration)\n print(\" nosync: %s\" % str(nosync))\n print(\" adam_args_json: %s\" % str(adam_args_json))\n print(\" rmsprop_args_json: %s\" % str(rmsprop_args_json))\n print()\n\n #----------------------------------------------------------------------\n\n encoder_rnn = LSTM(dim=enc_dim, name=\"RNN_enc\", **rnninits)\n decoder_rnn = LSTM(dim=dec_dim, name=\"RNN_dec\", **rnninits)\n encoder_mlp = MLP([Identity()], [(read_dim+dec_dim), 4*enc_dim], name=\"MLP_enc\", **inits)\n decoder_mlp = MLP([Identity()], [ z_dim, 4*dec_dim], name=\"MLP_dec\", **inits)\n q_sampler = Qsampler(input_dim=enc_dim, output_dim=z_dim, **inits)\n\n draw = DrawModel(\n n_iter, \n reader=reader,\n encoder_mlp=encoder_mlp,\n encoder_rnn=encoder_rnn,\n sampler=q_sampler,\n decoder_mlp=decoder_mlp,\n decoder_rnn=decoder_rnn,\n writer=writer)\n draw.initialize()\n\n #------------------------------------------------------------------------\n x = tensor.matrix('features')\n \n x_recons, kl_terms = draw.reconstruct(x)\n\n recons_term = BinaryCrossEntropy().apply(x, x_recons)\n recons_term.name = \"recons_term\"\n\n cost = recons_term + kl_terms.sum(axis=0).mean()\n cost.name = \"nll_bound\"\n\n #------------------------------------------------------------\n cg = ComputationGraph([cost])\n params = VariableFilter(roles=[PARAMETER])(cg.variables)\n\n if adam_args_json is not None:\n\n print(\"Setup for Adam with arguments passed by command-line.\")\n import json\n adam_args = json.loads(adam_args_json)\n if learning_rate is not None:\n adam_args['learning_rate'] = learning_rate\n\n algorithm = GradientDescent(\n cost=cost, \n parameters=params,\n step_rule=CompositeRule([\n StepClipping(10.), \n Adam(**adam_args),\n ])\n )\n\n elif rmsprop_args_json is not None:\n\n print(\"Setup for RMSProp with arguments passed by command-line.\")\n import json\n rmsprop_args = json.loads(rmsprop_args_json)\n if learning_rate is not None:\n rmsprop_args['learning_rate'] = learning_rate\n\n algorithm = GradientDescent(\n cost=cost, \n parameters=params,\n step_rule=CompositeRule([\n StepClipping(10.), \n RMSProp(**rmsprop_args),\n ])\n )\n\n else:\n\n print(\"Setup for Adam by default.\")\n # default original behavior\n algorithm = GradientDescent(\n cost=cost, \n parameters=params,\n step_rule=CompositeRule([\n StepClipping(10.), \n Adam(learning_rate),\n ])\n #step_rule=RMSProp(learning_rate),\n #step_rule=Momentum(learning_rate=learning_rate, momentum=0.95)\n )\n\n\n\n 
#------------------------------------------------------------------------\n # Setup monitors\n monitors = [cost]\n for t in range(n_iter):\n kl_term_t = kl_terms[t,:].mean()\n kl_term_t.name = \"kl_term_%d\" % t\n\n #x_recons_t = T.nnet.sigmoid(c[t,:,:])\n #recons_term_t = BinaryCrossEntropy().apply(x, x_recons_t)\n #recons_term_t = recons_term_t.mean()\n #recons_term_t.name = \"recons_term_%d\" % t\n\n monitors +=[kl_term_t]\n\n train_monitors = monitors[:]\n train_monitors += [aggregation.mean(algorithm.total_gradient_norm)]\n train_monitors += [aggregation.mean(algorithm.total_step_norm)]\n # Live plotting...\n plot_channels = [\n [\"train_nll_bound\", \"test_nll_bound\"],\n [\"train_kl_term_%d\" % t for t in range(n_iter)],\n #[\"train_recons_term_%d\" % t for t in range(n_iter)],\n [\"train_total_gradient_norm\", \"train_total_step_norm\"]\n ]\n\n #------------------------------------------------------------------------\n # Setup for legion\n\n cg = ComputationGraph(cost)\n params_to_sync = {}\n #cg.variables\n counter = 0\n print(\"---- cg.parameters ----\")\n for p in cg.parameters:\n # `p` is of type theano.sandbox.cuda.var.CudaNdarraySharedVariable\n\n # Warning. This is not as deterministic as we would want.\n # For now, however, we don't have much of a choice.\n new_name = p.name\n while params_to_sync.has_key(new_name):\n counter += 1\n new_name = p.name + (\"_%d\" % counter)\n\n params_to_sync[new_name] = p\n print(\"Parameter %s now referred to as %s.\" % (p.name, new_name))\n #import pdb; pdb.set_trace()\n print(\"---- --.---------- ----\")\n\n #------------------------------------------------------------\n\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n\n extensions = [\n Timing(),\n FinishAfter(after_n_epochs=epochs),\n TrainingDataMonitoring(\n train_monitors, \n prefix=\"train\",\n after_epoch=True),\n# DataStreamMonitoring(\n# monitors,\n# valid_stream,\n## updates=scan_updates,\n# prefix=\"valid\"),\n DataStreamMonitoring(\n monitors,\n test_stream,\n# updates=scan_updates, \n prefix=\"test\"),\n PartsOnlyCheckpoint(\"{}/{}\".format(subdir, name), before_training=True, after_epoch=True, save_separately=['log', 'model'])\n #SampleCheckpoint(image_size=image_size[0], channels=channels, save_subdir=subdir, before_training=True, after_epoch=True),\n ]\n\n if not nosync:\n # With parameter sync on legion,\n #extensions.append(SharedParamsRateLimited(params=params_to_sync, before_training=True, alpha=1.0, beta=0.0, maximum_rate=1.0))\n #extensions.append(SharedParamsRateLimited(params=params_to_sync, before_training=True, every_n_batches=4, alpha=0.5, beta=0.5, maximum_rate=0.2, want_sync_timing_log=True))\n extensions.append(SharedParamsRateLimited(params=params_to_sync, before_training=True, every_n_batches=1, alpha=0.99, beta=0.01, maximum_rate=0.2, want_sync_timing_log=True))\n\n\n extensions = extensions + [StopAfterTimeElapsed(every_n_batches=4, total_duration=max_total_duration),\n # Timing information to facilitate plotting.\n Timing(every_n_epochs=1),\n Timestamp(every_n_batches=4),\n # Plot(name, channels=plot_channels),\n ProgressBar(),\n Printing()]\n\n main_loop = MainLoop(\n model=Model(cost),\n data_stream=train_stream,\n algorithm=algorithm,\n extensions=extensions)\n\n\n if oldmodel is not None:\n print(\"Initializing parameters with old model %s\"%oldmodel)\n with open(oldmodel, \"rb\") as f:\n oldmodel = pickle.load(f)\n main_loop.model.set_parameter_values(oldmodel.get_parameter_values())\n del oldmodel\n\n 
main_loop.run()\n\n#-----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--name\", type=str, dest=\"name\",\n default=None, help=\"Name for this experiment\")\n parser.add_argument(\"--dataset\", type=str, dest=\"dataset\",\n default=\"bmnist\", help=\"Dataset to use: [bmnist|mnist|cifar10]\")\n parser.add_argument(\"--epochs\", type=int, dest=\"epochs\",\n default=100, help=\"Number of training epochs to do\")\n parser.add_argument(\"--bs\", \"--batch-size\", type=int, dest=\"batch_size\",\n default=100, help=\"Size of each mini-batch\")\n parser.add_argument(\"--lr\", \"--learning-rate\", type=float, dest=\"learning_rate\",\n default=1e-3, help=\"Learning rate\")\n parser.add_argument(\"--attention\", \"-a\", type=str, default=\"\",\n help=\"Use attention mechanism (read_window,write_window)\")\n parser.add_argument(\"--niter\", type=int, dest=\"n_iter\",\n default=10, help=\"No. of iterations\")\n parser.add_argument(\"--enc-dim\", type=int, dest=\"enc_dim\",\n default=256, help=\"Encoder RNN state dimension\")\n parser.add_argument(\"--dec-dim\", type=int, dest=\"dec_dim\",\n default=256, help=\"Decoder RNN state dimension\")\n parser.add_argument(\"--z-dim\", type=int, dest=\"z_dim\",\n default=100, help=\"Z-vector dimension\")\n parser.add_argument(\"--oldmodel\", type=str,\n help=\"Use a model pkl file created by a previous run as a starting point for all parameters\")\n parser.add_argument(\"--max_total_duration\", type=int, dest=\"max_total_duration\",\n default=999999, help=\"Ends the training when max_total_duration seconds have elapsed. Used for distributed training.\")\n parser.add_argument(\"--results_root_dir\", type=str, dest=\"results_root_dir\",\n default=None, help=\"Specifies a directory in which the results are to be put. Useful on a cluster like helios.\")\n parser.add_argument(\"--nosync\", action='store_true',\n help=\"Tells the client to avoid syncing with the parameter server. 
By default, we sync.\")\n parser.add_argument(\"--adam_args_json\", type=str, dest=\"adam_args_json\",\n default=None, help=\"Sets the training method to be Adam and specifies the parameters for Blocks in json format.\")\n parser.add_argument(\"--rmsprop_args_json\", type=str, dest=\"rmsprop_args_json\",\n default=None, help=\"Sets the training method to be RMSProp and specifies the parameters for Blocks in json format.\")\n args = parser.parse_args()\n main(**vars(args))\n\n\n\n\n\n\"\"\"\n\npython legion-train-draw.py --name patate --dataset=cifar10 --epochs=10 --bs=512 --lr=1e-3 --niter=10 --max_total_duration=60\n\n\nlegion legion-train-draw.py --allocation=\"jvb-000-ag\" --instances=4 --walltime=6:00:00 --user_script_args=\"--name patate --dataset=cifar10 --epochs=10 --bs=512 --lr=1e-3 --niter=10 --max_total_duration=60\";\n\n\nlegion legion-train-draw.py --allocation=\"jvb-000-ag\" --instances=1 --debug --debug_devices=gpu0 --walltime=12:00:00 --user_script_args=\"--name patate --dataset=cifar10 --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=60\";\n\n\nlegion ../draw/legion-train-draw.py --job_name=4workers_12h --allocation=\"jvb-000-ag\" --instances=4 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results --name=4workers_12h --dataset=cifar10 --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=36000\";\nlegion ../draw/legion-train-draw.py --job_name=2workers_12h --allocation=\"jvb-000-ag\" --instances=2 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results --name=2workers_12h --dataset=cifar10 --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=36000\";\nlegion ../draw/legion-train-draw.py --job_name=1workers_12h --allocation=\"jvb-000-ag\" --instances=1 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results --name=1workers_12h --dataset=cifar10 --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=36000\";\n\n\nlegion ../draw/legion-train-draw.py --job_name=mnist_4workers_12h --allocation=\"jvb-000-ag\" --instances=4 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results_mnist --name=mnist_4workers_12h --dataset=mnist --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=36000\";\n\nlegion ../draw/legion-train-draw.py --job_name=mnist_2workers_12h --allocation=\"jvb-000-ag\" --instances=2 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results_mnist --name=mnist_2workers_12h --dataset=mnist --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=36000\";\n\nlegion ../draw/legion-train-draw.py --job_name=mnist_1workers_12h --allocation=\"jvb-000-ag\" --instances=1 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results_mnist --name=mnist_1workers_12h --dataset=mnist --epochs=100000 --bs=512 --lr=1e-3 --niter=64 --max_total_duration=36000\";\n\n\n\n\n\nlegion ../draw/legion-train-draw.py --job_name=svhn2_step01_1workers_12h --allocation=\"jvb-000-ag\" --instances=1 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/alaingui/draw_experiment/results_svhn2 --name=svhn2_step01_1workers_12h --max_total_duration=36000 --dataset=svhn2 --attention=5,5 --niter=32 --lr=3e-4 --enc-dim=512 --dec-dim=512\"\n\nlegion ../draw/legion-train-draw.py --job_name=svhn2_1workers_12h --allocation=\"jvb-000-ag\" --instances=1 --debug 
--debug_devices=gpu0 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/dpln/draw_experiment/results_svhn2 --name=svhn2_1workers_12h --max_total_duration=36000 --dataset=svhn2 --attention=5,5 --niter=32 --lr=3e-4 --enc-dim=512 --dec-dim=512\"\n\n\n\nlegion ../draw/legion-train-draw.py --job_name=svhn2_1workers_12h --allocation=\"jvb-000-ag\" --instances=1 --debug --debug_devices=gpu0 --walltime=12:00:00 --user_script_args=\"--results_root_dir=/home/dpln/Documents/draw_experiment/results_svhn2 --name=svhn2_1workers_12h --max_total_duration=36000 --dataset=svhn2 --attention=5,5 --niter=32 --lr=3e-4 --enc-dim=512 --dec-dim=512 --rmsprop_args_json='{\\\"decay_rate\\\":0.999}'\"\n\n\n\"\"\"\n" }, { "alpha_fraction": 0.4835079610347748, "alphanum_fraction": 0.5537446737289429, "avg_line_length": 35.02797317504883, "blob_id": "1b87d5cfed6f8f217de2ea61283d21fa743576cb", "content_id": "a69452add7d37af402f4161a4259005e636715f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5154, "license_type": "permissive", "max_line_length": 105, "num_lines": 143, "path": "/plot.py", "repo_name": "gyom/draw", "src_encoding": "UTF-8", "text": "\n\nimport os\nimport pickle\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport pylab\nimport matplotlib.pyplot as plt\n\n\n# TODO : Maybe add points to indicate sync points\n# (to convince ourselves that there are plenty of those).\n\n\ndef run_mnist():\n\n results_dir = \"/home/dpln/draw_experiment/results_mnist\"\n\n L_res1 = {'logs' : [\"mnist_step02_1workers_12h-001_log\"], 'color' : \"#ff33ff\", 'nbr_workers' : 1}\n L_res2 = {'logs' : [\"mnist_step02_2workers_12h-001_log\",\n \"mnist_step02_2workers_12h-002_log\"], 'color' : \"#7f00ff\", 'nbr_workers' : 2}\n L_res4 = {'logs' : [\"mnist_step02_4workers_12h-001_log\",\n \"mnist_step02_4workers_12h-002_log\",\n \"mnist_step02_4workers_12h-003_log\",\n \"mnist_step02_4workers_12h-004_log\"], 'color' : \"#000099\", 'nbr_workers' : 4}\n\n L_res = [L_res1, L_res2, L_res4]\n for channel_name in [\"train_nll_bound\", \"test_nll_bound\"]:\n run(results_dir, channel_name, L_res)\n\ndef run_cifar10():\n\n results_dir = \"/home/dpln/draw_experiment/results_cifar10\"\n\n L_res1 = {'logs' : [\"cifar10_step02_1workers_12h-001_log\"], 'color' : \"#ff33ff\", 'nbr_workers' : 1}\n L_res2 = {'logs' : [\"cifar10_step02_2workers_12h-001_log\",\n \"cifar10_step02_2workers_12h-002_log\"], 'color' : \"#7f00ff\", 'nbr_workers' : 2}\n L_res4 = {'logs' : [\"cifar10_step02_4workers_12h-001_log\",\n \"cifar10_step02_4workers_12h-002_log\",\n \"cifar10_step02_4workers_12h-003_log\",\n \"cifar10_step02_4workers_12h-004_log\"], 'color' : \"#000099\", 'nbr_workers' : 4}\n\n L_res = [L_res1, L_res2, L_res4]\n for channel_name in [\"train_nll_bound\", \"test_nll_bound\"]:\n run(results_dir, channel_name, L_res)\n\ndef run_svhn2():\n\n results_dir = \"/home/dpln/draw_experiment/results_svhn2\"\n\n L_resA = {'logs' : [\"svhn2_step01_Aworkers_12h-001_log\"], 'color' : \"#dd77dd\", 'nbr_workers' : -1}\n L_res1 = {'logs' : [\"svhn2_step01_1workers_12h-001_log\"], 'color' : \"#ff33ff\", 'nbr_workers' : 1}\n L_res2 = {'logs' : [\"svhn2_step01_2workers_12h-001_log\",\n \"svhn2_step01_2workers_12h-002_log\"], 'color' : \"#7f00ff\", 'nbr_workers' : 2}\n L_res4 = {'logs' : [\"svhn2_step01_4workers_12h-001_log\",\n \"svhn2_step01_4workers_12h-002_log\",\n \"svhn2_step01_4workers_12h-003_log\",\n \"svhn2_step01_4workers_12h-004_log\"], 'color' : \"#000099\", 
'nbr_workers' : 4}\n L_res8 = {'logs' : [\"svhn2_step01_8workers_12h-001_log\",\n \"svhn2_step01_8workers_12h-002_log\",\n \"svhn2_step01_8workers_12h-003_log\",\n \"svhn2_step01_8workers_12h-004_log\",\n \"svhn2_step01_8workers_12h-005_log\",\n \"svhn2_step01_8workers_12h-006_log\",\n \"svhn2_step01_8workers_12h-007_log\",\n \"svhn2_step01_8workers_12h-008_log\"], 'color' : \"#001199\", 'nbr_workers' : 8}\n\n L_res = [L_resA, L_res1, L_res2, L_res4, L_res8]\n for channel_name in [\"train_nll_bound\", \"test_nll_bound\"]:\n run(results_dir, channel_name, L_res)\n\n\ndef run(results_dir, channel_name, L_res):\n\n for res in L_res:\n for logpath in res['logs']:\n assert os.path.exists(os.path.join(results_dir, logpath)), os.path.join(results_dir, logpath)\n\n\n pylab.hold(True)\n\n\n for res in L_res:\n color = res['color']\n nbr_workers = res['nbr_workers']\n\n L_domain = []\n L_X = []\n earliest_timestamp = 0.0\n for logpath in res['logs']: \n (domain, X) = get_stuff_from_pickle(os.path.join(results_dir, logpath), channel_name)\n L_domain.append(domain)\n L_X.append(X)\n earliest_timestamp = np.min([earliest_timestamp, domain.min()])\n\n\n for domain, X in zip(L_domain, L_X):\n print \"domain has shape %s\" % (str(domain.shape),)\n print \"X has shape %s\" % (str(X.shape),)\n h = pylab.plot(domain - earliest_timestamp, X, c=color, label=nbr_workers)\n\n\n plt.legend()\n #if criterion == 'error_rate':\n # pylab.ylim(ymin=0.0, ymax=1.0)\n #elif criterion == 'cost':\n # pylab.ylim(ymin=0.0)\n\n outputfile = os.path.join(results_dir, \"%s.png\" % channel_name)\n\n pylab.draw()\n pylab.savefig(outputfile, dpi=150)\n pylab.close()\n print \"Wrote %s.\" % outputfile\n\n\ndef get_stuff_from_pickle(abspath, channel_name):\n\n E = pickle.load(open(abspath, \"r\"))\n\n L_t = []\n L_x = []\n for (s, data) in sorted(E.items(), key=lambda e: e[0]):\n if data.has_key(channel_name):\n\n if data.has_key('timestamp'):\n L_t.append(data['timestamp'])\n L_x.append(data[channel_name])\n else:\n print \"Found entry with a %s but no timestamp at s=%d.\" % (channel_name, s)\n print data.keys()\n \n\n return (np.array(L_t), np.array(L_x))\n\n\n\n\nif __name__ == \"__main__\":\n #run_mnist()\n #run_cifar10()\n run_svhn2()\n" } ]
2
alexismorin/maya-game-engine-autosetup
https://github.com/alexismorin/maya-game-engine-autosetup
5a53d45703edd3147348e540a1e8974505b57010
4eec0419206811e02607862cb15fb113aab2c340
19984b6d0461afb022209610bf42a4fcb938def9
refs/heads/master
2020-05-18T03:50:00.890856
2019-04-29T23:34:05
2019-04-29T23:34:05
184,156,876
15
1
null
null
null
null
null
[ { "alpha_fraction": 0.5646551847457886, "alphanum_fraction": 0.5775862336158752, "avg_line_length": 22.299999237060547, "blob_id": "6eb2efd84519fa603ca1aba3c6ec05caf435edba", "content_id": "ef8ca5364465d8d9f72da9aefc8467c99dc127fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "permissive", "max_line_length": 65, "num_lines": 10, "path": "/setupUnity.py", "repo_name": "alexismorin/maya-game-engine-autosetup", "src_encoding": "UTF-8", "text": "import maya.cmds as cmds\n\ndef main():\n cmds.currentUnit( l='m' )\n cmds.currentUnit( t='ntsc' )\n cmds.upAxis( ax='y' )\n cmds.grid( reset=True )\n cmds.grid( s=5,d= 4,sp=1 , dab = True,ddl = True, dpl = True)\n \nmain()" }, { "alpha_fraction": 0.7792079448699951, "alphanum_fraction": 0.7792079448699951, "avg_line_length": 52.21052551269531, "blob_id": "61033553238e7fa5318318e2c501f4ffee18ff6f", "content_id": "80c129a607b2aef1fe17292b788408d13ecb3861", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1010, "license_type": "permissive", "max_line_length": 272, "num_lines": 19, "path": "/README.md", "repo_name": "alexismorin/maya-game-engine-autosetup", "src_encoding": "UTF-8", "text": "# maya-game-engine-autosetup\n\n![header](images/gif.gif)\n\nI've always found it a little unwieldy how Maya reverts to its own preset units when creating a new file. Especially as a multi-engine developer, I've found it a pain over time to constantly adjust units depending on whether I was using Unity or Unreal for a given projet.\n\nJust drag these two python files into your *scripts* folder (*\\Users\\You\\Documents\\maya\\scripts*) and when creating a new Maya scene after opening it just run either of these in the python command line:\n\n- *import setupUnreal*\n- *import setupUnity*\n\nAnd the up-axis, working units, animation framerate and grid size will automatically be adjusted for best import results in that given engine! If you create a new scene after that without closing Maya, just run\n\n- *reload(setupUnreal)*\n- *reload(setupUnity)*\n\nIf at any time the grid goes weird just create a new scene and run the *reload()* method again.\n\nNo more tiny models in Unity or weirdly-rotated static meshes in Unreal!" } ]
2
APEX-Lab-Semantic/crossword_puzzle_crawler
https://github.com/APEX-Lab-Semantic/crossword_puzzle_crawler
66e77561cf7e6393eea2291727d7a0cead7a659b
7387ec5b1facc5e27852dd9419f3f9c18ccb438d
ffde027429e237714481be8b2063be52a890bebd
refs/heads/master
2021-01-01T18:54:08.239963
2014-07-23T05:45:43
2014-07-23T05:45:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47863247990608215, "alphanum_fraction": 0.5811966061592102, "avg_line_length": 17.5, "blob_id": "cd9f93d717132e6ea9ba6b7f78b1aaec9fd42167", "content_id": "e8f626264df4a43a7c4e63a9c2652ba95ba977b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 147, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/README.md", "repo_name": "APEX-Lab-Semantic/crossword_puzzle_crawler", "src_encoding": "UTF-8", "text": "crossword_puzzle_crawler\r\n========================\r\n\r\nAPEX 2014 暑期实习 crossword puzzle 数据爬虫\r\n\r\n2014.07.22 目前完成多线程\r\n" }, { "alpha_fraction": 0.5051851868629456, "alphanum_fraction": 0.5081481337547302, "avg_line_length": 31.739999771118164, "blob_id": "ee7487efde16639100a1ecd37ed1970e528db865", "content_id": "73115913e895abfa6d914439782c76a0d3046db7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6750, "license_type": "no_license", "max_line_length": 103, "num_lines": 200, "path": "/cw_puzzle_crawler.py", "repo_name": "APEX-Lab-Semantic/crossword_puzzle_crawler", "src_encoding": "UTF-8", "text": "\r\n# -*- coding: utf-8 -*-\r\n#Data fetched from http://crosswordgiant.com/\r\n#Data including:\r\n # New York Times\r\n # Wall Street Journal\r\n # Universal\r\n # Jonesin'\r\n # USA Today\r\n # Thomas Joseph - King Feature Syndicate\r\n # Eugene Sheffer - King Feature Syndicate\r\n # Premier Sunday - King Feature Syndicate\r\n # Newsday.com\r\n # Ink Well xwords\r\n # L.A. Times Daily\r\n # L.A. Times Magazine\r\n # L.A. Times Sunday\r\n # Canadiana\r\n # The A.V Club\r\n # Thinks.com\r\n # Boston Globe\r\n # Jonesin Crosswords\r\n # The Washington Post\r\n # The Chronicle of Higher Education\r\n # Irish Times (Crosaire)\r\n # Irish Times (Simplex)\r\n # The Guardian - Cryptic\r\n # The Guardian - Quick\r\n\r\nfrom threading import Thread as thd\r\nfrom pyquery import PyQuery as p\r\nfrom datetime import datetime as dt\r\nfrom ordereddict import OrderedDict as odict\r\nimport re\r\nimport os\r\n\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n\r\nMay0305\r\n\r\n\r\nclass UrlFetchThread(thd):\r\n \"\"\"This class is used to fetch the urls which are used to get data\"\"\"\r\n def __init__(self, thd_name, url, path):\r\n super(UrlFetchThread, self).__init__(name = thd_name)\r\n self.URL = url\r\n self.rsc_store_path = path\r\n\r\n def run(self):\r\n base_rsc_url = self.URL\r\n page_number = 1\r\n rsc_url_para = '?page='\r\n input_date_format = '%b %d, %Y'\r\n output_date_format = '%Y.%m.%d'\r\n re_input_date = re.compile('(?<=\\s-\\s)\\w+\\s\\d+,\\s\\d+')\r\n while True:\r\n rsc_url = base_rsc_url + rsc_url_para + str(page_number)\r\n try:\r\n date_page = p(url = rsc_url)\r\n except Exception, e:\r\n page_number += 1\r\n continue\r\n label_a_list = date_page('.information_text a')\r\n if len(label_a_list) == 0:\r\n break\r\n else:\r\n for a in label_a_list:\r\n date_str = re_input_date.search(p(a).text()).group()\r\n date = dt.strptime(date_str, input_date_format)\r\n url = p(a).attr('href')\r\n _date_store_path = self.rsc_store_path + date.strftime(output_date_format) + '.txt'\r\n DataFetchThread(date_str, url, _date_store_path).start()\r\n page_number += 1\r\n\r\n\r\nclass DataFetchThread(thd):\r\n \"\"\"This class is used to fetch data of days\"\"\"\r\n def __init__(self, thd_name, url, path):\r\n super(DataFetchThread, self).__init__(name = thd_name)\r\n self.URL = url\r\n self.Path = 
path\r\n\r\n def run(self):\r\n date_store_path = self.Path\r\n print self.Path\r\n #begin to fetch and write\r\n date_url = self.URL\r\n try:\r\n puzzle_page = p(url = date_url)\r\n except Exception, e:\r\n return None\r\n fout = open(date_store_path, 'w')\r\n label_tr_list = puzzle_page('table.search_results tr')\r\n label_tr_list.pop(0) #remove the first element which is empty\r\n for line in label_tr_list:\r\n line_list = list(p(line)('td'))\r\n clue = p(line_list[0]).text()\r\n answer = p(line_list[1]).text()\r\n fout.write(answer)\r\n fout.write(' , ')\r\n fout.write(clue)\r\n fout.write('\\r\\n')\r\n fout.close()\r\n\r\n \r\ndef main():\r\n base_store_path = 'crossword_puzzles/'\r\n \r\n#fetch the url dict of puzzle resources\r\n base_url = 'http://crosswordgiant.com/browse'\r\n rsc_page = p(url = base_url)\r\n label_a_list = rsc_page('.information_text a')\r\n for a in label_a_list:\r\n rsc_name = p(a).text()\r\n base_rsc_url = p(a).attr('href')\r\n rsc_store_path = base_store_path + rsc_name + '/'\r\n try:\r\n os.makedirs(rsc_store_path)\r\n except Exception, e:\r\n pass\r\n#fetch data from each resource\r\n UrlFetchThread(rsc_name, base_rsc_url, rsc_store_path).start()\r\n\r\nmain()\r\n\r\n\r\n\r\n# def main():\r\n# base_store_path = 'crossword_puzzles/'\r\n# input_date_format = '%b %d, %Y'\r\n# output_date_format = '%Y.%m.%d'\r\n# re_input_date = re.compile('(?<=\\s-\\s)\\w+\\s\\d+,\\s\\d+')\r\n \r\n# #fetch the url dict of puzzle resources\r\n# base_url = 'http://crosswordgiant.com/browse'\r\n# rsc_page = p(url = base_url)\r\n# label_a_list = rsc_page('.information_text a')\r\n# rsc_url_dict = odict()\r\n# # rsc_url_dict = {}\r\n# for a in label_a_list:\r\n# rsc_url_dict[p(a).text()] = p(a).attr('href')\r\n\r\n# #fetch data from each resource\r\n# for rsc_name in rsc_url_dict:\r\n# print 'Fetching from ' + rsc_name + '...'\r\n# rsc_store_path = base_store_path + rsc_name + '/'\r\n# try:\r\n# os.makedirs(rsc_store_path)\r\n# except Exception, e:\r\n# pass\r\n \r\n# base_rsc_url = rsc_url_dict[rsc_name]\r\n# page_number = 1\r\n# rsc_url_para = '?page='\r\n# date_url_dict = odict()\r\n# # date_url_dict = {}\r\n\r\n# #get url dict of each date\r\n# while True:\r\n# rsc_url = base_rsc_url + rsc_url_para + str(page_number)\r\n# try:\r\n# date_page = p(url = rsc_url)\r\n# except Exception, e:\r\n# page_number += 1\r\n# continue\r\n# label_a_list = date_page('.information_text a')\r\n# if len(label_a_list) == 0:\r\n# break\r\n# else:\r\n# for a in label_a_list:\r\n# date_url_dict[re_input_date.search(p(a).text()).group()] = p(a).attr('href')\r\n# page_number += 1\r\n\r\n# #fetch crossword puzzles of each date\r\n# for date_str in date_url_dict:\r\n# print 'Date: ' + date_str\r\n# #create a new file\r\n# date = dt.strptime(date_str, input_date_format)\r\n# date_store_path = rsc_store_path + date.strftime(output_date_format) + '.txt'\r\n\r\n# #begin to fetch and write\r\n# date_url = date_url_dict[date_str]\r\n# try:\r\n# puzzle_page = p(url = date_url)\r\n# except Exception, e:\r\n# continue\r\n# fout = open(date_store_path, 'w')\r\n# label_tr_list = puzzle_page('table.search_results tr')\r\n# label_tr_list.pop(0) #remove the first element which is empty\r\n# for line in label_tr_list:\r\n# line_list = list(p(line)('td'))\r\n# clue = p(line_list[0]).text()\r\n# answer = p(line_list[1]).text()\r\n# fout.write(answer)\r\n# fout.write(' , ')\r\n# fout.write(clue)\r\n# fout.write('\\r\\n')\r\n# fout.close()\r\n" } ]
2
kasyapcm/mydjango
https://github.com/kasyapcm/mydjango
14fa93097898160baea6f3b6873740f346535176
3b73213ede282f7f2fc5c26d16f46a9460c17e17
04e175971e2fa9e81e9da2509e3c59246ca00486
refs/heads/master
2017-10-13T11:40:15.837128
2017-03-25T22:49:34
2017-03-25T22:49:34
86,193,327
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 46, "blob_id": "f9dc7f19e24ea998b3ac27e2593f5eb4a0f93b83", "content_id": "0b1883e0aa34ea87dc06199a919f84b89e603acf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/lib/python2.7/stat.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/stat.py" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 44, "blob_id": "095809bf5e5eb1f8c4d89a14e3e0ff0235d17cb2", "content_id": "f35d0c8a90b217a7ec13290f6c9f5d2ec23ee7d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/lib/python2.7/os.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/os.py" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "271d79916921f332223b243642efe0a85bf2c96c", "content_id": "52a30c0a37040732bbcfe92a96926d53ebe9ae40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/lib/python2.7/locale.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/locale.py" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 49, "blob_id": "a7d8d6ece13c3877ebcd09510246c83ee998cbee", "content_id": "a5900fd071d900626e3dac6e8a084dc414c9f01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/lib/python2.7/_abcoll.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/_abcoll.py" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 50, "blob_id": "e9378b20af5095bf6408b641bb60fd506f4971df", "content_id": "e5f67b2d02907915ae92f338081842d846e0876d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/lib/python2.7/warnings.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/warnings.py" }, { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.8199999928474426, "avg_line_length": 50, "blob_id": "b7687082899b7da7cf1eaeb8f44ef64f2786b78c", "content_id": "4953ddf8c3635b72210fa85fb310412cafacd577", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/lib/python2.7/copy_reg.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/copy_reg.py" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.8363636136054993, "avg_line_length": 55, "blob_id": "f83f6811f1193c27b5f647170cdb0f64c24b78f5", "content_id": "0d53487db81083dee68bb9eb9f4a28e2efe175c2", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 55, "num_lines": 1, "path": "/lib/python2.7/sre_constants.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/sre_constants.py" }, { "alpha_fraction": 0.7924528121948242, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 53, "blob_id": "734ea06c52b452dbefb9836247f325f425b74171", "content_id": "0738eba50402c1b03d724f138d8203194941915c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/lib/python2.7/sre_compile.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/sre_compile.py" }, { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.843137264251709, "avg_line_length": 51, "blob_id": "9c3c72dcb33b9171418ba40b7baf3649885577f4", "content_id": "e462cbeefaddd06f1f55e75fe795a8cc09e313ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/lib/python2.7/posixpath.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/posixpath.py" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 48, "blob_id": "7e1119be9f1b0fe9337a5abc2d0a6a515e4e7ce2", "content_id": "beee8c4d7a59a2cd061e61b4c091b4646b3ca324", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/lib/python2.7/codecs.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/codecs.py" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 44, "blob_id": "5d88fbcd39d9c432f73ece3176b3869b19281380", "content_id": "6aa707b417407035660667927d5c340cdbcd4ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/lib/python2.7/re.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/re.py" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8222222328186035, "avg_line_length": 45, "blob_id": "a71975afda7e42db5c0f9ed1cd03f1c0624ce718", "content_id": "8f37a02a6a32a78e82a3e450469ae4129cb9e4fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/lib/python2.7/sre.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/sre.py" }, { "alpha_fraction": 0.795918345451355, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 49, "blob_id": "694c51d84ffbc4c95ac907abae1b3d4a5cf8a7c5", "content_id": "217c42c28b4bac0ecca7f4e8347a331a454c3044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/lib/python2.7/fnmatch.py", "repo_name": 
"kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/fnmatch.py" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 50, "blob_id": "3d02fa75516670fa046d5120a351916a3e99d27f", "content_id": "65fc3d9bae97ee62da14fd88c7aa2b75d6d80cf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 50, "num_lines": 1, "path": "/lib/python2.7/UserDict.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/UserDict.py" }, { "alpha_fraction": 0.8113207817077637, "alphanum_fraction": 0.849056601524353, "avg_line_length": 53, "blob_id": "bcb25c9c503b0c4e55d15e8cfd8dd062229da4bd", "content_id": "84e3049ac75da37a8fe281a0ea5ccbb02f4bd201", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/lib/python2.7/genericpath.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/genericpath.py" }, { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 47, "blob_id": "f8ed279eaae1307418d0f58d9501b98ef04b3049", "content_id": "1e5199cd814c91941f1a94ab00d666fd4bb40627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/lib/python2.7/types.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/types.py" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8222222328186035, "avg_line_length": 45, "blob_id": "6bdf2f66d69c04775e02e19f301ee6057a579b2f", "content_id": "9ce4488d67535c840eb5dc8854464f489bcbcc68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/lib/python2.7/abc.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/abc.py" }, { "alpha_fraction": 0.7924528121948242, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 53, "blob_id": "f85eda6597e06bacfcd74d7ef4d65826710316d1", "content_id": "c117412f1493b27182e56f4d4577145705b6df37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/lib/python2.7/_weakrefset.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/_weakrefset.py" }, { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.843137264251709, "avg_line_length": 51, "blob_id": "0b4cd139948c3a9517c4034646f6260028a050b1", "content_id": "b785357c8733b9d7f84eb829027b8f6b050d7811", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/lib/python2.7/linecache.py", "repo_name": "kasyapcm/mydjango", "src_encoding": "UTF-8", "text": "/Users/vkchilak/anaconda/lib/python2.7/linecache.py" } ]
19
MikeXydas/SiameseLSTM
https://github.com/MikeXydas/SiameseLSTM
ca01071cd40e17f6e5be7a6cebff0543f8c6f6d5
101431cdb3afe61c7cc8b001fad4c3e8465257db
7adb5d5ae18eac2ee1cc27e58d7395717024e5c8
refs/heads/master
2023-03-05T01:34:52.145821
2021-02-10T16:34:50
2021-02-10T16:34:50
337,780,458
2
1
null
2021-02-10T16:18:59
2021-02-10T16:32:58
2021-02-10T16:34:50
Jupyter Notebook
[ { "alpha_fraction": 0.6854153275489807, "alphanum_fraction": 0.7259646654129028, "avg_line_length": 37.224998474121094, "blob_id": "40e26d0b8c2f1eac54378023b082e980009742f4", "content_id": "a7d35d904fb4c63012e5b5785548699fcb34b339", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1529, "license_type": "permissive", "max_line_length": 102, "num_lines": 40, "path": "/SiameseLSTM/plot_creation.py", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ntrain_acc = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-train-tag-epoch_accuracy.csv')\nval_acc = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-validation-tag-epoch_accuracy.csv')\n\ntrain_loss = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-train-tag-epoch_loss.csv')\nval_loss = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-validation-tag-epoch_loss.csv')\n\nfig = plt.figure(figsize=(12, 5))\n\n\nepochs = 500\n\nplt.subplot(1, 2, 1)\nplt.plot(np.arange(epochs), train_loss[:epochs].Value, label=\"Train\", linewidth=3) #, marker='o')\nplt.plot(np.arange(epochs), val_loss[:epochs].Value, label=\"Validation\", linewidth=3) #, marker='o')\nplt.legend(prop={'size': 18}, markerscale=5)\nplt.title('Loss vs. Epochs', fontsize=22)\nplt.xlabel(\"Epoch\", fontsize=19)\nplt.ylabel(\"BCE Loss\", fontsize=19)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.grid()\n\nplt.subplot(1, 2, 2)\nplt.plot(np.arange(epochs), train_acc[:epochs].Value, label=\"Train\", linewidth=3)#, marker='o')\nplt.plot(np.arange(epochs), val_acc[:epochs].Value, label=\"Validation\", linewidth=3)#, marker='o')\nplt.legend(prop={'size': 18}, markerscale=5)\nplt.title('Accuracy vs. 
Epochs', fontsize=22)\nplt.xlabel(\"Epoch\", fontsize=19)\nplt.ylabel(\"Accuracy\", fontsize=19)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.grid()\n\nfig.tight_layout()\n# plt.show()\nplt.savefig('../storage/datasets/q2b/plots/lstm_dense_features_curves_500.png', bbox_inches='tight')\n" }, { "alpha_fraction": 0.704459011554718, "alphanum_fraction": 0.7172485589981079, "avg_line_length": 49.75438690185547, "blob_id": "ad7e635cb308da51d6510f89a20f6d2b84a04c41", "content_id": "121b146f7caa5e0f9b4d1fc386b48b2e0fd9070d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2893, "license_type": "permissive", "max_line_length": 111, "num_lines": 57, "path": "/SiameseLSTM/SiamLSTMwithFeatures.py", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "import tensorflow.keras.backend as K\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Model\nfrom tensorflow_core.python.keras import regularizers\n\n\ndef create_malstm_features_model(max_seq_length, embedding_dims, embeddings, numb_engineered_features):\n # Parameters\n dropout_lstm = 0.23\n dropout_dense = 0.23\n regularizing = 0.002\n\n n_hidden = 300\n # Input layers\n left_input = layers.Input(shape=(max_seq_length,), dtype='int32')\n right_input = layers.Input(shape=(max_seq_length,), dtype='int32')\n engineered_features_input = layers.Input(shape=(numb_engineered_features,))\n\n # Embedding layer\n embedding_layer = layers.Embedding(len(embeddings), embedding_dims,\n weights=[embeddings], input_length=max_seq_length, trainable=False)\n encoded_left = embedding_layer(left_input)\n encoded_right = embedding_layer(right_input)\n\n # Since this is a siamese network, both sides share the same LSTM\n shared_lstm = layers.LSTM(n_hidden, kernel_regularizer=regularizers.l2(regularizing), dropout=dropout_lstm,\n recurrent_dropout=dropout_lstm, name=\"Siamese_LSTM\")\n left_output = shared_lstm(encoded_left)\n right_output = shared_lstm(encoded_right)\n\n # One fully connected layer to transform the engineered features\n encoded_engineered = layers.Dense(70, activation='relu', name=\"FeatureDense\")(engineered_features_input)\n\n # Concatenate the two question representations and the engineered features if they exist\n concatenated = layers.Concatenate()([left_output, right_output, encoded_engineered])\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization()(concatenated)\n\n concatenated = layers.Dense(150, kernel_regularizer=regularizers.l2(regularizing), activation='relu',\n name=\"ConcatenatedDense_1\")(concatenated)\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization(name=\"BatchNorm1\")(concatenated)\n\n concatenated = layers.Dense(70, kernel_regularizer=regularizers.l2(regularizing), activation='relu',\n name=\"ConcatenatedDense_2\")(concatenated)\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization(name=\"BatchNorm2\")(concatenated)\n\n concatenated = layers.Dense(35, kernel_regularizer=regularizers.l2(regularizing), activation='relu',\n name=\"ConcatenatedDense_3\")(concatenated)\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization(name=\"BatchNorm3\")(concatenated)\n\n output = layers.Dense(1, activation='sigmoid', name=\"Sigmoid\")(concatenated)\n\n return Model([left_input, right_input, engineered_features_input], output)\n" }, 
{ "alpha_fraction": 0.7812715172767639, "alphanum_fraction": 0.7915997505187988, "avg_line_length": 87.92857360839844, "blob_id": "017810bde9b1761800b5c8f16ca745e517e0237a", "content_id": "26b884b66e7188b6dfda6e87468c6b48ff7b9ba7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8714, "license_type": "permissive", "max_line_length": 707, "num_lines": 98, "path": "/README.md", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "# Detecting Duplicate Quora question pairs\n\n*Teammate: Valerios Stais*\n\n## Introduction\n\nIn our MSc we had as part of an exercise to detect duplicate questions posted \non Quora. The dataset, which was given by Quora in a 2017 \n[Kaggle competition](https://www.kaggle.com/c/quora-question-pairs/overview), consisted \n283k question pairs along with their label, duplicate or non-duplicate. We must note that \nthe question pairs were carefully picked so as to be difficult to correctly classify. For \nexample, the question pair \"*Why if my brown fox quick?*\" -\"*Where is my lazy dog?*\" can \neasily be labelled as non-duplicate by a human and any suitably trained model. However, \nthe question pair \"Are brown foxes quick?\" and \"Are brown dogs quick?\" is more difficult \nto be correctly classified by a model. The dataset given consists mostly of the second \nkind of questions.\n\n## Code\n* On `DuplicateDetectionFeatureEngineering.ipynb` we present a range of hand-engineered features\nwhich help us establish a baseline\n* On `EmbeddingMatrix.ipynb` we embed our whole dataset using a pretrained Word2Vec model\n* `Trainer.py` can be considered as the \"main\" which reads the embeddings, sets up the model, defines\nthe callbacks, and finally trains and evaluates our model\n* `SiameseLSTM/` contains all the source code that `Trainer.py` imports\n\n## Baseline approach with feature engineering\n\nIn the assignment the expected approach was to hand-craft a set of heuristics that would help us extract features from the given questions. 
Some of the extracted features were:\n* Cosine similarity of question tf-idf representation\n* Edit distance of questions*\n* Each question's word length (original question)\n* Each question's word length (preprocessed question)\n* Each question's word length (preprocessed with google's recommended function question)\n* Number of names in each question (beginning with a capital letter, excluding first word)\n* Cosine similarity between average name vector of each question\n* Number of stowords in each question, and difference of that number between questions\n* Number of punctuation symbols in each question, and difference of that number between questions\n* Edit distance between each question's words not present in Google News embedding*\n* Number of digits in each question, and difference of that number between questions\n* Number of nouns in each question, and difference of that number between questions\n* Edit distance between each question's nouns\n* Edit distance between each question's last half/last quarter*\n* Edit distance between each question's first 4 words*\n\n*Concerning edit distance, we consider a collection of metrics such as simple, partial ratios, token sort ratio and token set ratio.\n\nWe experimented with different combinations of the above features and machine learning models (SVM, Logistic Regression, Random Forest, XGBoost), and found that the optimal combination comprises all the aforementioned features and a tuned XGBoost classifier.\n\n| Statistical Measure | XGBoost |\n|---------------------|---------|\n| Accuracy | 0.7657 |\n| Recall | 0.7486 |\n| Precision | 0.7491 |\n| F1-score | 0.7489 |\n*Table 1: Validation average metrics (5-fold) using engineered features*\n\n\nWe get a validation accuracy of 0.7657 which is a nice first result but in order to better capture the distributions of duplicate and non-duplicate question pairs we would have to extract many more features. Instead, we thought of an approach that excels in capturing these distributions using a siamese LSTM.\n\n## Siamese LSTM\nSiamese networks were first proposed as an architecture for efficient face verification (Taigman et al. [1]). The network was given facial images of many people, with few images per person, and had to successfully infer whether two images depicted the same person or not. We must note that, in general, for this technique to work we must present hard pair (Schroff et al. [2]). Luckily, our dataset consists of question pairs whose duplicacy is hard to distinguish.\n\nIn the image field, the siamese networks consist of convolutional kernels. These are great for encoding the information that the network needs to decide whether two face images are of the same person. However, in our case, we have a more suitable tool called LSTM cell, which is great at encoding sequences, in our case questions. We implemented a variation of the siamese architecture proposed by Thyagarajan et al. [3]. We avoided using the Manhattan distance of the question encodings and instead added dense layers that input these encodings. Our hypothesis is that the dense layers will learn the most appropriate distance function.\n\n![Missing LSTM arch image](img/lstm_dense_archtecture.PNG \"Siamese LSTM Architecture\")\n\n*The model is defined on `SiameseLSTM/SiamLSTM.py`*\n\nConcerning the word embeddings, we used the pretrained Google news embeddings. 
We believe that training our own embeddings from scratch or even fine-tuning the pretrained ones would be overambitious given the limited time and hardware we had.\n\n![Missing LSTM arch image](img/lstm_dense_curves_500.png \"Siamese LSTM training curves\")\n\nWe trained the network with a batch size of 1,024, using the Adam optimizer with a learning rate of 0.001. We appropriately used dropout and regularization on both the LSTM and the dense layers to avoid overfitting. The training took 5 hours on an Nvidia 2060 Super (8GB). In the figure, we can see that even with the usage and tuning of dropout and regularization we were not able to successfully combat overfitting. We believe that further tuning would be able to alleviate the issue.\n\nFurthermore, we can see that even the overfitted network was able to achieve a validation **accuracy of 0.80**. This is a welcomed result, since it represents an increase of 0.04 when compared to the best feature engineering model.\n\n## Combining engineered features with the Siamese LSTM\n\nAs a final model, we wanted to both combine the question encodings created by the siamese LSTM and the engineered features we created in the first part of this task. We believe that adding these hand-crafted features would help the network to better understand our objective of finding duplicate questions.\n\n![Missing LSTM arch image with features](img/lstm_dense_feat_arch.PNG \"Siamese LSTM Architecture with injected features\")\n*The model is defined on `SiameseLSTM/SiamLSTMwithFeatures.py`*\n\nIn the above figure, we present the way we \"inject\" the new features into the duplicate classification decision. We avoid immediately concatenating them next to the LSTM encodings, and we first transform them using a dense layer. The main reason is to avoid scaling issues. For example, the LSTM encodings may have large values while the engineered features (which are first normalized) will have small values around 0. The added dense layer will be able to learn an appropriate re-scaling so as the engineered features will have a chance at being a part of the final decision. We hypothesize that this dense layer may also be replaced by a batch normalization layer, though we did not test this hypothesis.\n\n![Missing LSTM arch image with features](img/lstm_dense_features_curves_500.png \"Siamese LSTM with engineered features training curves\")\n\nFrom the learning curves, we have two main takeaways. First, the validation set accuracy increased from **0.80 to 0.83**. Second, while the overfitting observed before persists, it occurs much later in the training. We must note that the regularization, dropout and learning rate hyperparameters were the same in both runs. This suggests that the added features had an additional regulatory effect.\n\nWe believe the engineered features and the siamese LSTM tackle the duplicate detection task from two different angles. The LSTM is great at capturing context, along with syntactic and grammatical information. On the other hand, our engineered features focus almost exclusively on the content (vocabulary) of the two questions. By combining information from both these techniques our complete neural net is more stable at training when compared to the pure siamese LSTM, and also achieves better accuracy.\n\n## References\n\n[1] Yaniv Taigman, Ming Yang, Marc’Aurelio Ranzato, and Lior Wolf. Deepface:Closing the gap to human-level performance in face verification. 
\n\n![Missing LSTM arch image with features](img/lstm_dense_features_curves_500.png \"Siamese LSTM with engineered features training curves\")\n\nFrom the learning curves, we have two main takeaways. First, the validation set accuracy increased from **0.80 to 0.83**. Second, while the overfitting observed before persists, it occurs much later in training. We must note that the regularization, dropout and learning rate hyperparameters were the same in both runs, which suggests that the added features had an additional regularizing effect.\n\nWe believe the engineered features and the siamese LSTM tackle the duplicate detection task from two different angles. The LSTM is great at capturing context, along with syntactic and grammatical information. Our engineered features, on the other hand, focus almost exclusively on the content (vocabulary) of the two questions. By combining information from both techniques, our complete neural net is more stable during training than the pure siamese LSTM, and also achieves better accuracy.\n\n## References\n\n[1] Yaniv Taigman, Ming Yang, Marc’Aurelio Ranzato, and Lior Wolf. Deepface: Closing the gap to human-level performance in face verification. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014.\n\n[2] Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2015.\n\n[3] Aditya Thyagarajan. Siamese recurrent architectures for learning sentence similarity. November 2015." }, { "alpha_fraction": 0.640339732170105, "alphanum_fraction": 0.6564756035804749, "avg_line_length": 36.39682388305664, "blob_id": "3bff6d91045d8e9672001de477b48c152cc9064c", "content_id": "b5239e6adc32a44ac3d6a7d3a79173d5ee4e2d62", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2355, "license_type": "permissive", "max_line_length": 120, "num_lines": 63, "path": "/SiameseLSTM/utils.py", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport itertools\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\ndef create_dict_datasets(clean_train_df, numb_represantation_train, numb_represantation_test, seed=1212, val_ratio=0.2):\n    max_seq_length = 30\n\n    # Split to train validation\n    validation_size = int(val_ratio * len(clean_train_df))\n    training_size = len(clean_train_df) - validation_size\n\n    X_train_Q1 = [t[0] for t in numb_represantation_train]\n    X_train_Q2 = [t[1] for t in numb_represantation_train]\n    X_test_Q1 = [t[0] for t in numb_represantation_test]\n    X_test_Q2 = [t[1] for t in numb_represantation_test]\n\n    results = {\n        \"Q1\": X_train_Q1,\n        \"Q2\": X_train_Q2\n    }\n    X = pd.DataFrame.from_dict(results)\n    Y = clean_train_df[['IsDuplicate']]\n\n    results = {\n        \"Q1\": X_test_Q1,\n        \"Q2\": X_test_Q2\n    }\n    X_test = pd.DataFrame.from_dict(results)\n\n    X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size,\n                                                                    random_state=seed)\n\n    # Split to dicts\n    X_train = {'left': X_train.Q1, 'right': X_train.Q2}\n    X_validation = {'left': X_validation.Q1, 'right': X_validation.Q2}\n    X_test = {'left': X_test.Q1, 'right': X_test.Q2}\n\n    Y_train = Y_train.values\n    Y_validation = Y_validation.values\n\n    for dataset, side in itertools.product([X_train, X_validation, X_test], ['left', 'right']):\n        dataset[side] = pad_sequences(dataset[side], maxlen=max_seq_length)\n\n    return X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length\n\n\ndef split_engineered_features_dataset(X_feat, y_feat, seed=1212, val_ratio=0.2):\n    validation_size = int(val_ratio * len(X_feat))\n    X_features_train, X_features_val, Y_features_train, Y_features_validation = \\\n        train_test_split(X_feat, y_feat, test_size=validation_size, random_state=seed)\n\n    return X_features_train, X_features_val, Y_features_train, Y_features_validation, X_feat.shape[1]\n\n\ndef check_validation_acc(model, X_validation, y_validation):\n    y_preds = np.round(model.predict([X_validation['left'], X_validation['right']]))[:, 0].astype(int)\n\n    print(accuracy_score(y_validation, y_preds))" }, { "alpha_fraction": 0.647514283657074, "alphanum_fraction": 0.6528117656707764, "avg_line_length": 39.88333511352539, "blob_id": "2f60ea0e5ca2ac7d94270aa05965d3f89b3f3156", "content_id": "70511f5d457b8a6eee5f3fb2feffe0b4cd6df93a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "permissive", "max_line_length": 112, "num_lines": 60, "path": "/SiameseLSTM/inference.py", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "import pickle\nimport pandas as pd\nimport numpy as np\n\nfrom SiameseLSTM.SiamLSTM import create_malstm_model\nfrom SiameseLSTM.utils import create_dict_datasets\n\n\ndef create_output_file(model, X_test, outfile, max_seq_length, embeddings, embedding_dims, from_path=False,\n                       path_to_test='../storage/datasets/q2b/test_without_labels.csv'):\n    if from_path:\n        loaded_model = create_malstm_model(max_seq_length, embedding_dims=embedding_dims, embeddings=embeddings)\n        loaded_model.load_weights(model).expect_partial()\n    else:\n        loaded_model = model\n\n    y_preds = loaded_model.predict(X_test)\n    y_preds = np.round(y_preds)[:, 0].astype(int)\n\n    test_ids_df = pd.read_csv(path_to_test, usecols=['Id'])\n\n    results = {\n        \"Id\": list(test_ids_df.Id),\n        \"Predicted\": y_preds\n    }\n\n    results_df = pd.DataFrame.from_dict(results)\n\n    results_df.to_csv(outfile, index=False)\n\n\nif __name__ == \"__main__\":\n    # This main will work only in the case of not using the features\n    # Read the texts\n    print(\">>> Reading the texts...\", end='')\n    clean_train_df = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')\n    clean_test_df = pd.read_csv('../storage/datasets/q2b/preprocessed/test_quora_clean.csv')\n    print(\"Done\")\n\n    # Load the embeddings\n    print(\">>> Reading the embeddings...\", end='')\n    embeddings = np.load('../storage/datasets/q2b/word_embeddings/embeddings_matrix.npy', )\n    with open('../storage/datasets/q2b/word_embeddings/numb_represantation_train.pkl', 'rb') as handle:\n        numb_represantation_train = pickle.load(handle)\n    with open('../storage/datasets/q2b/word_embeddings/numb_represantation_test.pkl', 'rb') as handle:\n        numb_represantation_test = pickle.load(handle)\n    print(\"Done\")\n\n    print(\">>> Creating the datasets...\", end='')\n    X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length = \\\n        create_dict_datasets(clean_train_df, numb_represantation_train, numb_represantation_test)\n    print(\"Done\")\n\n    embeddings_dim = len(embeddings[0])\n\n    create_output_file(model='../checkpoints/epoch_0042/cp.ckpt',\n                       X_test=[X_test['left'], X_test['right']],\n                       outfile=\"../storage/datasets/q2b/results/test.csv\",\n                       max_seq_length=max_seq_length, embeddings=embeddings,\n                       embedding_dims=embeddings_dim, from_path=True)\n\n" }, { "alpha_fraction": 0.6416414976119995, "alphanum_fraction": 0.6475432515144348, "avg_line_length": 43.69938659667969, "blob_id": "f4a13dfb34054d346a7276fa9c287b3e2b427904", "content_id": "f365b643c5fe13f342cca24287292977f37b615d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7286, "license_type": "permissive", "max_line_length": 122, "num_lines": 163, "path": "/Trainer.py", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "import shutil\nimport time\nimport pickle\nimport pandas as pd\nimport numpy as np\n\nfrom tqdm.keras import TqdmCallback\n\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Adam\n\nfrom sklearn import preprocessing\n\nfrom SiameseLSTM.SiamLSTM import create_malstm_model\nfrom SiameseLSTM.SiamLSTMwithFeatures import create_malstm_features_model\nfrom SiameseLSTM.utils import create_dict_datasets, check_validation_acc, split_engineered_features_dataset\nfrom SiameseLSTM.inference import create_output_file\n\nif __name__ == \"__main__\":\n    
# Model variables\n batch_size = 1024\n n_epoch = 500\n use_engineered_features = False\n tensorboard_dir = 'storage\\\\logs\\\\'\n # model_checkpoint = \"checkpoints/epoch_0470/cp.ckpt\"\n model_checkpoint = \"\"\n delete_checkpoints_and_logs = True\n\n # CARE: To save time we have already transformed our texts from words to integers\n # and also created an embedding matrix (index -> embedding). In order to generate the\n # number representations you should use EmbeddingMatrix.ipynb and fix appropriately the\n # paths below\n submit_file = \"storage/datasets/q2b/results/delete.csv\"\n\n train_file = \"storage/datasets/q2b/preprocessed/train_quora_clean.csv\"\n test_file = \"storage/datasets/q2b/preprocessed/test_quora_clean.csv\"\n numb_representations_train_file = \"storage/datasets/q2b/word_embeddings/numb_represantation_train.pkl\"\n numb_representations_test_file = \"storage/datasets/q2b/word_embeddings/numb_represantation_test.pkl\"\n embedding_matrix_file = \"storage/datasets/q2b/word_embeddings/embeddings_matrix.npy\"\n engineered_features_train_file = \"storage/datasets/q2b/features/train_features.csv\"\n engineered_features_test_file = \"storage/datasets/q2b/features/test_features.csv\"\n\n # Deleting previous checkpoints and logs\n if delete_checkpoints_and_logs:\n try:\n shutil.rmtree('checkpoints/')\n print(\">>> Deleted previous checkpoints\")\n shutil.rmtree('storage/logs')\n print(\">>> Deleted previous logs\")\n except FileNotFoundError:\n print(\"No checkpoints or logs found\")\n\n # Setting memory growth of GPU so as TF does not allocate all the available memory\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n assert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\n config = tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n # Read the texts\n print(\">>> Reading the texts...\", end='')\n clean_train_df = pd.read_csv(train_file)\n clean_test_df = pd.read_csv(test_file)\n print(\"Done\")\n\n # Load the embeddings\n print(\">>> Reading the embeddings...\", end='')\n embeddings = np.load(embedding_matrix_file)\n with open(numb_representations_train_file, 'rb') as handle:\n numb_representation_train = pickle.load(handle)\n with open(numb_representations_test_file, 'rb') as handle:\n numb_representation_test = pickle.load(handle)\n print(\"Done\")\n\n # Load the engineered features\n if use_engineered_features:\n print(\">>> Reading the engineered features...\", end='')\n engineered_features_train = np.array(pd.read_csv(engineered_features_train_file))\n X_feat_test = np.array(pd.read_csv(engineered_features_test_file))\n\n X_feat_train = engineered_features_train[:, :-1]\n y_feat_train = engineered_features_train[:, -1]\n\n normalizer = preprocessing.Normalizer().fit(X_feat_train)\n X_feat_train = normalizer.transform(X_feat_train)\n X_feat_test = normalizer.transform(X_feat_test)\n print(\"Done\")\n else:\n X_feat_train, X_feat_test, y_feat_train = None, None, None\n\n embedding_dims = len(embeddings[0])\n\n print(\">>> Creating the datasets...\", end='')\n X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length = \\\n create_dict_datasets(clean_train_df, numb_representation_train, numb_representation_test)\n if X_feat_train is not None:\n X_features_train, X_features_val, Y_features_train, Y_features_validation, feat_size = \\\n split_engineered_features_dataset(X_feat_train, y_feat_train)\n else:\n feat_size = 0\n X_features_train, X_features_val = None, None\n print(\"Done\")\n\n 
print(\">>> Starting training!\")\n\n if use_engineered_features:\n malstm = create_malstm_features_model(max_seq_length, embedding_dims, embeddings, feat_size)\n else:\n malstm = create_malstm_model(max_seq_length, embedding_dims, embeddings)\n if model_checkpoint != \"\":\n malstm.load_weights(model_checkpoint)\n\n optimizer = Adam()\n\n malstm.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n # Tensorboard logging\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir, write_graph=False,\n histogram_freq=5)\n\n # Start training\n training_start_time = time.time()\n\n checkpoint_path = \"checkpoints/epoch_{epoch:04d}/cp.ckpt\"\n # checkpoint_dir = os.path.dirname(checkpoint_path)\n\n # Create a callback that saves the model's weights\n malstm.save_weights(checkpoint_path.format(epoch=0))\n cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, period=5,\n save_weights_only=True,\n verbose=1)\n early_stop_callback = tf.keras.callbacks.EarlyStopping(\n monitor='val_accuracy', min_delta=1e-2, patience=8000, verbose=1, restore_best_weights=True,\n )\n\n if feat_size == 0:\n malstm_trained = malstm.fit([X_train['left'], X_train['right'], ], Y_train, batch_size=batch_size, epochs=n_epoch,\n validation_data=([X_validation['left'], X_validation['right']], Y_validation),\n callbacks=[cp_callback, early_stop_callback, TqdmCallback(verbose=1),\n tensorboard_callback],\n verbose=0)\n else:\n malstm_trained = malstm.fit(\n x=[X_train['left'], X_train['right'], X_features_train], y=Y_train,\n batch_size=batch_size, epochs=n_epoch,\n validation_data=([X_validation['left'], X_validation['right'], X_features_val],\n Y_validation),\n callbacks=[cp_callback, early_stop_callback, TqdmCallback(verbose=1),\n tensorboard_callback],\n verbose=0\n )\n\n print(\">>> Training Finished!\")\n\n # check_validation_acc(malstm, X_validation, Y_validation)\n print(\">>> Predicting test results with the best validation model...\", end='')\n if X_feat_test is None:\n create_output_file(malstm, [X_test['left'], X_test['right']], submit_file,\n max_seq_length, embeddings, embedding_dims, from_path=False,\n path_to_test=test_file)\n else:\n create_output_file(malstm, [X_test['left'], X_test['right'], X_feat_test], submit_file,\n max_seq_length, embeddings, embedding_dims, from_path=False,\n path_to_test=test_file)\n print(\"Done\")\n" }, { "alpha_fraction": 0.718595027923584, "alphanum_fraction": 0.7309917211532593, "avg_line_length": 42.21428680419922, "blob_id": "1f4c7cd5fb21677e6e90012a0a2db2e374c38a75", "content_id": "fb7980ff71023ba3c51ebb7d1e67fa320e43e2d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2420, "license_type": "permissive", "max_line_length": 119, "num_lines": 56, "path": "/SiameseLSTM/SiamLSTM.py", "repo_name": "MikeXydas/SiameseLSTM", "src_encoding": "UTF-8", "text": "import tensorflow.keras.backend as K\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Model\nfrom tensorflow_core.python.keras import regularizers\n\n\ndef exponent_neg_manhattan_distance(left, right):\n return K.exp(-K.sum(K.abs(left-right), axis=1, keepdims=True))\n\n\ndef create_malstm_model(max_seq_length, embedding_dims, embeddings):\n # Parameters\n dropout_lstm = 0.23\n dropout_dense = 0.23\n regularizing = 0.002\n\n n_hidden = 300\n # Input layers\n left_input = layers.Input(shape=(max_seq_length,), dtype='int32')\n right_input = 
layers.Input(shape=(max_seq_length,), dtype='int32')\n\n embedding_layer = layers.Embedding(len(embeddings), embedding_dims,\n weights=[embeddings], input_length=max_seq_length, trainable=False)\n\n # Embedded version of the inputs\n encoded_left = embedding_layer(left_input)\n encoded_right = embedding_layer(right_input)\n\n # Since this is a siamese network, both sides share the same LSTM\n shared_lstm = layers.LSTM(n_hidden, dropout=dropout_lstm, kernel_regularizer=regularizers.l2(regularizing),\n recurrent_dropout=dropout_lstm)\n\n left_output = shared_lstm(encoded_left)\n right_output = shared_lstm(encoded_right)\n\n # Concatenate the two question representations and the engineered features if they exist\n concatenated = layers.Concatenate()([left_output, right_output])\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization()(concatenated)\n\n concatenated = layers.Dense(150, kernel_regularizer=regularizers.l2(regularizing), activation='relu')(concatenated)\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization()(concatenated)\n\n concatenated = layers.Dense(70, kernel_regularizer=regularizers.l2(regularizing), activation='relu')(concatenated)\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization()(concatenated)\n\n concatenated = layers.Dense(35, kernel_regularizer=regularizers.l2(regularizing), activation='relu')(concatenated)\n concatenated = layers.Dropout(dropout_dense)(concatenated)\n concatenated = layers.BatchNormalization()(concatenated)\n\n output = layers.Dense(1, activation='sigmoid')(concatenated)\n\n return Model([left_input, right_input], output)\n" } ]
7
goldenchan/arxiv-browse
https://github.com/goldenchan/arxiv-browse
7be8df82c03d83ec3e15df58d3088dffb331be84
12f424523eb6724def02009ddf7b1727fdace660
39792b2ac90fb33c991e5ea1670ead0e66bf14b9
refs/heads/master
2023-07-15T20:55:23.198307
2021-08-25T17:35:28
2021-08-25T17:35:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6828627586364746, "alphanum_fraction": 0.6848325729370117, "avg_line_length": 30.081632614135742, "blob_id": "b548221201220a13daad6e187c11e04a2759c38e", "content_id": "b86607b0d77c3c7ae0ce778790929f67e0aff3f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1523, "license_type": "permissive", "max_line_length": 80, "num_lines": 49, "path": "/browse/controllers/bibtexcite.py", "repo_name": "goldenchan/arxiv-browse", "src_encoding": "UTF-8", "text": "\"\"\"Gets bibtex citation for the paper.\"\"\"\nfrom typing import Callable\nfrom flask import make_response, Response\n\nfrom werkzeug.exceptions import InternalServerError\n\nfrom browse.exceptions import AbsNotFound\nfrom browse.services.document import metadata\nfrom browse.services.document.metadata import AbsNotFoundException, \\\n AbsVersionNotFoundException, AbsDeletedException\nfrom browse.services.cite import arxiv_bibtex\n\n\ndef _handle_failure(func: Callable[[str],Response]) -> Callable[[str],Response]:\n \"\"\"Handle errors similar to get_abs_page.\"\"\"\n def wrapper(arxiv_id:str) -> Response:\n try:\n return func(arxiv_id)\n except AbsNotFoundException:\n raise AbsNotFound(data={'reason': 'not_found'})\n except AbsVersionNotFoundException:\n raise AbsNotFound(data={'reason': 'version_not_found'})\n except AbsDeletedException as e:\n raise AbsNotFound(data={'reason': 'deleted', 'message': e})\n except Exception as ee:\n raise InternalServerError() from ee\n\n return wrapper\n\n\n@_handle_failure\ndef bibtex_citation(arxiv_id: str) -> Response:\n \"\"\"Get citation for the paper in bibtex format.\n\n Parameters\n ----------\n arxiv_id : str\n The arXiv identifier as provided in the request.\n\n Returns\n -------\n Flask response\n\n \"\"\"\n abs_meta = metadata.get_abs(arxiv_id)\n bibtex = arxiv_bibtex(abs_meta)\n response = make_response(bibtex, 200)\n response.mimetype = 'text/plain'\n return response\n" } ]
1
dakinfosystems/python-pet-project
https://github.com/dakinfosystems/python-pet-project
9ad8fd5ee67ea20742b5074c4b608217651a33ba
c20ad7a910edd368af809832bd3ed705089ab73f
90f955c076d38689d92eb52063db2a5e1c16497a
HEAD
2019-04-29T07:57:45.660482
2018-04-02T11:41:15
2018-04-02T11:41:15
94,242,103
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5638918280601501, "alphanum_fraction": 0.5686638355255127, "avg_line_length": 22.723270416259766, "blob_id": "d0545181aa2e171c57035d45cb1a562260ba23b7", "content_id": "4333d76359d644486fa01f436f1f4aff519ace9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3772, "license_type": "no_license", "max_line_length": 84, "num_lines": 159, "path": "/fa_tool/main.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import os\nimport sys\nfrom multiprocessing import Process\nimport re as regex\n\nfrom httplib2 import ServerNotFoundError\n\nfrom pprint import pprint\nimport pandas as pd\n\n# For relative imports to work in Python 3.6\ntry:\n sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..')\nexcept OSError:\n pass\nfrom fa_tool.modules import pyxml, gsheet as gs\nfrom fa_tool.test_system import TestSystem\nfrom fa_tool.cp_system import CPSystem\nfrom fa_tool.cp_logs import CpErrorLogs, CpUartLogs, CpUdsLogs\nfrom fa_tool.tool_constant import *\n\n\n# Global\ng_conf = {}\n\n\ndef get_bb():\n conf = []\n entry = {}\n tag = pyxml.getvalue(BIG_BOX)\n try:\n for index in range(0, len(tag['name'])):\n for key in tag.keys():\n entry[key] = tag[key][index]\n conf.append(entry)\n entry = {}\n except KeyError:\n conf = []\n\n return conf\n\n\ndef get_sys():\n tag = pyxml.getvalue(SYSTEMS)\n return tag['system']\n\n\ndef get_fw_ver():\n tag = pyxml.getvalue(FW_VERSION)\n return tag\n\n\ndef get_prev_fw_pattern():\n tag = pyxml.getvalue(REV)\n temp = ''\n prev_c = ''\n for c in tag:\n if prev_c == 'X':\n if temp[-1] == '}':\n num = int(temp[-2], 10)\n num += 1\n temp = temp[:-2] + '{0:d}'.format(num) + '}'\n else:\n temp += '{2}'\n elif c == 'X':\n temp += '[\\dA-F]'\n else:\n temp += c\n prev_c = c\n return temp\n\n\ndef get_tool_conf():\n global g_conf\n\n # get Big boxes\n g_conf[BIG_BOX] = get_bb()\n\n # get Systems\n g_conf[SYSTEMS] = get_sys()\n\n # get FW version\n g_conf[FW_VERSION] = get_fw_ver()\n\n # get FW prev version pattern\n g_conf[REV] = get_prev_fw_pattern()\n\n # TODO: get rest configuration\n # pprint(g_conf)\n\n\ndef worker_process(bb, system, df, conf):\n if type(bb) != str or not isinstance(system, TestSystem) \\\n or type(conf) != dict or not isinstance(df, pd.DataFrame):\n return\n # process CPLogs\n system.analyze_failure(bb, df, conf)\n\n\ndef main_entry():\n processes = []\n\n if not pyxml.validate(TOOL_CONF_XSD, TOOL_CONF_XML):\n return\n\n # get configuration\n get_tool_conf()\n\n try:\n gs.gopen(JOFA_TP_URL)\n except ServerNotFoundError:\n print(\"Server not found or not connected to Internet\")\n exit(1)\n\n # get all fw numbers from gsheet\n all_sheets = gs.get_sheet()\n sheet_name_list = []\n sheet_index = 1\n found = False\n for sheet in all_sheets:\n rfw = regex.compile(g_conf[REV])\n match = rfw.match(sheet.title)\n if match is not None:\n sheet_name_list.append(match.group())\n if match.group() == g_conf[FW_VERSION]:\n found = True\n if not found:\n sheet_index += 1\n\n g_conf[REV] = sheet_name_list[sheet_index:10]\n\n df = pd.DataFrame(gs.get_sheet(g_conf[FW_VERSION]).get_all_records())\n\n # Analyze failed cases from sheet for given Big box(es)\n bbs = [name for d in g_conf[BIG_BOX] for k, name in d.items() if k == 'name']\n g_conf.pop(BIG_BOX)\n for index in range(0, len(bbs)):\n # Create object of systems\n cp_sys = CPSystem()\n\n cp_sys.error_log = CpErrorLogs()\n cp_sys.uart_log = CpUartLogs()\n cp_sys.uds_log = CpUdsLogs()\n\n # 
filter out big-box data-frame\n bb_df = df.loc[df['Feature Set'] == bbs[index]]\n\n # Create process for big box and pass system objects\n p = Process(target=worker_process, args=(bbs[index], cp_sys, bb_df, g_conf))\n processes.append(p)\n p.start()\n\n for p in processes:\n p.join()\n print(\"Done\")\n\n\nif __name__ == '__main__':\n main_entry()\n" }, { "alpha_fraction": 0.7094972133636475, "alphanum_fraction": 0.7374301552772522, "avg_line_length": 28.91666603088379, "blob_id": "fa7c7e40fa4350f0030a12db07ccab523e95de40", "content_id": "07e861b2fde5efd3601dd296944adfb929a57278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 358, "license_type": "no_license", "max_line_length": 125, "num_lines": 12, "path": "/ReadMe.md", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "# Python Projects\n### 1. Introdution\nModule which are regularly used in development of application. So to make development faster, those modules are implemented. \n\n> Leave all hustle to us\n\n### 2. Module\n#### 2.1 oAuth\nAuthorize your social media account to perform operations on them.\n##### 2.1.1 Versions\n* 0.0.1 - Initial version\n * Gmail authorization" }, { "alpha_fraction": 0.6185410618782043, "alphanum_fraction": 0.6215805411338806, "avg_line_length": 18.939393997192383, "blob_id": "2d69179590c6bd78179100cd43a156f7656061d1", "content_id": "2a4330981a4a06635a2b3a4b56d2722e33d79ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 72, "num_lines": 33, "path": "/fa_tool/cp_logs.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import sys\nimport os\n\n# For relative imports to work in Python 3.6\ntry:\n sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..')\nexcept OSError:\n pass\nfrom fa_tool.test_logs import ErrorLogs, UartLogs, UdsLogs\n\n\nclass CpErrorLogs(ErrorLogs):\n def __init__(self):\n ErrorLogs.__init__(self)\n\n def parse(self):\n print(\"parsing error logs\")\n\n\nclass CpUartLogs(UartLogs):\n def __init__(self):\n UartLogs.__init__(self)\n\n def parse(self):\n print(\"parsing UART logs\")\n\n\nclass CpUdsLogs(UdsLogs):\n def __init__(self):\n UdsLogs.__init__(self)\n\n def parse(self):\n print(\"parsing UDS dump\")\n" }, { "alpha_fraction": 0.6292135119438171, "alphanum_fraction": 0.6362359523773193, "avg_line_length": 24.428571701049805, "blob_id": "1509d48d074b15c869f89ffccb83ca3b219ed082", "content_id": "0b0f971693aecd0164b2edefc69386c787847291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 78, "num_lines": 28, "path": "/oAuth/__init__.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import gmail\n\n\n\"\"\"\nGeneric OAuth2 authorization implementation\nfor GMail, Twitter, Facebook etc etc\nAs of now support available for\nGMail - for authorization of IMAP and SMTP\n\"\"\"\n\n\nclass OAuth(object):\n # Constructor\n def __init__(self, app, configfile):\n self.__app = str(app).upper()\n self.__appObj = None\n\n if self.__app == \"GMAIL\":\n self.__appObj = gmail.oGmail(configfile=configfile)\n\n self.__configfile = configfile\n\n # Public members\n def authorize(self, foruser, base64_encode=True):\n auth_string = \"\"\n if self.__appObj is not None:\n auth_string = 
self.__appObj.authorize_user(foruser, base64_encode)\n return auth_string\n" }, { "alpha_fraction": 0.6967654824256897, "alphanum_fraction": 0.7115902900695801, "avg_line_length": 26.481481552124023, "blob_id": "b6749832963a400d615456d96f6d082f33c25c88", "content_id": "24f7bc2b6b1b55f4b1324079bd9d9f604c5e30e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 99, "num_lines": 27, "path": "/fa_tool/modules/gsheet/gsheet.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\ngbook = None\ngspreadsheets = None\n\n\ndef gopen(path):\n global gbook\n global gspreadsheets\n # use creds to create a client to interact with the Google Drive API\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name('SeagateSAS-5883299e9b03.json', scope)\n gbook = gspread.authorize(creds)\n gbook.login()\n gspreadsheets = gbook.open_by_url(path)\n\n\ndef get_sheet(name=None):\n global gspreadsheets\n if gspreadsheets is not None:\n if name is None:\n return gspreadsheets.worksheets()\n else:\n return gspreadsheets.worksheet(name)\n return None\n" }, { "alpha_fraction": 0.5511775612831116, "alphanum_fraction": 0.563858687877655, "avg_line_length": 28.83783721923828, "blob_id": "0149ebc7aae837cf4c4c1bfed9f3a59b2ecafb8b", "content_id": "45d36bfcf4f812e34e152cb6d1e7357aa59988ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2208, "license_type": "no_license", "max_line_length": 85, "num_lines": 74, "path": "/test/test_oAuth.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import smtplib\nimport re as regex\nimport imaplib\nimport sys\nimport oAuth\nimport base64, quopri\n\n\ndef send_mail(auth_string):\n print\n smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)\n smtp_conn.set_debuglevel(True)\n smtp_conn.ehlo('test')\n smtp_conn.starttls()\n smtp_conn.docmd('AUTH', 'XOAUTH2 ' + auth_string)\n\n smtp_conn.quit()\n\n\ndef retrieve_mail(auth_string):\n rMlistn = regex.compile(r\"\\(.*\\\\(HasChildren).*\\)\\s\\\"(.*)\\\"\\s\\\"(?P<label>.+)\\\"\")\n rMlist = regex.compile(r\"\\(.*\\\\(HasNoChildren).*\\)\\s\\\"(.*)\\\"\\s\\\"(?P<label>.+)\\\"\")\n rSubj = regex.compile(r\"\\nSubject:\\s*(.*)\", regex.MULTILINE)\n encoded = regex.compile(r\"=\\?(.+)\\?([B|Q])\\?(.+)\\?=\")\n\n imap_obj = imaplib.IMAP4_SSL('imap.gmail.com')\n imap_obj.debug = 0\n imap_obj.authenticate('XOAUTH2', lambda x: auth_string)\n mlistResp = imap_obj.list()\n mlists = mlistResp[1]\n for mlist in mlists:\n # print (mlist)\n if rMlistn.match(mlist) is not None:\n # print(rMlistn.match(mlist).group('label') + \" has children\")\n pass\n else:\n pass\n # print(rMlist.match(mlist).group('label') + \" has no children\")\n\n imap_obj.select(\"Bank/Amex\")\n status, messages = imap_obj.search(None, \"ALL\")\n if status == 'OK':\n ids = messages[0].split()\n result, data = imap_obj.fetch(ids[-1], \"(RFC822)\")\n if result == 'OK':\n subject = rSubj.search(data[0][1]).group(1)\n if encoded.match(subject) is not None:\n if encoded.match(subject).group(2) == \"B\":\n print base64.b64decode(encoded.match(subject).group(3))\n elif encoded.match(subject).group(2) == \"Q\":\n print quopri.decodestring(encoded.match(subject).group(3))\n else:\n print subject\n # print data[0][1]\n 
else:\n print \"No mail found\"\n\n imap_obj.select(\"INBOX\")\n imap_obj.close()\n\n\n# print site\ndef main(argv):\n oauth = oAuth.OAuth(\"GMAIL\", \"conf.cfg\")\n auth_str = oauth.authorize(\"[email protected]\", False)\n\n if auth_str is not None:\n retrieve_mail(auth_str)\n\n print \"Done!\"\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" }, { "alpha_fraction": 0.5701960921287537, "alphanum_fraction": 0.5747058987617493, "avg_line_length": 33.91549301147461, "blob_id": "91b0fa09f2fd52ce4f096dbf507ba400b0b2855a", "content_id": "af3ad4ef5438e2e736c40be6f110bfa6321a01b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5100, "license_type": "no_license", "max_line_length": 118, "num_lines": 142, "path": "/oAuth/gmail.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "from util import UrlFormatter, UrlRequest, Configuration\r\nimport requests\r\nimport json\r\nimport base64\r\n\r\n\r\nclass oGmail(object):\r\n \"\"\"\r\n oGmail class handles OAuth for gmail account. It uses config file to read already present access token fo mail id.\r\n If it is not present to that user it creates entry for this.\r\n \"\"\"\r\n\r\n def __init__(self, configfile=None, clientid=None, secretkey=None):\r\n \"\"\"\r\n Constructor of oGmail class.\r\n :param configfile: path to configuration file\r\n :param clientid: client id provide by google to applicaition\r\n :param secretkey: secret key of client id\r\n \"\"\"\r\n self.__getclientdetails = False\r\n\r\n # read config file\r\n if configfile is not None:\r\n self.__parser = Configuration(configfile)\r\n self.__clientid = self.__getClientId()\r\n self.__secretkey = self.__getSecretKey()\r\n\r\n # If file is not present then read client id and secret key\r\n elif clientid is not None and secretkey is not None:\r\n self.__clientid = clientid\r\n self.__secretkey = secretkey\r\n\r\n # if nothing found then ask client details from user before proceeding\r\n else:\r\n self.__getclientdetails = True\r\n\r\n \"\"\"\r\n public members\r\n \"\"\"\r\n def authorize_user(self, user, base64_encode=True):\r\n \"\"\"\r\n :param user: user's mail id\r\n :param base64_encode: convert auth string to base64\r\n :return: access token to access mails of user\r\n \"\"\"\r\n clientid = \"\"\r\n secretkey = \"\"\r\n access_token = ''\r\n\r\n if self.__getclientdetails :\r\n clientid = raw_input(\"Please enter client id: \")\r\n secretkey = raw_input(\"Please enter secret key: \")\r\n else:\r\n clientid = self.__clientid\r\n secretkey = self.__secretkey\r\n\r\n tokens = self.__authorize_token(clientid, secretkey)\r\n # print tokens\r\n\r\n try:\r\n access_token = tokens['access_token']\r\n auth_string = self.__gmail_auth_string_format % (user, access_token)\r\n # print auth_string\r\n if base64_encode:\r\n auth_string = base64.b64encode(auth_string)\r\n except KeyError as ker:\r\n print tokens\r\n auth_string = None\r\n\r\n return auth_string\r\n\r\n \"\"\"\r\n Private members\r\n \"\"\"\r\n __PROTOCOL = \"https\"\r\n __HOST = \"accounts.google.com\"\r\n __URL = \"oauth/token\"\r\n __gmail_auth_string_format = 'user=%s\\1auth=Bearer %s\\1\\1'\r\n\r\n # Dummy redirect URI for non-web apps.\r\n __REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'\r\n\r\n def __getClientId(self):\r\n attrs = self.__parser.getattributesof(\"GMAIL\")\r\n return attrs['client_id']\r\n\r\n def __getSecretKey(self):\r\n attrs = self.__parser.getattributesof(\"GMAIL\")\r\n return attrs['secret_key']\r\n\r\n def 
__getAccessKey(self):\r\n attrs = self.__parser.getattributesof(\"GMAIL\")\r\n return attrs['access_token']\r\n\r\n def __getRefreshKey(self):\r\n attrs = self.__parser.getattributesof(\"GMAIL\")\r\n return attrs['refresh_token']\r\n\r\n def __getScope(self):\r\n attrs = self.__parser.getattributesof(\"GMAIL\")\r\n return attrs['scope']\r\n\r\n def __generatePermissionUrl(self):\r\n \"\"\"Generates the URL for authorizing access.\r\n This uses the \"OAuth2 for Installed Applications\" flow described at\r\n https://developers.google.com/accounts/docs/OAuth2InstalledApp\r\n Args:\r\n \"\"\"\r\n params = {}\r\n params['client_id'] = self.__clientid\r\n params['redirect_uri'] = self.__REDIRECT_URI\r\n params['scope'] = self.__getScope()\r\n params['response_type'] = 'code'\r\n return '%s://%s/%s?%s' % (self.__PROTOCOL, self.__HOST, 'o/oauth2/auth',\r\n UrlFormatter.formatUrlParams(params))\r\n\r\n def __authorize_token(self, username, secret_key):\r\n params = {}\r\n params['client_id'] = username\r\n params['client_secret'] = secret_key\r\n refresh_token = self.__getRefreshKey()\r\n\r\n if refresh_token is None:\r\n openbrowser = raw_input(\"Do want to open link to authorize access?[y/n]: \")\r\n print openbrowser\r\n if openbrowser == \"y\":\r\n UrlRequest.openRequest(self.__generatePermissionUrl())\r\n else:\r\n print (\"Below link is copied. Open browser and get verification code\")\r\n print (self.__generatePermissionUrl())\r\n authorization_code = input('Enter verification code:')\r\n params['code'] = authorization_code\r\n params['redirect_uri'] = self.__REDIRECT_URI\r\n params['grant_type'] = 'authorization_code'\r\n else:\r\n params['refresh_token'] = refresh_token\r\n params['grant_type'] = 'refresh_token'\r\n\r\n request_url = '%s://%s/%s' % (self.__PROTOCOL, self.__HOST, 'o/oauth2/token')\r\n\r\n response = requests.post(request_url, params)\r\n return json.loads(response.content)\r\n" }, { "alpha_fraction": 0.5618115067481995, "alphanum_fraction": 0.5618115067481995, "avg_line_length": 15.34000015258789, "blob_id": "9d3782212a0dae6c253ee50d9f6644edce0bb75e", "content_id": "6316873468414fb109af279dd498ea9bbeace6b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 38, "num_lines": 50, "path": "/fa_tool/modules/pyxml/xmlreader.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "from xmlschema import XMLSchema\n\n\ng_schema = None\ng_xml = None\n\n\ndef validateread(xsd, xml):\n global g_schema\n global g_xml\n\n g_xml = xml\n g_schema = XMLSchema(xsd)\n if g_schema.is_valid(g_xml):\n is_valid = True\n conf = g_schema.to_dict(g_xml)\n else:\n is_valid = False\n conf = None\n\n return is_valid, conf\n\n\ndef validate(xsd, xml):\n global g_schema\n global g_xml\n\n g_xml = xml\n g_schema = XMLSchema(xsd)\n if g_schema.is_valid(g_xml):\n is_valid = True\n else:\n is_valid = False\n\n return is_valid\n\n\ndef getvalue(tag=None):\n global g_schema\n global g_xml\n\n conf = g_schema.to_dict(g_xml)\n\n try:\n if tag is None:\n return conf\n else:\n return conf[tag]\n except KeyError:\n return {}\n" }, { "alpha_fraction": 0.5175583958625793, "alphanum_fraction": 0.5232579708099365, "avg_line_length": 24.531707763671875, "blob_id": "80c596a52ea82f79b12fc32bf60d160adac5061b", "content_id": "63bfec98108f36a00c294c08d57e20d02bbc824b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5439, 
"license_type": "no_license", "max_line_length": 101, "num_lines": 205, "path": "/stock_analyzer/analyzer.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import urllib2\r\nimport json\r\nimport csv\r\nimport codecs\r\nfrom multiprocessing import Process, Pipe\r\n\r\n\r\nSTOCK = {\r\n \"INFY\": \"Infosys\",\r\n \"HDFCBANK\": \"HDFC Bank\",\r\n \"IDEA\": \"Idea Telecommnication\"\r\n}\r\nsite = \"http://finance.google.com/finance/info?client=ig&q=\"\r\n\r\n\r\nclass Stock(object):\r\n def __init__(self, symbol):\r\n self.__symbol = symbol\r\n self.__high = 0\r\n self.__open = 0\r\n self.__close = 0\r\n self.__low = 0\r\n\r\n @property\r\n def symbol(self):\r\n return self.__symbol\r\n\r\n @property\r\n def high_price(self):\r\n return self.__high\r\n\r\n @property\r\n def open_price(self):\r\n return self.__open\r\n\r\n @property\r\n def close_price(self):\r\n return self.__close\r\n\r\n @property\r\n def low_price(self):\r\n return self.__low\r\n\r\n @close_price.setter\r\n def close_price(self, value):\r\n self.__close = value\r\n # print \"Stock close\"\r\n\r\n @open_price.setter\r\n def open_price(self, value):\r\n self.__open = value\r\n\r\n @high_price.setter\r\n def high_price(self, value):\r\n self.__high = value\r\n\r\n @low_price.setter\r\n def low_price(self, value):\r\n self.__low = value\r\n\r\n\r\nclass Portfolio(Stock):\r\n def __init__(self, symbol, quantity=0, price=0):\r\n self.__inv_price = price\r\n self.__inv_quantity = quantity\r\n self.__inv_return = 0\r\n super(Portfolio, self).__init__(symbol)\r\n\r\n @property\r\n def invested_price(self):\r\n return self.__inv_price\r\n\r\n @property\r\n def invested_quantity(self):\r\n return self.__inv_quantity\r\n\r\n @property\r\n def returns(self):\r\n return self.__inv_return\r\n\r\n @invested_price.setter\r\n def invested_price(self, value):\r\n self.__inv_price = value\r\n\r\n @invested_quantity.setter\r\n def invested_quantity(self, value):\r\n self.__inv_quantity = value\r\n\r\n def close_price(self, value):\r\n if self.__inv_quantity > 0:\r\n self.__inv_return = ((value - (float(self.__inv_price))) / float(self.__inv_price)) * 100\r\n # print self.__inv_price\r\n self.__close = value\r\n\r\n\r\ndef test(pageurl):\r\n res = urllib2.urlopen(pageurl)\r\n src_code = res.read()\r\n src_code = src_code.replace(\"// \", \"\")\r\n\r\n stocks = json.loads(src_code)\r\n\r\n myPortfolioList = []\r\n stock_to_monitor = []\r\n for stock in stocks:\r\n sym = \"\"\r\n if 't' in stock.keys():\r\n sym = stock['t']\r\n myPortfolioStock = Portfolio(symbol=sym)\r\n myStock = Stock(symbol=sym)\r\n # print stock\r\n\r\n if 'l' in stock.keys():\r\n value = stock['l']\r\n myStock.close_price = float(value.replace(\",\",\"\"))\r\n myPortfolioStock.close_price(float(value.replace(\",\",\"\")))\r\n stock_to_monitor.append(myStock)\r\n myPortfolioList.append(myPortfolioStock)\r\n\r\n invested = 0\r\n for item in myPortfolioList:\r\n if 0 < item.invested_price:\r\n print item.symbol + \": \" + str(item.invested_price)\r\n invested += 1\r\n\r\n if 0 == invested:\r\n print \"No investment made yet!!\"\r\n for item in stock_to_monitor:\r\n print item.symbol + \": \" + str(item.close_price)\r\n\r\n\r\ndef data_process(conn):\r\n print \"Processing stock data\"\r\n site_url = site\r\n count = 0\r\n csv_file = None\r\n portfolio = {}\r\n try:\r\n csv_file = open(\"portfolio.csv\", \"rb\")\r\n csv_reader = csv.reader(csv_file, delimiter=' ', quotechar='|')\r\n for row in csv_reader:\r\n for item in row:\r\n columns 
= item.split(',')\r\n portfolio_stock = Portfolio(columns[0], columns[1], columns[2])\r\n portfolio[columns[0]] = portfolio_stock\r\n if 0 < count:\r\n site_url += \",\"\r\n site_url = site_url + \"NSE%3A\" + columns[0]\r\n count += 1\r\n csv_file.close()\r\n\r\n # print site_url\r\n res = urllib2.urlopen(site_url)\r\n src_code = res.read()\r\n src_code = src_code.replace(\"// \", \"\")\r\n\r\n stocks = json.loads(src_code)\r\n for stock in stocks:\r\n sym = \"\"\r\n if 't' in stock.keys():\r\n sym = stock['t']\r\n if sym in portfolio:\r\n if 'l' in stock.keys():\r\n value = stock['l']\r\n portfolio[sym].close_price(float(value.replace(\",\", \"\")))\r\n for v in portfolio.values():\r\n print v.symbol + \": \" + str(v.returns)\r\n except IOError as e:\r\n print e\r\n except Exception as be:\r\n print \"Some other error occurred:\"+be.message\r\n\r\n print \"End processing stock data\"\r\n\r\n\r\ndef mail_process(conn):\r\n print \"Handle mail processing\"\r\n\r\n\r\ndef processtest():\r\n parent_conn, child_conn = Pipe()\r\n procs = []\r\n p = Process(target=data_process, args=(child_conn,))\r\n p.start()\r\n procs.append(p)\r\n p = Process(target=mail_process, args=(parent_conn,))\r\n p.start()\r\n procs.append(p)\r\n\r\n print(\"Waiting for process child to complete\")\r\n for p in procs:\r\n p.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n queryCount = 0;\r\n l_site = \"\"\r\n for k, v in STOCK.items():\r\n if 0 < queryCount:\r\n l_site = site + \",\"\r\n l_site = l_site + \"NSE%3A\" + k\r\n queryCount += 1\r\n# print site\r\n# test(l_site)\r\n processtest()\r\n" }, { "alpha_fraction": 0.5977859497070312, "alphanum_fraction": 0.5996310114860535, "avg_line_length": 21.12244987487793, "blob_id": "cdb1afd25f7e8a91f4e06c280fff5ae8f9e981be", "content_id": "178f516597018791bb7c4d9a83636eb7f3081f6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 72, "num_lines": 49, "path": "/fa_tool/test_system.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom abc import ABC, abstractmethod\n\n# For relative imports to work in Python 3.6\ntry:\n sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..')\nexcept OSError:\n pass\nfrom fa_tool.test_logs import ErrorLogs, UartLogs, UdsLogs\n\n\nclass TestSystem(ABC):\n\n def __init__(self):\n self.__error_log = None\n self.__uart_log = None\n self.__uds_log = None\n\n @property\n def error_log(self):\n return self.__error_log\n\n @property\n def uart_log(self):\n return self.__uart_log\n\n @property\n def uds_log(self):\n return self.__uds_log\n\n @error_log.setter\n def error_log(self, value):\n if isinstance(value, ErrorLogs):\n self.__error_log = value\n\n @uart_log.setter\n def uart_log(self, value):\n if isinstance(value, UartLogs):\n self.__uart_log = value\n\n @uds_log.setter\n def uds_log(self, value):\n if isinstance(value, UdsLogs):\n self.__uds_log = value\n\n @abstractmethod\n def analyze_failure(self, bb, df, conf):\n pass\n" }, { "alpha_fraction": 0.8035714030265808, "alphanum_fraction": 0.8035714030265808, "avg_line_length": 27, "blob_id": "9e3464a603aadaa55e3eb8b218a065c4281b5ba6", "content_id": "4d4d2117eb03a5be15fee07083c3a92d05951e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": 
"/fa_tool/modules/gsheet/__init__.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "from .gsheet import gopen\nfrom .gsheet import get_sheet\n" }, { "alpha_fraction": 0.45416831970214844, "alphanum_fraction": 0.4626629650592804, "avg_line_length": 32.9731559753418, "blob_id": "1c17c2d1f80430cffbba309c2824a075f88c0a14", "content_id": "d703de8d3f012c08c8e0e03e2fc3d556a24117cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5062, "license_type": "no_license", "max_line_length": 86, "num_lines": 149, "path": "/fa_tool/cp_system.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport re as regex\n\nimport pandas as pd\nfrom gspread.exceptions import WorksheetNotFound\n\n# For relative imports to work in Python 3.6\ntry:\n sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/..')\nexcept OSError:\n pass\n\nfrom fa_tool.test_system import TestSystem\nfrom fa_tool.modules.gsheet import gopen, get_sheet\nfrom fa_tool.tool_constant import JOFA_TP_URL, FW_VERSION, rfailed, REV\n\n\ndef get_prev_version(fw):\n # Converting hex string to int\n ifw = int(fw, 16)\n ifw -= 1\n # Returning hex string without 0x\n return '{:x}'.format(ifw)\n\n\nclass CPSystem(TestSystem):\n\n def __init__(self):\n TestSystem.__init__(self)\n self.__sheets = {}\n self.__bb = ''\n\n def __get_old_fw(self, fw, bb):\n if not isinstance(fw, str) \\\n or not isinstance(bb, str):\n return None, None\n\n if fw in self.__sheets:\n df = self.__sheets[fw]\n else:\n gopen(JOFA_TP_URL)\n # print(fw)\n try:\n df = pd.DataFrame(get_sheet(fw).get_all_records())\n self.__sheets[fw] = df\n except WorksheetNotFound:\n self.__sheets[fw] = None\n return None, None\n\n try:\n if df is None:\n return None, None\n\n bb_df = df.loc[df['Feature Set'] == bb]\n\n # Find table header == value\n\n idx = df.loc[df['Feature Set'] == 'Feature Set'].index[0]\n mapped_issues = df.iloc[1:idx][['Model Number Status', 'Failed Log Path']]\n # TODO: trim if any empty row found\n except IndexError:\n bb_df = None\n mapped_issues = None\n except KeyError:\n bb_df = None\n mapped_issues = None\n print('Big Box: {0}, FW: {1}'.format(bb, fw))\n exit(0)\n\n return bb_df, mapped_issues\n\n def __check_in_prev_ver(self, test_name, fwlist, config_type):\n rgfail = regex.compile(rfailed)\n issue_dec = {\n 'config': config_type\n }\n iterations = 10\n\n for fw in fwlist:\n # iterations -= 1\n # fw = get_prev_version(fw)\n\n issue_dec['fw'] = fw\n bb_df, mapped_issues = self.__get_old_fw(fw, self.__bb)\n if bb_df is None or mapped_issues is None:\n iterations += 1\n continue\n\n # find test row\n test_row = bb_df[bb_df['TEST NAME'] == test_name]\n status = test_row[config_type].values\n try:\n # check if test is mapped for config type\n status = status[0]\n matches = rgfail.match(status)\n if matches is not None and matches.group(1) is not None:\n issue_num = int(matches.group(1))\n issue_dec['desc'] = mapped_issues.loc[\n mapped_issues['Model Number Status'] == issue_num\n ]['Failed Log Path'].values[0]\n break\n except IndexError:\n issue_dec['fw'] = ''\n issue_dec['desc'] = ''\n break\n\n return issue_dec\n\n def analyze_failure(self, bb, df, conf):\n self.__bb = bb\n # First analyze sheet first\n fail_trs = df[(df['CONFIG_1'] == 'Fail')\n | (df['CONFIG_2'] == 'Fail')\n | (df['CONFIG_3'] == 'Fail')\n | (df['CONFIG_4'] == 'Fail')\n | (df['CONFIG_5'] == 'Fail')\n | (df['CONFIG_6'] == 'Fail')][['TEST 
NAME',\n 'CONFIG_1',\n 'CONFIG_2',\n 'CONFIG_3',\n 'CONFIG_4',\n 'CONFIG_5',\n 'CONFIG_6']].values.tolist()\n for routine in fail_trs:\n test = routine[0]\n if 'Fail' == routine[1]:\n desc = self.__check_in_prev_ver(test, conf[REV], 'CONFIG_1')\n routine.append(desc)\n if 'Fail' == routine[2]:\n desc = self.__check_in_prev_ver(test, conf[REV], 'CONFIG_2')\n routine.append(desc)\n if 'Fail' == routine[3]:\n desc = self.__check_in_prev_ver(test, conf[REV], 'CONFIG_3')\n routine.append(desc)\n if 'Fail' == routine[4]:\n desc = self.__check_in_prev_ver(test, conf[REV], 'CONFIG_4')\n routine.append(desc)\n if 'Fail' == routine[5]:\n desc = self.__check_in_prev_ver(test, conf[REV], 'CONFIG_5')\n routine.append(desc)\n\n from pprint import pprint\n pprint(fail_trs)\n # TODO: Now analyze directory\n import time\n time.sleep(3)\n # print(df)\n print(\"Exiting {}...\".format(bb))\n" }, { "alpha_fraction": 0.6873786449432373, "alphanum_fraction": 0.6970874071121216, "avg_line_length": 29.294116973876953, "blob_id": "8bfb67b5c11acc6959325b7117784bdf43164a27", "content_id": "1d6c4bd40179ab72ca9d6d6411110c5be4eb8584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 104, "num_lines": 17, "path": "/fa_tool/tool_constant.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "# CONSTANTS\nBIG_BOX = 'big_box'\nFW_VERSION = 'fw'\nREV = 'fw_rev_pattern'\nBETA_REV = 'fw_beta_rev_pattern'\nSYSTEMS = 'system_type'\nCONFS = 'configurations'\nLOG_PATH = 'log_path'\nLOG_EXT = 'log_extensions'\nSBM_LOC = 'sbm_path'\nREPORT_TEMPLATE = 'report_template_path'\nWB_TITLE = 'Jofa Test Plans'\nJOFA_TP_URL = 'https://docs.google.com/spreadsheets/d/1yOlgw7hmgHU9dKBUWjX9OohoVFpZ4NuJJfnY-gogazI/edit'\nTOOL_CONF_XML = \"./fa_tool_config.xml\"\nTOOL_CONF_XSD = \"./fa_tool_config.xsd\"\n\nrfailed = r\"Fail(?:ed\\s\\((\\d+)\\))?\"\n" }, { "alpha_fraction": 0.5776677131652832, "alphanum_fraction": 0.5805943012237549, "avg_line_length": 30.188405990600586, "blob_id": "e13dc1614e057310a2a3c4230fa8d83bc01155c2", "content_id": "c9b8cfe9114a1a887e75b73477ff8bff7c97eac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4442, "license_type": "no_license", "max_line_length": 103, "num_lines": 138, "path": "/oAuth/util.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "\"\"\"\r\nUtility for package which\r\n\"\"\"\r\nimport requests\r\nimport json\r\nimport webbrowser\r\ntry:\r\n # Python 2.x.x\r\n import ConfigParser as cp\r\nexcept ImportError:\r\n # Python 3.x.x\r\n import configparser as cp\r\n\r\ntry:\r\n import urllib.parse as parse\r\nexcept ImportError:\r\n import urllib as parse\r\n\r\nclass Configuration(object):\r\n def __init__(self, configfile):\r\n \"\"\"Constructor\r\n :param configfile: path/to/configfile\r\n \"\"\"\r\n self.__parser = self.__getparser(configfile)\r\n self.__configfile = configfile\r\n\r\n def addattributeinconfig(self, section, attribute, value):\r\n \"\"\"Adds attribute to config file.\r\n :param section: section in config file\r\n :param attribute: attribute under section\r\n :param value: value of attribute\r\n :return: None\r\n \"\"\"\r\n parser = self.__parser\r\n parser.set(section, attribute, value)\r\n with open(self.__configfile, 'wb+') as configfile:\r\n parser.write(configfile)\r\n\r\n def getattributesof(self, section):\r\n \"\"\"Retrieve attribute from 
config file.\r\n :param section: section in config file\r\n :return: all attribute of given section in dict form\r\n \"\"\"\r\n attributes = {}\r\n parser = self.__parser\r\n if parser.has_section(section):\r\n for attribute in parser.items(section):\r\n if type(attribute) is tuple:\r\n attributes[attribute[0]] = attribute[1]\r\n else:\r\n attributes[attribute] = parser.get(section, attribute)\r\n return attributes\r\n\r\n \"\"\" Private Memeber \"\"\"\r\n def __getparser(self, configfile):\r\n \"\"\"Private function for getting parser object\r\n :param configfile: path/to/config.file\r\n :return: parser of config file\r\n \"\"\"\r\n parser = cp.ConfigParser()\r\n if configfile is not None:\r\n parser.read(configfile)\r\n\r\n return parser\r\n\r\n\r\nclass UrlFormatter(object):\r\n @staticmethod\r\n def encodeinurl(params, encoding='utf-8'):\r\n \"\"\"Encodes params in url encode. Default encoding\r\n type is utf-8\r\n :param params: parameter with url\r\n :param encoding: encoding type\r\n :returns encoded parameter\"\"\"\r\n try:\r\n urlenc = parse.urlencode(params).encode(encoding)\r\n print (urlenc)\r\n except AttributeError:\r\n pass\r\n\r\n return urlenc\r\n\r\n @staticmethod\r\n def urlEscape(text, safe='/'):\r\n # See OAUTH 5.1 for a definition of which characters need to be escaped. safe='~-._'\r\n urlesc = parse.quote(text, safe)\r\n return urlesc\r\n\r\n @staticmethod\r\n def urlUnescape(self, text):\r\n # See OAUTH 5.1 for a definition of which characters need to be escaped.\r\n urlunesc = parse.unquote(text)\r\n return urlunesc\r\n\r\n @staticmethod\r\n def formatUrlParams(params):\r\n \"\"\"Formats parameters into a URL query string.\r\n Args:\r\n params: A key-value map.\r\n Returns:\r\n A URL query string version of the given parameters.\r\n \"\"\"\r\n param_fragments = []\r\n for param in sorted(params.items(), key=lambda x: x[0]):\r\n param_fragments.append('%s=%s' % (param[0], UrlFormatter.urlEscape(param[1], safe='~-._')))\r\n return '&'.join(param_fragments)\r\n\r\n\r\nclass UrlRequest(object):\r\n \"\"\"UrlRequest class sends async request\"\"\"\r\n @staticmethod\r\n def sendRequest(method, url, header=None, param=None):\r\n \"\"\"\r\n :param method: Type of method of request\r\n :param url: url where request has to be made\r\n :param header: request header if any\r\n :param param: parameter with request\r\n :return: returns response\r\n \"\"\"\r\n response = None\r\n if method is \"GET\":\r\n try:\r\n response = requests.get(url, param, headers=header).text\r\n except requests.HTTPError as e:\r\n response = json.dumps(e.message)\r\n elif method is \"POST\":\r\n try:\r\n response = requests.post(url, param, headers=header).text\r\n except requests.HTTPError as e:\r\n response = json.dumps(e.message)\r\n\r\n return response\r\n\r\n @staticmethod\r\n def openRequest(url):\r\n \"\"\"Open url in default browser\"\"\"\r\n browser = webbrowser.get()\r\n browser.open_new_tab(url)\r\n" }, { "alpha_fraction": 0.5590550899505615, "alphanum_fraction": 0.5590550899505615, "avg_line_length": 12.607142448425293, "blob_id": "43c8b12b71aaf8766d66250f761d24976b2f887e", "content_id": "628d8b76aa917a25b8cb27c9e1a6a5113230d052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 35, "num_lines": 28, "path": "/fa_tool/test_logs.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "from abc import ABC, abstractmethod\n\n\nclass ErrorLogs(ABC):\n 
def __init__(self):\n pass\n\n @abstractmethod\n def parse(self):\n pass\n\n\nclass UartLogs(ABC):\n def __init__(self):\n pass\n\n @abstractmethod\n def parse(self):\n pass\n\n\nclass UdsLogs(ABC):\n def __init__(self):\n pass\n\n @abstractmethod\n def parse(self):\n pass\n" }, { "alpha_fraction": 0.7424749135971069, "alphanum_fraction": 0.7591972947120667, "avg_line_length": 28.899999618530273, "blob_id": "52d3b00969498d4ad9bbd0797d2b800449e22a3d", "content_id": "e4ed652e4be71b912b78d21125006b053ba6a048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 299, "license_type": "no_license", "max_line_length": 193, "num_lines": 10, "path": "/oAuth/ReadMe.md", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "# Generic OAUTH2 \n### Introduction\nOAuth is authorization framework which allow user to use limited and secure way. This module takes care of oauth process so that you can create application without worrying about authorization.\n\n### How to use\n<TBD>\n\n### Versions\n#### 0.00.1\nGmail Oauth is added.\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 9, "blob_id": "b40d40134b416058e43e160af2f817a27dbd6ec4", "content_id": "9ed0f87b5a89bd1e2dd5bd4026d5d163ab682a4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10, "license_type": "no_license", "max_line_length": 9, "num_lines": 1, "path": "/fa_tool/readme.md", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "# FA Tool\n" }, { "alpha_fraction": 0.8392857313156128, "alphanum_fraction": 0.8392857313156128, "avg_line_length": 55, "blob_id": "86f9d323706cad52afaff0d2ed5e46280b8f5fc9", "content_id": "af70c99e571a478d592b7b79ea6d10550739d7b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 55, "num_lines": 1, "path": "/fa_tool/modules/pyxml/__init__.py", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "from .xmlreader import validateread, validate, getvalue\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 15, "blob_id": "69cedcbaa3814526074a1480b772040b66d8f15e", "content_id": "515fb685fc41868846d28fdb8f6a2f219ac2020d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 48, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/fa_tool/modules/readme.md", "repo_name": "dakinfosystems/python-pet-project", "src_encoding": "UTF-8", "text": "## Modules\n* Name of the module\n * great dan\n" } ]
19
cebarnes/SoftwareDesign
https://github.com/cebarnes/SoftwareDesign
eaf4a94d3efe8b3434c67c13c2b730060ffb7c40
590146236e94c1198258a350d26304caececec84
c2055bc20e35531b29a90edbb789092eb1fe2f98
refs/heads/master
2021-01-17T01:59:21.893057
2013-12-06T07:13:46
2013-12-06T07:13:46
13,083,018
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5545454621315002, "alphanum_fraction": 0.5863636136054993, "avg_line_length": 14.785714149475098, "blob_id": "2cbf44d30d347641479a98ba808d8c738392af86", "content_id": "63458088bf9c3d5e468eb51da0f4cbb7ba81f297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/HW3/polygon.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from swampy.TurtleWorld import *\n\ndef polygon(t, length, n):\n for i in range(n):\n fd(t, length)\n lt(t,360/n)\n print t\n wait_for_user()\n\n \nworld = TurtleWorld()\nbob = Turtle()\n\npolygon(bob,50,12)" }, { "alpha_fraction": 0.550000011920929, "alphanum_fraction": 0.5694444179534912, "avg_line_length": 24.785715103149414, "blob_id": "3cfb67da9fb2168b2c02d383faa9ac33289d6eba", "content_id": "e0e8ac969ef11de7748d5f3ba7b66a337deeb04e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "no_license", "max_line_length": 80, "num_lines": 14, "path": "/HW6/cartalk1.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def triple_double_letters():\n fin = open('words.txt')\n for word in fin:\n if check_triple(word):\n print word\n\ndef check_triple(word):\n if len(word)<6:\n return False\n for i in range(len(word)-6):\n if word[i]==word[i+1] and word[i+2]==word[i+3] and word[i+4]==word[i+5]:\n return True\n\ntriple_double_letters()" }, { "alpha_fraction": 0.5716535449028015, "alphanum_fraction": 0.5748031735420227, "avg_line_length": 18.272727966308594, "blob_id": "d667390c84f33c6bf8ec6619fd63a38b9b3e778c", "content_id": "d9b5cbce51c3cae71229fc8f235e36b14ce5313b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 65, "num_lines": 33, "path": "/HW8/analyze_book.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "import string\n\ndef process_file(fin,header):\n hist = {}\n\n if header:\n skip_header(fin)\n\n for line in fin:\n process_line(line,hist)\n\n return hist\n\ndef skip_header(fin):\n for line in fin:\n if line.startswith('[Illustration:]'):\n break\n\ndef process_line(line,hist):\n line = line.replace('-',' ')\n\n for word in line.split():\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n\n hist[word] = hist.get(word,0) + 1\n\ndef main():\n fin = file('fairies_and_folk.txt')\n print process_file(fin, True)\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5823863744735718, "alphanum_fraction": 0.5823863744735718, "avg_line_length": 15.7619047164917, "blob_id": "cd1cd0cd72fae7dfe58676dbae2acb3d5b8f477c", "content_id": "35ec3c58fe2da5f8d53607d395c0040e78c14c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/HW4/test_palindrome.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "\"\"\"Test code for palindrome.py.\n\nAuthor: Allen B. 
Downey\n\"\"\"\n\nfrom palindrome import is_palindrome\n\n\ndef main():\n    for line in open('words.txt'):\n\n        # remove whitespace from the beginning and end\n        word = line.strip()\n\n        # only print palindromes\n        if is_palindrome(word):\n            print word\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.4413619041442871, "alphanum_fraction": 0.44766709208488464, "avg_line_length": 27.35714340209961, "blob_id": "705cdac3363459c88a40a735d3eaceb7a672644", "content_id": "f212dbb7cca2be0d07a6e03e4e529120f486d6bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 70, "num_lines": 28, "path": "/HW5/rotate.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def rotate_word(word, n):\n    \"\"\" \n    This function takes a string, word, and rotates it by a number, n.\n    \"\"\"\n    rotated = ''\n    first = ord('a')\n    last = ord('z')\n    for letter in word:\n        number = ord(letter)\n        if n >= 0:\n            if number < (last - n):\n                number = number + n\n                letter = chr(number)\n            else:\n                number = first+(n-(last-ord(letter))) - 1\n                letter = chr(number)\n        if n < 0:\n            if number > (first - n):\n                number = number + n\n                letter = chr(number)\n            else:\n                number = last + (n + (number - first)) +1\n                letter = chr(number)\n        rotated += letter\n    return rotated\n\nword = \"cheer\"\nprint rotate_word(word,7)" }, { "alpha_fraction": 0.5075757503509521, "alphanum_fraction": 0.5568181872367859, "avg_line_length": 13.947368621826172, "blob_id": "f043a383b0640d0a0b087f1b6572efd28b8b8b12", "content_id": "e046a62b6a40c920870247dbca85747630a88dc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 32, "num_lines": 19, "path": "/HW4/dragon.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from swampy.TurtleWorld import *\n\ndef dragon(t,n):\n    # the original line here was incomplete ('m = n/') and there was no\n    # base case; both are reconstructed by analogy with HW4/koch.py\n    if n<3:\n        fd(t,n)\n        return\n    m = n/3.0\n    dragon(t,m)\n    lt(t,90)\n    dragon(t,m)\n    rt(t,90)\n    dragon(t,m)\n    lt(t,90)\n    dragon(t,m)\n    rt(t,90)\n    dragon(t,m)\n\nworld = TurtleWorld()\nbob = Turtle()\nbob.delay = .01\n\ndragon(bob,100)" }, { "alpha_fraction": 0.5680473446846008, "alphanum_fraction": 0.5739644765853882, "avg_line_length": 21.04347801208496, "blob_id": "abf6d19cd6793dae5271a823db190eabbba10025", "content_id": "9025df94ebe99ccb9555f63fa46c4d2098900780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 40, "num_lines": 23, "path": "/HW6/reverse_pair.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from inlist import*\n\ndef reverse_pair(word_list,word):\n    rev_word = word[::-1]\n    return in_bisect(word_list,rev_word)\n\ndef make_word_list():\n    stuff = list(open('words.txt'))\n    fin = map(str.strip, stuff)\n    return fin\n\nif __name__ == '__main__':\n    word_list = make_word_list()\n\n    for word in word_list:\n        if reverse_pair(word_list,word):\n            print word, word[::-1]\n" }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6476190686225891, "avg_line_length": 14, "blob_id": "0401c84d885d7e31c6b7c8373d5c07d41c869e31", "content_id": "f21099da50d265a17b27fcfd532b1be78abf1fbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license",
"max_line_length": 23, "num_lines": 7, "path": "/HW3/ex3_3.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def right_justify(s):\n\tlength = len(s)\n\tspace = 70-length\n\n\tprint ' '*space + s\n\nright_justify('Claire')\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 26, "blob_id": "6057da8f744cf503ccb4bbb662f15dd2194b4397", "content_id": "23c604020c66a81590874a4bc4bfb1cb0e328d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 108, "license_type": "no_license", "max_line_length": 76, "num_lines": 4, "path": "/README.md", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "SoftwareDesign\n==============\n\nDirectories and unit tests for exercises in Software Design at Olin College.\n" }, { "alpha_fraction": 0.5366300344467163, "alphanum_fraction": 0.5824176073074341, "avg_line_length": 22.65217399597168, "blob_id": "04015c7ad012315b4fe705b4ca5307e5d746f450", "content_id": "b86625e32d44a44b92664055280566041946c188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 546, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/HW6/cartalk2.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def has_palindrome(number,start,length):\n \"\"\"\n Takes an integer and checks to see if it has a palindrome\n \"\"\"\n word = str(number)[start:start+length]\n return word[::-1]==word\n\ndef check_i(i):\n return (has_palindrome(i,2,4) and\n has_palindrome(i+1,1,5) and\n has_palindrome(i+2,1,4) and\n has_palindrome(i+3,0,6))\n\ndef check_all_numbers():\n answers = []\n i = 100000\n while i<=999999:\n if check_i(i):\n answers.append(i)\n i = i+1\n return answers\n\nprint check_all_numbers()\n\n\n" }, { "alpha_fraction": 0.5797752737998962, "alphanum_fraction": 0.5932584404945374, "avg_line_length": 22.36842155456543, "blob_id": "9fdc530332f2bb9facd039a2b534eddb501e5980", "content_id": "95892365725e00783939021a0d93b8171ca77e7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 68, "num_lines": 19, "path": "/HW6/interlock.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from inlist import*\n\ndef interlock(word_list,word):\n evens = word[::2]\n odds = word[1::2]\n\n return in_bisect(word_list, evens) and in_bisect(word_list,odds)\n\ndef make_word_list():\n stuff = list(open('words.txt'))\n fin = map(str.strip, stuff)\n return fin\n\nif __name__ == '__main__':\n word_list = make_word_list()\n\n for word in word_list:\n if interlock(word_list,word):\n print word, word[::2],word[1::2]\n\n" }, { "alpha_fraction": 0.574923574924469, "alphanum_fraction": 0.5902140736579895, "avg_line_length": 13.909090995788574, "blob_id": "8e58759237847c6f1912e54fc64054b4fe537bc1", "content_id": "1f2193eb3eba25437fcec00ec916338ef3d0d1f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 33, "num_lines": 22, "path": "/HW3/ex3_5.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def first_line(x):\n\tfor i in range(x):\n\t\tprint '+',\n\t\tprint '-'*4,\n\tprint '+'\n\ndef second_line(x):\n\tfor i in range(4):\n\t\tfor i in range(x):\n\t\t\tprint '|',\n\t\t\tprint ' '*4,\n\t\tprint 
'|'\n\n\ndef draw_grid(num_col, num_rows):\n\tfor i in range(num_rows):\n\t\tfirst_line(num_col)\n\t\tsecond_line(num_col)\n\tfirst_line(num_col)\n\n\ndraw_grid(4,4)" }, { "alpha_fraction": 0.4320000112056732, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 21.117647171020508, "blob_id": "66c080fd6bcf4ff97cee2e112b0f81666e1f0d75", "content_id": "b7cdc79d58bf31873971acc883263e939bfd76c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 91, "num_lines": 17, "path": "/HW5/estimate_pi.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from math import *\n\ndef estimate_pi():\n epsilon = 0\n k = 0\n stuff = 1\n\n while stuff > 1e-15:\n stuff =(factorial(4.0*k)*(1103.0+26390.0*k))/((factorial(k)**4.0)*(396**(4.0*k))) \n epsilon = epsilon + stuff\n k = k+1\n\n pii = 1.0/((2.0*sqrt(2.0)/(9801.0))*epsilon)\n return pii\n\nprint 'pi estimation =', estimate_pi()\nprint 'math.pi =', pi" }, { "alpha_fraction": 0.4554455578327179, "alphanum_fraction": 0.5099009871482849, "avg_line_length": 12.5, "blob_id": "c1a3ac13016f7ce8433212b267c910b2d297e569", "content_id": "96ab65f32046c53679c6bf31b9e481ede8642df0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 32, "num_lines": 30, "path": "/HW4/koch.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from swampy.TurtleWorld import *\n\n\ndef koch(t,n):\n if n<3:\n fd(t,n)\n return\n m = n/3.0\n print m\n koch(t,m)\n lt(t,60)\n koch(t,m)\n rt(t,120)\n koch(t,m)\n lt(t,60)\n koch(t,m)\n \n\ndef snowflake(t,n):\n koch(t,n)\n rt(t,120)\n koch(t,n)\n rt(t,120)\n koch(t,n)\n wait_for_user()\n\nworld = TurtleWorld()\nbob = Turtle()\nbob.delay = 0.01\nsnowflake(bob,333)" }, { "alpha_fraction": 0.6145339608192444, "alphanum_fraction": 0.6240126490592957, "avg_line_length": 14.824999809265137, "blob_id": "64f9ca1289b95f8cb8779ac8e877de167873aeee", "content_id": "9004bc0e4508042dbd0ea3488ef75240b3d2fed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 62, "num_lines": 40, "path": "/HW4/palindrome.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "\"\"\"Module that provides is_palindrome.\n\nAuthor of is_palindrome: you\n\"\"\"\n\ndef first(word):\n \"\"\"Returns the first character of a word.\n\n word: string\n\n returns: length 1 string\n \"\"\"\n return word[0]\n\n\ndef last(word):\n \"\"\"Returns the first character of a word.\n\n word: string\n\n returns: length 1 string\n \"\"\"\n return word[-1]\n\n\ndef middle(word):\n \"\"\"Returns all but the first and last character of a word.\n\n word: string\n\n returns: string\n \"\"\"\n return word[1:-1]\n\n\ndef is_palindrome(word):\n \"\"\"Write a good Docstring here.\"\"\"\n\n # TODO: fill in the body of this function\n return True\n" }, { "alpha_fraction": 0.5545918345451355, "alphanum_fraction": 0.5566326379776001, "avg_line_length": 21.033708572387695, "blob_id": "63b08f608b470903befb2b163a3f8a286c0ee539", "content_id": "a405e98053e42caaf70e9ca5760e3213135f776c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1960, "license_type": "no_license", "max_line_length": 57, "num_lines": 89, "path": "/HW7/anagram_sets.py", "repo_name": 
"cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def make_word_dict():\n t = {}\n fin = open('words.txt')\n for line in fin:\n word = line.strip().lower()\n key = tuple(sorted(tuple(word)))\n if key in t:\n pass\n else:\n t[key] = []\n return t\n\ndef make_word_list():\n t = []\n fin = open('words.txt')\n for line in fin:\n word = line.strip().lower()\n t.append(word)\n return t\n\n\ndef assign_words_to_letters(word_list,letters):\n d = letters\n for word in word_list:\n t = tuple(sorted(tuple(word)))\n d[t].append(word)\n return d\n\ndef anagram_sets(d):\n anagrams = {}\n for key in d.keys():\n if len(d[key])>1:\n anagrams[key]=d[key]\n else:\n pass\n return anagrams\n\ndef largest_sets(anagrams):\n gram = []\n for val in anagrams.values():\n gram.append((len(val),val))\n gram.sort(reverse=True)\n\n sort_gram = []\n for length,value in gram:\n sort_gram.append(value)\n return sort_gram\n\ndef maxLen(t):\n lengths = []\n for value in t:\n lengths.append(len(value))\n biggest = max(lengths)\n return biggest\n\ndef largest_bingo(anagrams):\n gram = []\n for val in anagrams.values():\n gram.append((len(val),val))\n gram.sort(reverse=True)\n\n sort_gram = []\n largest_bingo = []\n for length,value in gram:\n sort_gram.append(value)\n for value in sort_gram:\n if len(value)==maxLen(sort_gram):\n largest_bingo.append(value)\n t = largest_bingo[0]\n letters = tuple(t[0])\n return sorted(letters)\n\ndef bingo(anagrams):\n bingo = {}\n for key in anagrams.keys():\n if len(key)==8:\n bingo[key] = anagrams[key]\n return bingo\n\n\nif __name__ == '__main__':\n letters = make_word_dict()\n word_list = make_word_list()\n anagrams = assign_words_to_letters(word_list,letters)\n grams = anagram_sets(anagrams)\n bing = bingo(grams)\n \n \n print largest_bingo(bing)" }, { "alpha_fraction": 0.47078463435173035, "alphanum_fraction": 0.4766277074813843, "avg_line_length": 25.065217971801758, "blob_id": "adb07d7b11d4eb7ac3534c93b61f50576d4014c4", "content_id": "154a8ae81bd14d290237e10007c503f19f52e3b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1198, "license_type": "no_license", "max_line_length": 70, "num_lines": 46, "path": "/HW7/rotate_pairs.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def make_word_dict():\n d = {}\n fin = open('words.txt')\n for line in fin:\n word = line.strip().lower()\n d[word]=word\n return d\n\ndef rotate_pairs(word_dict,word):\n for i in range(1,14):\n rotated = rotate_word(word,i)\n if rotated in word_dict:\n print word, i, rotated\n\n\ndef rotate_word(word, n):\n \"\"\" \n This function takes a string, word, and rotates it by a number, n.\n \"\"\"\n rotated = ''\n first = ord('a')\n last = ord('z')\n for letter in word:\n number = ord(letter)\n if n >= 0:\n if number < (last - n):\n number = number + n\n letter = chr(number)\n else:\n number = first+(n-(last-ord(letter))) - 1\n letter = chr(number)\n if n < 0:\n if number > (first - n):\n number = number + n\n letter = chr(number)\n else:\n number = last + (n + (number - first)) +1\n letter = chr(number)\n rotated += letter\n return rotated\n\nif __name__ == '__main__':\n word_dict = make_word_dict()\n\n for word in word_dict:\n rotate_pairs(word_dict,word)" }, { "alpha_fraction": 0.41534990072250366, "alphanum_fraction": 0.4740406274795532, "avg_line_length": 19.136363983154297, "blob_id": "df310e6356116fd70c2612f6d06ed52ea031c0a4", "content_id": "bba9bf246c3bea2b1f7b997c1fed4a7fb2af78e1", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 443, "license_type": "no_license", "max_line_length": 50, "num_lines": 22, "path": "/HW8/13_5.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "import random\n\ndef histogram(t):\n d = {}\n for c in t:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n return d\n\ndef choose_from_hist(hist):\n t = []\n for key in hist.keys():\n for i in range(hist[key]):\n t.append(key)\n return random.choice(t)\n\nif __name__ == '__main__':\n t1 = [5,5,7,3,4,5,6,7,8,2,3,4,1,3,6,7,8,9,2,0]\n d1 = histogram(t1)\n print choose_from_hist(d1)\n" }, { "alpha_fraction": 0.5058139562606812, "alphanum_fraction": 0.5127906799316406, "avg_line_length": 18.133333206176758, "blob_id": "6049395045a0b820fd6817bcb839eef144096808", "content_id": "4259089f457f6d4226857c85d9e9f86595797e3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "no_license", "max_line_length": 65, "num_lines": 45, "path": "/HW8/markov.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "import string\n\ndef process_file(fin,header):\n t = []\n\n if header:\n skip_header(fin)\n\n for line in fin:\n process_line(line,t)\n\n return t\n\ndef skip_header(fin):\n for line in fin:\n if line.startswith('[Illustration:]'):\n break\n\ndef process_line(line,t):\n line = line.replace('-',' ')\n\n for word in line.split():\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n\n t.append(word)\n\ndef name(t, pre_len):\n d = {}\n for i in list(range(len(t)))[::pre_len]:\n f = tuple(t[i:i+pre_len])\n if f[:-1] in d.keys():\n d[f[:-1]].extend(f[-1])\n else:\n d[f[:-1]] = [f[-1]]\n\n return d\n\ndef main():\n fin = file('fairies_short.txt')\n t = process_file(fin, True)\n print name(t,3)\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6066666841506958, "alphanum_fraction": 0.6066666841506958, "avg_line_length": 9.785714149475098, "blob_id": "709911c2af376973de152a2ca00f79b6d90008b2", "content_id": "ab30b9fd5d5055a6ea943d57c4b6a54fb37e4d91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "no_license", "max_line_length": 28, "num_lines": 14, "path": "/HW3/ex3_4.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "def do_twice(f,s):\n\tf(s)\n\tf(s)\n\ndef print_twice(x):\n\tprint x\n\tprint x\n\ndef do_four(g, y):\n\tdo_twice(g,y)\n\tdo_twice(g,y)\n\n\ndo_four(print_twice, 'spam')" }, { "alpha_fraction": 0.4881209433078766, "alphanum_fraction": 0.4881209433078766, "avg_line_length": 22.200000762939453, "blob_id": "79d373fa55f763c088d39760778eadf13b373d4f", "content_id": "b21e8377b65fdd5c5cda71ce040dcdb7320eb037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 46, "num_lines": 20, "path": "/HW8/13_1.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "# res = []\n# import string\n# fin = open('fairies_short.txt')\n\n# for line in fin:\n# words = line.rsplit(' ')\n# for word in words:\n# word = word.strip()\n# for char in word:\n# if char in string.punctuation:\n# word = word.replace(char,'')\n# if word =='':\n# pass\n# else:\n# res.append(word.lower())\n \n# print res\n\nfin = open('fairies_short.txt')\nprint fin.readline()" }, { "alpha_fraction": 0.5667396187782288, 
"alphanum_fraction": 0.5696572065353394, "avg_line_length": 20.746030807495117, "blob_id": "a41fe4b7922dcae190ebaf2dad6c296445d6e366", "content_id": "c6cc32b36c830de8c83f6401a06b00a90df6942d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1371, "license_type": "no_license", "max_line_length": 47, "num_lines": 63, "path": "/HW7/reducible.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from inlist import * \n\ndef make_word_list():\n stuff = list(open('words.txt'))\n fin = map(str.strip, stuff)\n return fin\n\ndef find_children(word_list,word):\n children = []\n for i in range(len(word)):\n child = word[:i]+word[i+1:]\n if in_bisect(word_list,child):\n children.append(child)\n return children\n\nmemo = {}\nmemo[''] = ['']\n\ndef reducible(word_list,word):\n if word in memo:\n return memo[word]\n\n res = []\n for child in find_children(word_list,word):\n t = reducible(word_list,child)\n if t:\n res.append(child)\n\n memo[word] = res\n return res\n\ndef all_reducible(word_list):\n res = []\n for word in word_list:\n t = reducible(word_list,word)\n if t != []:\n res.append(word)\n return res \n\ndef print_trail(word_list,word):\n if len(word)==1:\n print word\n return\n print word,\n t = reducible(word_list,word)\n print_trail(word_list,t[0])\n\ndef find_longest(word_list):\n reducibles = all_reducible(word_list)\n longest = []\n for word in reducibles:\n longest.append((len(word),word))\n longest.sort(reverse=True)\n\n for length,word in longest[:5]:\n print '\\n'\n print_trail(word_list,word)\n print '\\n'\n \n\nif __name__ == '__main__':\n word_list = make_word_list()\n find_longest(word_list)\n\n" }, { "alpha_fraction": 0.5532212853431702, "alphanum_fraction": 0.5700280070304871, "avg_line_length": 17.789474487304688, "blob_id": "2eb04d392324906ad96dcbc013fdb9e0dc8febf5", "content_id": "7c1000dc1c7138b33f50751887e69d4abec08539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 50, "num_lines": 38, "path": "/HW8/analyze_book3.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "import random\nfrom bisect import bisect_left\n\ndef create_freq(d):\n t = []\n freq = [0]\n i = 1\n for key in d.keys():\n t.append(key)\n freq.append(d[key]+freq[i-1])\n i += 1\n freq.remove(0)\n \n return freq,t\n\ndef choose_num(freq):\n return random.randint(1,freq[-1])\n\ndef find_index(freq,num):\n return bisect_left(freq,num)\n\ndef choose_word(word_list, i):\n return word_list[i]\n\ndef main():\n d = {'poop':17, 'merp':5, 'larp':7, 'frump':2}\n\n freq, word_list = create_freq(d)\n print freq\n print word_list\n num = choose_num(freq)\n print num\n i = find_index(freq,num)\n print i\n print choose_word(word_list,i)\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5536791086196899, "alphanum_fraction": 0.5705669522285461, "avg_line_length": 20.842105865478516, "blob_id": "aa3118bd4c87592d367b88e6687904dfa46db048", "content_id": "83b14ca1d4a7e9eb224f70849ed2054d2939670d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "no_license", "max_line_length": 50, "num_lines": 38, "path": "/HW7/homophone.py", "repo_name": "cebarnes/SoftwareDesign", "src_encoding": "UTF-8", "text": "from pronounce import*\n\ndef make_words_dict():\n d = {}\n fin = open('words.txt')\n for line in fin:\n word = 
line.strip().lower()\n d[word]=word\n return d\n\ndef homophone(a,b,pro):\n if a not in pro or b not in pro:\n return False\n \n return pro[a]==pro[b]\n\ndef check(word_dict,word,pro):\n word1 = word[1:]\n if word1 not in word_dict:\n return False\n if not homophone(word,word1,pro):\n return False\n\n word2 = word[0]+word[2:]\n if word2 not in word_dict:\n return False\n if not homophone(word,word2,pro):\n return False\n\n return True\n\nif __name__ == '__main__':\n pro = read_dictionary('c06d.txt')\n word_dict = make_words_dict()\n\n for word in word_dict:\n if check(word_dict,word,pro):\n print word, word[1:], word[0]+word[2:]" } ]
24
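Several of the HW6/HW7 scripts above do "from inlist import*" and call in_bisect(word_list, word), but inlist.py itself is not part of this listing. A minimal sketch of what that helper presumably looks like, given how it is called (a membership test on a sorted word list); this is a reconstruction, not the author's file:

import bisect

def in_bisect(sorted_list, target):
    # locate the insertion point, then confirm the element is actually there
    i = bisect.bisect_left(sorted_list, target)
    return i < len(sorted_list) and sorted_list[i] == target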
sun123qiang123/olevba_analyzer
https://github.com/sun123qiang123/olevba_analyzer
672e2caa5b4b87a35cf61c23fa8d1a387cd8c8dc
ea21ffc51285715da778fe22ab08e7558ee2a3b5
069eb78cd15a8d4f7c22cbef76aca893b262ef4e
refs/heads/master
2020-03-06T16:55:19.978632
2017-08-11T14:01:01
2017-08-11T14:01:01
126,981,444
1
0
null
2018-03-27T12:15:55
2017-09-14T18:41:16
2017-08-11T14:01:44
null
[ { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.7954545617103577, "avg_line_length": 13.666666984558105, "blob_id": "f870cb3202825e2b7852db350eb190ce61c97a55", "content_id": "a977bc22214ae6cc96ae95697b8d114bef3fa0f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/requirements.txt", "repo_name": "sun123qiang123/olevba_analyzer", "src_encoding": "UTF-8", "text": "oletools==0.46\ncoloredlogs==7.0\npyinstaller\n" }, { "alpha_fraction": 0.6158382296562195, "alphanum_fraction": 0.6200505495071411, "avg_line_length": 39.011234283447266, "blob_id": "e7edee5c1d5915e45d63a8b96b298f6058fd61e9", "content_id": "2361902c8f94b40aefce6a2514897c49faf6cd27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3561, "license_type": "no_license", "max_line_length": 115, "num_lines": 89, "path": "/olevba_analyzer/main.py", "repo_name": "sun123qiang123/olevba_analyzer", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport shutil\nimport logging\nimport hashlib\nimport coloredlogs\nfrom oletools import olevba\nfrom argparse import ArgumentParser\n\nlevel_styles = {'info': {'color': 'green'},\n 'warning': {'color': 'yellow'},\n 'debug': {'color': 'blue', 'bold': True},\n 'critical': {'color': 'red', 'bold': True}}\n\nlogging.basicConfig(level=logging.INFO)\ncoloredlogs.install(level='DEBUG', fmt=' %(message)s', level_styles=level_styles)\n\n\nclass OleVbaAnalyzer:\n def __init__(self, file_path):\n self.file_path = file_path\n self.separator = '\\n%s\\n' % ('-' * 30)\n\n with open(self.file_path, 'rb') as f:\n sha256 = hashlib.sha256(f.read()).hexdigest()\n self.analysis_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), sha256)\n\n if os.path.isdir(self.analysis_path):\n shutil.rmtree(self.analysis_path, ignore_errors=True)\n os.mkdir(self.analysis_path)\n\n self.make_analysis()\n\n def make_analysis(self):\n \"\"\"\n Function that make the file analysis\n :return:\n \"\"\"\n logging.debug('\\n\\n%s' % self.separator)\n ole_parser = olevba.VBA_Parser(self.file_path)\n if not ole_parser.detect_vba_macros():\n return logging.info('No VBA Macros were found in this file')\n logging.critical('VBA Macros found')\n\n # Extracts the macro into analysis path\n for filename, stream_path, vba_filename, vba_code in ole_parser.extract_macros():\n logging.debug(self.separator)\n logging.info('OLE stream : %s' % stream_path)\n logging.info('VBA filename : %s' % vba_filename)\n\n tp = os.path.join(self.analysis_path, vba_filename)\n with open(tp, 'w') as f:\n f.write(vba_code)\n\n logging.warning('\\nSaved in: \"%s\"\\n' % tp)\n\n # Analyze all macros\n logging.debug(self.separator)\n logging.critical('Keywords: \\n')\n for kw_type, keyword, description in ole_parser.analyze_macros():\n logging.warning('Type: %s' % kw_type)\n logging.info('Keyword: %s\\nDescription: %s\\n' % (keyword, description))\n\n logging.debug(self.separator)\n logging.critical('Analysis: \\n')\n logging.warning('VBA obfuscated strings: %d' % ole_parser.nb_vbastrings)\n logging.warning('IOCs: %d' % ole_parser.nb_iocs)\n logging.warning('AutoExec keywords: %d' % ole_parser.nb_autoexec)\n logging.warning('Suspicious keywords: %d' % ole_parser.nb_suspicious)\n logging.warning('Hex obfuscated strings: %d' % ole_parser.nb_hexstrings)\n logging.warning('Base64 obfuscated strings: %d' % 
ole_parser.nb_base64strings)\n logging.warning('Dridex obfuscated strings: %d' % ole_parser.nb_dridexstrings)\n\nif __name__ == '__main__':\n\n parser = ArgumentParser(description='OLE VBA Analyzer', epilog='Example: \\n OleVbaAnalyzer.exe -f \"bla.doc\"')\n parser.add_argument('-f', '--file', help='File path', type=str, required=False, default=False)\n parser.add_argument('-v', '--version', help='Show version', action='store_true', required=False, default=False)\n args = parser.parse_args()\n if args.version:\n from olevba_analyzer.__version__ import __version__\n\n logging.info('OLE VBA Analyzer version: {}'.format(__version__))\n sys.exit()\n\n elif args.file:\n analyzer = OleVbaAnalyzer(args.file)\n else:\n logging.info('Please, check the Help to know how to use this code :)')\n" }, { "alpha_fraction": 0.7242152690887451, "alphanum_fraction": 0.7511211037635803, "avg_line_length": 28.733333587646484, "blob_id": "5292141c878fae1c931dd4bcb513ea3d3ba62826", "content_id": "da7cc950838948b77909615d3c396dcc9b73a6d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 446, "license_type": "no_license", "max_line_length": 110, "num_lines": 15, "path": "/readme.md", "repo_name": "sun123qiang123/olevba_analyzer", "src_encoding": "UTF-8", "text": "## OLE VBA Analyzer\nWritten by Gustavo Palazolo.\n\n## Purpose\nExtract Malicious VBA Macros inside Microsoft Office Documents.\n\n## Usage\nOleVbaAnalyzer.exe -f \"C:\\SomeDocument.doc\"\n\n## Changes\n* v1.0.0 -- Initial release.\n\n## python-oletools\nThis code uses the library \"python-oletool\"(https://github.com/decalage2/oletools/blob/master/README.md)\nThe python-oletools package is copyright (c) 2012-2017 Philippe Lagadec (http://www.decalage.info). All rights reserved.\n" } ]
3
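The macro-handling calls in main.py above come from the oletools olevba API; all four calls appear verbatim in that file. A minimal standalone sketch of the same flow ('suspicious.doc' is a made-up input name):

from oletools import olevba

vba = olevba.VBA_Parser('suspicious.doc')  # hypothetical input document
if vba.detect_vba_macros():
    # each macro stream yields its location and source code
    for filename, stream_path, vba_filename, vba_code in vba.extract_macros():
        print(stream_path, vba_filename)
    # keyword analysis flags AutoExec triggers, IOCs, obfuscation, etc.
    for kw_type, keyword, description in vba.analyze_macros():
        print(kw_type, keyword, description)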
vvarrela/TuFlixPlusOSE
https://github.com/vvarrela/TuFlixPlusOSE
987b7a41956e8330d37a466c7daa666e4fda98e7
69d86de54e6449dff394cebabe6dcce685204591
ad898ec93f212e5c5cf565375a0cdbf9e5552659
refs/heads/main
2023-08-04T16:07:25.829000
2021-09-22T01:56:43
2021-09-22T01:56:43
406,157,579
0
0
MIT
2021-09-13T23:20:01
2021-09-10T19:19:18
2021-09-13T21:02:12
null
[ { "alpha_fraction": 0.753333330154419, "alphanum_fraction": 0.753333330154419, "avg_line_length": 24, "blob_id": "22b7b94dbf2e0d6f32574d0e4a5a68726b5ab81b", "content_id": "b479d38b409854e01b1ac9fb06910c042679109d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "permissive", "max_line_length": 56, "num_lines": 6, "path": "/TuFlix/Servicios/apps.py", "repo_name": "vvarrela/TuFlixPlusOSE", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ServiciosConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'Servicios'\n" }, { "alpha_fraction": 0.8011695742607117, "alphanum_fraction": 0.8011695742607117, "avg_line_length": 27.66666603088379, "blob_id": "798d9458e3826ad10047e90012f7ae985615f701", "content_id": "92ec00f1fb9763962d048a3cf4da57b372f43e73", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "permissive", "max_line_length": 49, "num_lines": 6, "path": "/TuFlix/Servicios/views.py", "repo_name": "vvarrela/TuFlixPlusOSE", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef Ejemplo(request):\n return HttpResponse(\"Bienvenido a Servicios\")" }, { "alpha_fraction": 0.756302535533905, "alphanum_fraction": 0.756302535533905, "avg_line_length": 18.83333396911621, "blob_id": "42cef5e7560b1e01f7ed6f20be1bced2e731c91c", "content_id": "5a8e810a1fa0b21b8ed45f0554c5ce50f0f6d98e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "permissive", "max_line_length": 38, "num_lines": 6, "path": "/TuFlix/Usuarios/urls.py", "repo_name": "vvarrela/TuFlixPlusOSE", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom Usuarios.views import vistaPrueba\n\nurlpatterns = [\n path('prueba', vistaPrueba)\n]\n" }, { "alpha_fraction": 0.7433628439903259, "alphanum_fraction": 0.7433628439903259, "avg_line_length": 17.83333396911621, "blob_id": "e6e6dc4cb005493cb14749fc3ac36bf60c29d2e3", "content_id": "fc2f1d6ae6c5361c15f52c0308e9856ebcf1e500", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "permissive", "max_line_length": 35, "num_lines": 6, "path": "/TuFlix/Servicios/urls.py", "repo_name": "vvarrela/TuFlixPlusOSE", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom Servicios.views import Ejemplo\n\nurlpatterns = [\n path('ejemplo', Ejemplo)\n]\n" } ]
4
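The two app-level urls.py files above only become reachable once they are included from the project's root URLconf, which is not part of this listing. A plausible sketch of that missing TuFlix/urls.py (the route prefixes here are assumptions):

from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    # wire up the two app url modules shown above
    path('servicios/', include('Servicios.urls')),
    path('usuarios/', include('Usuarios.urls')),
]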
rialarifin/LSTM
https://github.com/rialarifin/LSTM
49b1aed8afdcc847eafb910fd3547d6ebbd566b9
1e131c5fa769121c68eff16c061128bca653610e
e3295434b73ccd4c3a54e08109402711340085c3
refs/heads/main
2023-01-29T04:05:02.757091
2020-12-13T10:57:16
2020-12-13T10:57:16
320,566,686
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6577560901641846, "alphanum_fraction": 0.6835122108459473, "avg_line_length": 25.63783836364746, "blob_id": "8ed1f7edffdfcf88ee94304d03a4916bd6e7508c", "content_id": "a2988d2a8d33891e9c03701adf99b07bce3027eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5125, "license_type": "no_license", "max_line_length": 107, "num_lines": 185, "path": "/LSTM_Solarirradiance.py", "repo_name": "rialarifin/LSTM", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n# \"\"\"\r\n# Created on Thu Aug 13 11:52:56 2020\r\n\r\n# @author: Rial\r\n\r\n\r\nimport math\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense,LSTM\r\nfrom keras.layers import Dropout\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('fivethirtyeight')\r\n\r\n#get GHI data\r\ndf=df=pd.read_excel(r'Data_Preparation.xlsx')\r\ndf=df[['date','GHI_Average']]\r\n# df['date'] = pd.to_datetime(df['date'])\r\n# df['hour'] = df['date'].dt.hour\r\n# df=df[(df.hour >= 7) & (df.hour <= 19)]\r\ndf=df[['date','GHI_Average']]\r\ndf=df.set_index('date')\r\ndf=df[:8760]\r\n#number rows and columns\r\nshape=df.shape\r\n\r\n#plot graph\r\nplt.figure(figsize=(16,8))\r\nplt.title('GHI_2018')\r\nplt.plot(df['GHI_Average'])\r\nplt.xlabel('date', fontsize=16)\r\nplt.ylabel('GHI_2018')\r\n\r\n#convert data to numpy\r\ndata=df.values\r\n\r\n#number of rows to train (80%)\r\ntraining_data_len=math.ceil(len(data)*.8)\r\n\r\n\r\n#scaled the data\r\nscaler=MinMaxScaler(feature_range=(0,1))\r\nscaled_data=scaler.fit_transform(data)\r\n\r\n#training data set\r\n#scaled training data set\r\ntrain_data=scaled_data[0:training_data_len, :]\r\n#split the data into x_train and y_train data sets\r\nx_train=[]\r\ny_train=[]\r\n\r\nfor i in range(60, len(train_data)):\r\n x_train.append(train_data[i-60:i,0])\r\n y_train.append(train_data[i,0])\r\n if i<=60:\r\n print(x_train)\r\n print(y_train)\r\n print()\r\n \r\n#convert x train y train to numpy\r\nx_train, y_train=np.array(x_train), np.array(y_train)\r\n\r\n#reshape the data\r\nx_train=np.reshape(x_train,(x_train.shape[0],x_train.shape[1],1))\r\nx_train.shape\r\n\r\n#build the LSTM model\r\n# Initialising the RNN\r\nmodel = Sequential()\r\n\r\n# Adding the first LSTM layer and some Dropout regularisation\r\nmodel.add(LSTM(units = 50, activation='relu',return_sequences = True, input_shape = (x_train.shape[1], 1)))\r\n# model.add(Dropout(0.2))\r\n\r\n# # Adding a second LSTM layer and some Dropout regularisation\r\n# model.add(LSTM(units = 50, return_sequences = True))\r\n# model.add(Dropout(0.2))\r\n\r\n# # Adding a third LSTM layer and some Dropout regularisation\r\n# model.add(LSTM(units = 50, return_sequences = True))\r\n# model.add(Dropout(0.2))\r\n\r\n# Adding a fourth LSTM layer and some Dropout regularisation\r\nmodel.add(LSTM(units = 50))\r\n# model.add(Dropout(0.2))\r\n\r\n# Adding the output layer\r\nmodel.add(Dense(units = 25)) \r\nmodel.add(Dense(units = 1)) \r\n\r\n#compile the model\r\nmodel.compile(optimizer='adam', loss='mean_squared_error')\r\n\r\n#train the model\r\nmodel.fit(x_train,y_train,batch_size=10, epochs=100)\r\n\r\n#create the testing data set\r\n#create a new array containing scaled values from index 10424 to 10484\r\ntest_data=scaled_data[training_data_len-60:,:]\r\n\r\n#create the data sets x_tes and 
y_test\r\nx_test=[]\r\ny_test=data[training_data_len:,:]\r\nfor i in range(60,len(test_data)):\r\n    x_test.append(test_data[i-60:i,0])\r\n    \r\n#convert data to numpy\r\nx_test=np.array(x_test)\r\n\r\n#reshape\r\nx_test=np.reshape(x_test,(x_test.shape[0],x_test.shape[1],1))\r\n\r\n#get the model predicted ghi\r\npredictions=model.predict(x_test)\r\npredictions=scaler.inverse_transform(predictions)\r\n\r\n#get the rmse (square the errors before averaging, then take the root)\r\nrmse=np.sqrt(np.mean((predictions-y_test)**2))\r\n\r\n#plot the data\r\ntrain=df[:training_data_len]\r\nvalid=df[training_data_len:]\r\nvalid['Predictions']=predictions\r\ntrain_data2=df[:training_data_len]\r\ntrain_data2['Predictions']=np.nan\r\nall_data=train_data2.append(valid)\r\n\r\n#visualize the data\r\nplt.figure(figsize=(16,8))\r\nplt.title('Model')\r\nplt.xlabel('date',fontsize=14)\r\nplt.ylabel('GHI',fontsize=14)\r\nplt.plot(train['GHI_Average'])\r\nplt.plot(all_data[['GHI_Average','Predictions']])\r\nplt.legend(['Train','Val','Predictions'],loc='lower right')\r\nplt.scatter(valid['GHI_Average'],valid['Predictions'],marker='o')\r\n\r\n\r\nfrom sklearn.metrics import r2_score\r\n\r\ncoefficient_of_determination = r2_score(valid['GHI_Average'],valid['Predictions'])\r\n\r\n\r\n# #predict future\r\n# from keras.preprocessing.sequence import TimeseriesGenerator\r\n\r\n# train=df\r\n# scaler.fit(train)\r\n# train=scaler.transform(train)\r\n\r\n# n_input=60\r\n# n_features=1\r\n\r\n# generator=TimeseriesGenerator(train,train,length=n_input, batch_size=6)\r\n# model.fit_generator(generator, epochs=10)\r\n\r\n# pred_list=[]\r\n# batch=train[-n_input:].reshape((1,n_input,n_features))\r\n\r\n# for i in range(n_input):\r\n#     pred_list.append(model.predict(batch)[0])\r\n#     batch=np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)\r\n    \r\n# from pandas.tseries.offsets import DateOffset\r\n# add_dates=[df.index[-1]+DateOffset(hours=x) for x in range(0,61)]\r\n# future_dates=pd.DataFrame(index=add_dates[1:],columns=df.columns)\r\n\r\n\r\n# df_predict=pd.DataFrame(scaler.inverse_transform(pred_list),\r\n#                        index=future_dates[-n_input:].index,columns=['Prediction'])\r\n\r\n# df_proj=pd.concat([df,df_predict],axis=1)\r\n\r\n# plt.figure(figsize=(10,4))\r\n# plt.plot(df_proj.index,df_proj['GHI_Average'])\r\n# plt.plot(df_proj.index,df_proj['Prediction'], color='r')\r\n# plt.legend(loc='best',fontsize='large')\r\n# plt.xticks(fontsize=12)\r\n# plt.yticks(fontsize=12)\r\n# plt.show()\r\n\r\n\r\n\r\n    \r\n" }, { "alpha_fraction": 0.8474576473236084, "alphanum_fraction": 0.8474576473236084, "avg_line_length": 28.5, "blob_id": "433af27bfe4d5ecc41c8014dd486a02a84d161f0", "content_id": "04101c0768585124c8b545ae90e257ccbd61f33b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "no_license", "max_line_length": 69, "num_lines": 4, "path": "/README.md", "repo_name": "rialarifin/LSTM", "src_encoding": "UTF-8", "text": "# LSTM\nSolar irradiance forecasting using LSTM\n\nForecasts solar irradiance from hourly solar irradiance measurements\n" } ]
2
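One caveat in LSTM_Solarirradiance.py above: the MinMaxScaler is fit on the entire series before the train/test split, which leaks test-set statistics into training. A sketch of the leak-free variant, reusing the script's own data and training_data_len variables (so it is meant to replace the scaling step in that script, not run on its own):

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler(feature_range=(0, 1))
# fit the scaler on the training rows only...
scaled_train = scaler.fit_transform(data[:training_data_len, :])
# ...then apply the same transform to the held-out rows
scaled_test = scaler.transform(data[training_data_len - 60:, :])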
BiscuitTaylor/BiscuitWebsite
https://github.com/BiscuitTaylor/BiscuitWebsite
52ec28bc8074e0810fe87c52eaf3c94e0d8e5374
ee1827a69c01035c3507c0d3f0c8882023e538da
b0a83cd9b4b5d037e0fba2f61550e5d50977aab5
refs/heads/master
2021-06-26T06:35:56.502042
2017-02-17T05:01:50
2017-02-17T05:01:50
7,289,697
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6837748289108276, "alphanum_fraction": 0.7086092829704285, "avg_line_length": 31.648649215698242, "blob_id": "329f2e2d9c53d2b3e4bdf5e258b70d661c243312", "content_id": "5f0e4a58dfd2138e46ca4369620c11888411f0da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1208, "license_type": "no_license", "max_line_length": 73, "num_lines": 37, "path": "/WebContent/Biscuit/Hikes/PhoenixLake/PhoenixLake.html", "repo_name": "BiscuitTaylor/BiscuitWebsite", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n<html>\n<head>\n <meta http-equiv=\"content-type\"\n content=\"text/html; charset=ISO-8859-1\">\n <title>PhoenixLake</title>\n <style type=\"text/css\">img {border: 1px solid ;\"}\n \t\t\t\t\t\tdiv {clear:left;}\n </style>\n</head>\n<body background=\"../tinytrees.jpg\">\nLate summer, 2005. Last camping trip before Nathan leaving for Sweden - \na four day, 20-mile round trip to Old Man Mountain,\nin the Grouse Ridge area of Nevada County, California<br>\n\n<hr style=\"width: 100%; height: 2px;\">\n<a href=\"NathanIsASpeckOnOldManSummit.JPG\"><br>\n<img src=\"NathanIsASpeckOnOldManSummit_thumb.JPG\"\n title=\"Nathan is a speck on the top of Old Man Mountain\"\n alt=\"Nathan is a speck on the top of Old Man Mountain\"><br>\nView of Old Man Mountain from Phoenix Lake... Nathan at the summit<br>\n</a>\n\n<hr style=\"width: 100%; height: 2px;\">\n<a href=\"ViewOfOldManFromGrouseRidge.JPG\">\n<img src=\"ViewOfOldManFromGrouseRidge_thumb.JPG\"\n title=\"Old Man from Grouse Ridge\"\n alt=\"Old Man from Grouse Ridge\"\n style=\"width: 367px; height: 245px;\"><br>\nView of Old Man Mountain from Grouse Ridge lookout<br>\n</a>\n\n<br><hr>\n<a href=\"../Hikes.html\"><br>Other Hikes</a>\n\n</body>\n</html>\n" }, { "alpha_fraction": 0.7433649301528931, "alphanum_fraction": 0.750947892665863, "avg_line_length": 50.46341323852539, "blob_id": "ca3d089c34099122674be5798536eb90a7ce53c9", "content_id": "a6e58093c1cb2eddede8d7ef67245ca7c6e4f508", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4220, "license_type": "no_license", "max_line_length": 89, "num_lines": 82, "path": "/WebContent/Tricycle/Pocket/HealthAndSafety.html", "repo_name": "BiscuitTaylor/BiscuitWebsite", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n<html>\n<head>\n <meta http-equiv=\"content-type\"\n content=\"text/html; charset=ISO-8859-1\">\n <title>Catrike Pocket #266</title>\n</head>\n<body>\n<div style=\"text-align: center;\">\n<div style=\"text-align: center;\"><big><big>Health &amp; Safety</big></big><br>\n<br>\n<a href=\"DisposableDigital/StagecoachBeerwagonTricycle_large.jpg\"><img\n src=\"DisposableDigital/StagecoachBeerwagonTricycle.jpg\" title=\"\"\n alt=\"Stagecoach, Beer wagon and tricycle\"\n style=\"border: 0px solid ; width: 450px; height: 300px;\"></a><br>\nTradition of yellow<br>\n<br>\n<big>Safety</big><br>\n<div style=\"text-align: left;\"> &nbsp; I am visible yellow.\n&nbsp;Visibility is my main safety concern when riding any cycle on the\nstreet, especially at night. &nbsp;So, I have good lights. &nbsp;The low\nheight of the tricycle does not seem to prevent drivers from seeing\nme.&nbsp; Some wisdom from Dana Lieberman, on the bentrideronline trike\nforum:&nbsp; \"drivers have a hard time seeing you from the side, not the\nfront and back. 
Thus, you have to be aware of situations where you\nbecome invisible, and ride appropriately\".&nbsp; The only time that I\nreally get concerned about the low profile is when I'm riding through a\nparking lot, and someone might back up without seeing me. &nbsp;There is\na flag.&nbsp; I just use the factory flag; I don't feel the need to add\nany additional flaggage, though I do admire those who <a\n href=\"crossbones_greenman62.jpg\">fly the laundry</a> proudly.&nbsp;\n(Photo from <strong><span style=\"font-weight: normal;\">greenman62, </span><span\n style=\"font-weight: normal;\">on the Trike forum at bentrideronline.)</span></strong><br>\n</div>\n<div style=\"text-align: left;\"> &nbsp; My lights are bright. &nbsp;I\nactually feel more visible at night than in the day. &nbsp;I have a\nNiteRider headlight (32 watts of halogen) and high-intensity LED\ntaillight, which both run off of the water-bottle style battery mounted\nunder the seat. &nbsp;I originally installed the taillight directly\nunder the top of the back of the rack - but then I realized that the\nlight was not visible from the sides, since it was between the panniers.\n&nbsp;The taillight is now installed on the top side of the rack.\n&nbsp;(The NiteRider taillight is designed to provide some visibility\nfrom the sides.) &nbsp;For backup, and just extra lighting for\nhigh-traffic experiences, I have two Cateye EL-400 LED headlights and a\nCateye LED taillight. &nbsp;I use flashing mode for all of the LED\nlights (to be seen better).<br>\n</div>\n<div style=\"text-align: left;\"> &nbsp; I recommend clipless pedals when\nriding a trike. &nbsp;Having a foot slip off of the pedal could result\nin a foot hitting the ground, which could be extremely unpleasant at\nhigh speed. &nbsp;Having a clipless pedal system does not eliminate the\npossibility of a foot slip, but it reduces it.<br>\n&nbsp;&nbsp;I tried Shimano SPD pedals, but felt like my feet did not\nhave enough freedom of movement, and my knees were at risk. &nbsp;The\nCrank Brothers' egg-beater style pedals are much more comfortable.\n&nbsp;They seem to allow more 'degrees of freedom'. &nbsp;The SPD's\nseemed to rotate ('float') about a single fixed point only, whereas the\negg-beaters seem to rotate about any point that I want them to.<br>\n</div>\n<div style=\"text-align: left;\"> &nbsp; The dual mirrors are helpful.\n&nbsp;It is difficult for me to turn my head to see behind me, without\naccidentally adding some unwanted input to the steering. &nbsp;I use the\nmirror on the left mostly, but depending on the situation, sometimes I\ncan get a better view out of the right-hand mirror. &nbsp;And, the dual\nmirror setup provides a couple of extra hand positions, resting the hand\nfully or partially on the mirror booms.<br>\n</div>\n<br>\n<big>Health</big><br>\n<div style=\"text-align: left;\"> &nbsp; The exercise that I get on the\nPocket is highly aerobic. &nbsp;With the low gear range of the Pocket\n(thanks to small wheels coupled with standard drivetrain components), I\ndon't need to 'mash' up the big hills, I can 'spin'.
</div>\n</div>\n</div>\n<hr style=\"width: 100%; height: 2px;\"> <a href=\"Technycle.html\">Back</a><br>\n<br>\nemail [email protected]<br>\n<br>\n</body>\n</html>\n" }, { "alpha_fraction": 0.518287718296051, "alphanum_fraction": 0.5215388536453247, "avg_line_length": 40.943180084228516, "blob_id": "f6dca3417e927ea8a521ea55f9be9b818a469890", "content_id": "b9a423f70d87abc5e7b5decf6b8d486564ae5960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3691, "license_type": "no_license", "max_line_length": 168, "num_lines": 88, "path": "/WebContent/slides/gen_html_div.py", "repo_name": "BiscuitTaylor/BiscuitWebsite", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\n\nimport sys\nimport argparse # for parsing command-line arguments in main()\n\n\ndef generate_div_from_subdir(subdir):\n print(\"Generating <div> for \" + str(subdir))\n\n onlyfiles = [ f for f in listdir(subdir) if isfile(join(subdir,f)) ]\n print(onlyfiles)\n\n # Use subdir name as a section title\n print('<h2>{s}</h2>'.format(s=subdir))\n # Set up a galleria gallery for each subdir\n print('<div id=\"gallery\"; align=\"center\">')\n\n for f in onlyfiles:\n if ('.tif' in f):\n # one of our slides\n # Convert filename to jpg_image_big, jpg_image, thumbnail_image, slide_title, and slide_alt_title\n # NOTE - we are assuming that these files have been created with my mac Automator Service 'slide2jpegs'\n tifname = f\n\n jpg_image_big = \"/\".join([subdir, tifname.replace('.tif', '_big.jpg')])\n jpg_image = \"/\".join([subdir, tifname.replace('.tif', '.jpg')])\n thumbnail_image = \"/\".join([subdir, tifname.replace('.tif', '_tn.jpg')])\n\n slide_title = tifname.replace('.tif', '')\n slide_title = slide_title.replace('_', ' ') # separate words\n slide_title = slide_title.title() # Capitalize First Letter Of Each Word\n\n slide_alt_title = slide_title.split()[0]\n\n html_line = ' <a rel=\"{big}\" href=\"{jpg}\">\\n' \\\n ' <img src=\"{thumb}\" title=\"{title}\" alt=\"{alt}\"></a>'.format(big=jpg_image_big,\n jpg=jpg_image,\n thumb = thumbnail_image,\n title=slide_title,\n alt=slide_alt_title)\n print(html_line+\"\\n\")\n print('</div>')\n print('<hr>')\n\n# <a rel=\"japan/merchants_48.JPG\" href=\"japan/merchants_48.JPG\"><img src=\"japan/merchants_48_tn.jpg\" title=\"Merchants waiting to board our ship\" alt=\"Merchants\"></a>\n\n##########################################################################################################\n'''Use both Defaults and Raw Descriptions from argparse formatter_class'''\nclass ArgParseCustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):\n pass\n\ndef main(argv):\n\n try:\n description_text = __doc__ # Reuse docstring at top of file\n examples_text = \\\n'''Examples:\n python gallery_htmler.py\n '''\n parser = argparse.ArgumentParser(description=description_text,\n epilog=examples_text,\n formatter_class=ArgParseCustomFormatter)\n\n except Exception as e:\n print(\"argparse error: {exc}\".format(exc=e))\n parser.print_help()\n sys.exit(2)\n\n # Parse command-line arguments\n parser.add_argument(\"-d\", \"--dir\",\n default=None,\n help=\"The directory to generate an html div from. (Default=traverse subdirs of current dir)\")\n\n args = parser.parse_args()\n\n if (args.dir):\n specified_directory = join('.',args.dir)\n generate_div_from_subdir(specified_directory)\n else:\n # No directory specified... 
look in all subdirs of this dir.\n subdirs = [ f for f in listdir('.') if not isfile(join('.',f)) ]\n\n for subdir in subdirs:\n generate_div_from_subdir(subdir)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.6429448127746582, "alphanum_fraction": 0.7012270092964172, "avg_line_length": 55.24137878417969, "blob_id": "31695e6af6afdcf68f24bacb69393687a056fe61", "content_id": "93c49a1be5aa81cf648a55100237f76324ab94d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 97, "num_lines": 29, "path": "/WebContent/pierre/cityday/jamtli.html", "repo_name": "BiscuitTaylor/BiscuitWebsite", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n<html>\n<head>\n <meta http-equiv=\"content-type\"\n content=\"text/html; charset=ISO-8859-1\">\n <title>Jamtli</title>\n</head>\n<body style=\"color: rgb(0, 0, 0); background-color: rgb(200, 200, 200);\"\n alink=\"#000099\" link=\"#000099\" vlink=\"#990099\">\n<h1 style=\"text-align: center;\">A little bit outside of Jamtli</h1>\n<div style=\"text-align: center;\">Jamtli, pronounced like a 'y'. <br>\n<img src=\"sjamtli.jpg\" style=\"width: 400px; height: 300px;\"><br>Jamtli (the spire there-over\nis the entrance)<br>\n<img src=\"shut.jpg\" style=\"width: 400px; height: 300px;\"><br>A hut from some time in the \npast.Notice the half-obvious grouse-ish bird and the secretive owl in the upper right corner.<br>\n<img src=\"sredcross.jpg\" style=\"width: 400px; height: 300px;\"><br>Some fake (it isn't a real\ntrail) red crosses (typical of Sweden) on a trail in Jamtli.<br>\n<img src=\"ssodhut.jpg\" style=\"width: 400px; height: 300px;\"><br>A sod hut.<br>\n<img src=\"sindianhuts.jpg\" style=\"width: 400px; height: 300px;\"><br>The teepee on the left\nwill be what Sean and my fort will look like eventually.<br>\n<img src=\"streefort.jpg\" style=\"width: 400px; height: 300px;\"><br>The really cool tree fort;\na Sami grain-storage bin.<br>\n<img src=\"starpit.jpg\" style=\"width: 400px; height: 300px;\"><br>A tar pit? 
I don't know how that\nworks, but that is what it is.<br>\n<img src=\"speligro.jpg\" style=\"width: 400px; height: 300px;\"><br>What?<br>\n<img src=\"strees.jpg\" style=\"width: 400px; height: 300px;\"><br>If your processor is still \nworking, some nice trees.<br>That is all.</div>\n</body>\n</html>" }, { "alpha_fraction": 0.7345700860023499, "alphanum_fraction": 0.749910831451416, "avg_line_length": 41.469696044921875, "blob_id": "83dc52ce5d570f61d3513692cf0dcd175b8ab16d", "content_id": "d7947724f493ed1b52e7f1ba85bb4bfe54855fc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2803, "license_type": "no_license", "max_line_length": 93, "num_lines": 66, "path": "/WebContent/pierre/sweden.html", "repo_name": "BiscuitTaylor/BiscuitWebsite", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n<html>\n<head>\n  <meta http-equiv=\"content-type\"\n content=\"text/html; charset=ISO-8859-1\">\n  <title>Nathan Taylor's home on the Web</title>\n</head>\n<body style=\"color: rgb(0, 0, 0); background-color: rgb(255, 200, 255);\"\n alink=\"#000099\" link=\"#000099\" vlink=\"#990099\">\n<h1 style=\"text-align: center;\">Nathan Taylor</h1>\n<div style=\"text-align: center;\"><img alt=\"Nathan on the moose hunt\"\n src=\"../Images/snathansideways.JPG\">\n<br>\nFriends, supporters, and other people!:\n<p>As you probably already know because you came to this\nsite, I am in a foreign exchange program to Sweden.<br>\nI left on August 30 after much preparation\nwith the Swedish language, and now I will be staying here until the end\nof June.</p>\n<p>It turned out to be a lot harder with the language than I expected,\nbut I am doing well.<br>\nI am even taking other foreign language courses here, and though that\nsounds difficult,\nit is actually very helpful because Spanish has many expressions which\nare more like English\nthan Swedish; it helps in learning Swedish to work with it from two\nangles. </p>\nI have been\nhaving fun here in Sweden, learning about the Swedish culture that lies\nunder the outside appearance. (Sweden is an industrialized country just\nlike the United States, and many things are apparently the same. That\nmeans there are cars, supermarkets (both the same), computers,\nthe same movies mostly even though they are about two months late.)\nEven though so much is the\nsame, it is also somehow really different.\n<p>Take for example the food. Though it is generally the same, there\nare differences in their eating habits. They seem to eat many more\n'whole' foods,\nand more wholesome meals throughout the day. We receive a lunch at\nschool which could pass for a normal dinner in the U.S. And there are\nmany, kids mostly, people who drink milk with their\nfood.</p>\n<p>Another thing is their cleanliness. It is quite clean; my\nhost-father, Sven-Martin,\ncleans pretty much whenever there is something to be cleaned. We\noften just lay our lunch or snack open sandwiches on the table; it is\njust about as clean as the plates.</p>\n<br>\n<br>\n<br>\nWhere is &Ouml;stersund?<br>\nOr where is Sweden, even?
<br>\nGo to the Swedish version of the below site for a cool zoom-in.\n&Ouml;stersund is on the right side of the big lake.<br>\nOr, if you are just interested in the big lake animal (monster)<br>\n<a href=\"http://www.storsjoodjuret.com\">Storsj&ouml;djuret</a><br>\n<br>\nThis site is mainly an introductory site; my less official yet more\ndeveloped real web-site is here: <a href=\"http://biscuit.nfshost.com/pierre/LePagine.html\">La\nPagina de Nathan</a><br>\nAnd here is <a href=\"http://biscuit.nfshost.com/Biscuit/Biscuit.html\">My Dad's\nhome\nsite.</a><br>\n</div>\n</body>\n</html>\n" } ]
5
ThomasEA/MachineLearning_1
https://github.com/ThomasEA/MachineLearning_1
d54ab15e9e69ac8b2dc8f3db54274814b0069ed0
5b7e346fe7baa55137ba7677739967cc411b765e
c68d648b21229ce727f745b7a7ead966c813d3df
refs/heads/master
2020-03-22T16:52:22.919790
2018-08-08T13:59:26
2018-08-08T13:59:26
140,357,513
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6055490970611572, "alphanum_fraction": 0.6266977190971375, "avg_line_length": 29.672618865966797, "blob_id": "94e4ca336cb62b70982ef1dee816319a2bfa5889", "content_id": "4a6096ae55c1e16a3d34fa7fd8610efccc550d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5194, "license_type": "no_license", "max_line_length": 115, "num_lines": 168, "path": "/grad_desc_multi_houses.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 22:56:37 2018\n\n@author: Everton Thomas e Gustavo Emmel\n\nO melhor score obtido foi com a seguinte configuração\n Quantidade de iterações: 10\n Alpha: 0.1\n Score: 460.39\n Custo mínimo: 183\n \nÉ possível verificar a ocorrência de underfiting, já que as previsão \nno conjunto de treino não é muito precisa, contemplando inclusive valores negativos para\nas casas.\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_squared_error\n\ndef nan_to_average(X):\n if X.isnull().values.any():\n cols = X.columns.tolist()\n for x in cols:\n col_mean = np.mean(X[x])\n X[np.isnan(X[x])] = col_mean\n \n return X\n \ndef normalize(X, columns):\n columns[:-1]\n\n scaled_features = StandardScaler().fit_transform(X.iloc[:,:-1].values)\n scaled_features_df = pd.DataFrame(scaled_features, index=X.index, columns=columns[:-1])\n scaled_features_df['MEDV'] = X['MEDV']\n X = scaled_features_df\n return X\n\ndef add_x0(X):\n x0 = np.ones(X.shape[0]).reshape(X.shape[0], 1)\n X['x0'] = x0\n #reposiciona a última coluna (x0) para a primeira posição\n cols = X.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n X = X[cols]\n return X\n\ndef scatter_features(x, fx, lblx, lbly, color):\n plt.scatter(x, fx, c=color)\n plt.xlabel('x -> {0}'.format(lblx))\n plt.ylabel('fx -> {0}'.format(lbly))\n plt.show()\n\ndef hypothesis(X, thetas):\n return np.dot(X, thetas.T)\n\ndef computeCost(X,y,theta):\n tobesummed = np.power(((X @ theta.T)-y),2)\n return np.sum(tobesummed)/(2 * len(X))\n\ndef cost_function(X, y, h, theta):\n N = len(X)\n \n #Soma vetorizada\n soma = np.sum((h(X, theta) - y) ** 2.)\n \n return (1./(2. * float(N))) * soma\n\ndef gradient_descent(X, y, thetas, alpha, max_iter):\n costs = np.zeros(max_iter, dtype=np.float64)\n N = len(X)\n J = len(thetas)\n soma = 0.\n thetas_tmp = thetas\n \n prev_cost = cost_function(X, y, hypothesis, thetas)\n \n thetas_final = []\n \n for i in range(max_iter):\n for j in range(J):\n for n in range(N):\n soma += (hypothesis(X[n], thetas) - y[n]) * X[n][j]\n \n thetas_tmp[j] = thetas_tmp[j] - (alpha/len(X)) * soma\n \n thetas = thetas_tmp\n\n cost = cost_function(X, y, hypothesis, thetas)\n \n costs[i] = cost\n \n if cost < prev_cost:\n thetas_final = thetas\n prev_cost = cost\n \n\n return thetas_final, costs, np.min(costs)\n\ncolumns = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']\n\ndf = pd.read_csv('datasets/casas/house.data.csv', sep='\\t', names=columns)\n\n#1. Seta os valores NAN para a média da coluna\ndf = nan_to_average(df)\n\n#2. Normaliza os dados, exceto a classe\ndf = normalize(df, columns)\n\n#3. adiciona x0 ao dataset para viabilizar o algoritmo\ndf = add_x0(df)\n\n#4. 
Terminado o préprocessamento, divide DS em treino e teste\ntrainset, testset = train_test_split(df, test_size=0.3)\n\n#5. Avaliamos algumas features e sua relação com a classe\nscatter_features(df['CRIM'], df['MEDV'], 'Taxa crimes per capta', 'Valor médio casas', 'red')\nscatter_features(df['AGE'], df['MEDV'], 'Ocupação casas constr. antes 1940', 'Valor médio casas', 'red')\nscatter_features(df['RAD'], df['MEDV'], 'Indice acessib. a rodovias', 'Valor médio casas', 'red')\nscatter_features(df['DIS'], df['MEDV'], 'Dist. 5 centros empregatícios em Boston', 'Valor médio casas', 'red')\nscatter_features(df['RM'], df['MEDV'], 'Número médio de quartos', 'Valor médio casas', 'red')\n\n#6. Aplicamos o Gradiente Descendente no conjunto de treino\nX = np.array(trainset.iloc[:,:-1], dtype=np.float32)\ny = np.array(trainset.iloc[:,-1], dtype=np.float32)\n\nmax_iter = 6\nalpha = 0.1\nthetas = np.ones(X.shape[1])\n\ncost = cost_function(X, y, hypothesis, thetas)\n\ntheta_final, costs, min_cost = gradient_descent(X, y, thetas, alpha, max_iter)\n\n#7. Plot dos custos e do mínimo global (para a quantidade de iterações)\nm = np.vstack((np.arange(max_iter), costs))\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nline, = ax.plot(m[0], m[1], c='r')\npoint, = ax.plot(m[0][np.argmin(m[1])], m[1][np.argmin(m[1])], 'bo')\nax.set_xlabel('Iterações')\nax.set_ylabel('Custo')\n\nax.annotate('mínimo global (para as iterações)', \n xy=(m[0][np.argmin(m[1])], m[1][np.argmin(m[1])]), \n xytext=(m[0][np.argmin(m[1])] + 2, m[1][np.argmin(m[1])]),\n arrowprops=dict(facecolor='black', shrink=0.05),\n )\nax.annotate('Valor custo mínimo: {0}'.format(round(m[1][np.argmin(m[1])]),3),xy=(max_iter/2,m[1][np.argmax(m[1])]))\n\n#8. Faz as predições sobre o dataset de teste\npredicted = []\nX_test = np.array(testset.iloc[:,:-1], dtype=np.float32)\ny_test = np.array(testset.iloc[:,-1], dtype=np.float32)\n\nfor p in range(len(X_test)):\n predicted.append(hypothesis(X_test[p], theta_final))\n\n#9. 
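Antes do cálculo via sklearn, um esboço hipotético equivalente do MSE (mantido em comentário):\n#    mse_manual = np.mean((np.array(predicted) - y_test) ** 2.)\n\n#9. 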
Calcula o score\nscore = mean_squared_error(y_test, predicted)\n\nprint('Score: ', score)\n\n" }, { "alpha_fraction": 0.7008995413780212, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 26.77083396911621, "blob_id": "fe95aa73daf514771b51b89498506698ca761704", "content_id": "85b50e9e0acfaa51b2932d63a99958d1d211bcd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1343, "license_type": "no_license", "max_line_length": 68, "num_lines": 48, "path": "/ex_aula7_metricas_avaliacao.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 20:17:04 2018\n\n@author: alu2015111446\n\nExercício Aula 7\nMétrica de avaliação\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter #conta e agrupa itens em uma coleção\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\ndf = pd.read_csv('RH.csv', nrows=None, header=0, index_col=None)\n\n#RETIRADO POIS O KNN NÃO ACEITA VARIÁVEIS TEXTO\n#NA VIDA REAL DEVEM SER DISCRETIZADAS\ndf = df.drop(['sales','salary'], axis=1)\n\ntrain_set, test_set = train_test_split(df, test_size=0.2)\n\nn_folds=5\n\nX = train_set.drop(['left'], axis=1)\ny = train_set['left']\n\nskf = StratifiedKFold(n_splits=n_folds)\n\nfor train_index, test_index in skf.split(X,y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n\n predictions = []\n k = 3 #NÃO PODE SER PAR NO KNN\n \n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n predicted_knn = knn.predict(X_test)\n \n accuracy = accuracy_score(y_test, predicted_knn)\n print accuracy\n\n" }, { "alpha_fraction": 0.6148980855941772, "alphanum_fraction": 0.6535488367080688, "avg_line_length": 19.314285278320312, "blob_id": "0c8036ae452b0881f411d38bf8064eb811584b28", "content_id": "dd32d9df8b8d290fdff834b99f7a627de74dc247", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1428, "license_type": "no_license", "max_line_length": 93, "num_lines": 70, "path": "/preprocessing.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 16 00:45:50 2018\n\n@author: evert\n\nExercícios Pós Data Science\nMachine Learning I\nPré-processamento de datasets\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n\n### Step 2. Leia o dataset iris\niris = pd.read_csv('datasets/iris/iris.data', sep=',', header=None)\n\n# print iris\n\n### Step 3. Crie as colunas para o dataset\n# 1. sepal_length (in cm)\n# 2. sepal_width (in cm)\n# 3. petal_length (in cm)\n# 4. petal_width (in cm)\n# 5. class\n\ncolumns = np.array([['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class']])\n\ntmp = np.concatenate((columns, iris), axis=0)\n\niris.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class']\n\ndf = pd.DataFrame(data=tmp[1:,:], columns=tmp[0,:])\n\n### Step 4. Algum dado ausente?\n### Não há nenhum dado ausente\n\n### Step 5 Verifique os valores das linhas 10 ate 29 da coluna\n\nu = tmp[9:28,:]\nprint(u)\n\nk = df.iloc[9:28, [1]]\n\n### Step 6. 
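Nota (esboço): o fatiamento em NumPy/pandas é exclusivo no limite superior;\n### df.iloc[9:29, [1]] cobriria as linhas 10 a 29, enquanto o [9:28] usado acima para na linha 28.\n\n### Step 6. 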
Certo, modifique os valores NaN por 1.0\n\ndf = df.fillna(1.)\n\nklp = iris.copy()\nklp = klp.iloc[:,:-1]\nklp[np.isnan(klp)] = 1.0\n\ntmp[np.isnan(klp)] = 1.0\n\n### Step 7. Agora delete a coluna da classe\n\ndeleted_class = df.iloc[:,:-1]\n\n### Step 8. Coloque as 3 primeiras linhas como NaN\n\ndf.iloc[:3,:]=np.NaN\n\n### Step 9. Delete as linhas que contenham NaN\n\ndf = df.dropna(axis=0)\n\n### Step 10. Reset o index para que ele inicie em 0 novamente\n\ndf = df.reset_index(drop=True)\n\n" }, { "alpha_fraction": 0.5982009172439575, "alphanum_fraction": 0.620189905166626, "avg_line_length": 25.69333267211914, "blob_id": "761a380ea04fa6e578122641015ec7d8043e5df8", "content_id": "d29e675988f6579dc130989471ab7055f13c3463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, "license_type": "no_license", "max_line_length": 101, "num_lines": 75, "path": "/test_gd_multi.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 15:42:52 2018\n\n@author: evert\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport random\n\n# m denotes the number of examples here, not the number of features\ndef gradientDescent(x, y, theta, alpha, m, numIterations):\n xTrans = x.transpose()\n for i in range(0, numIterations):\n hypothesis = np.dot(theta.T, x)\n loss = hypothesis - y\n # avg cost per example (the 2 in 2*m doesn't really matter here.\n # But to be consistent with the gradient, I include it)\n cost = np.sum(loss ** 2) / (2 * m)\n print(\"Iteration %d | Cost: %f\" % (i, cost))\n # avg gradient per example\n gradient = np.dot(xTrans, loss) / m\n # update\n theta = theta - alpha * gradient\n return theta\n\n#computecost\ndef computeCost(X,y,theta):\n tobesummed = np.power(((X @ theta.T)-y),2)\n return np.sum(tobesummed)/(2 * len(X))\n\ndef hypothesis(X, theta):\n return np.dot(X, theta.T)\n\ndef cost_function(X, fx, h, theta):\n soma = 0.\n N = len(X)\n \n for i in range(N):\n soma += (h(X[i], theta) - fx[i]) ** 2.\n \n return (1./(2. 
* float(N))) * soma\n\n#gradient descent\ndef gradientDescent_2(X,y,theta,iters,alpha):\n cost = np.zeros(iters)\n for i in range(iters):\n theta = theta - (alpha/len(X)) * np.sum(X * (X @ theta.T - y), axis=0)\n cost[i] = computeCost(X, y, theta)\n \n return theta,cost\n\nmy_data = pd.read_csv('datasets/casas/ex1data2.csv', names=['size', 'rooms', 'value'])\n\n#setting the matrixes\nX = my_data.iloc[:,0:2]\nones = np.ones([X.shape[0],1])\nX = np.concatenate((ones,X),axis=1)\n\ny = my_data.iloc[:,2:3].values #.values converts it from pandas.core.frame.DataFrame to numpy.ndarray\ntheta = np.zeros([1,3])\n\n#set hyper parameters\nalpha = 0.01\niters = 1000\n\ncost_atu = cost_function(X, y, hypothesis, theta)\n\n\n#running the gd and cost function\ng,cost = gradientDescent_2(X,y,theta,iters,alpha)\nprint(g)\n\nfinalCost = computeCost(X,y,g)\nprint(finalCost)" }, { "alpha_fraction": 0.6000308990478516, "alphanum_fraction": 0.6228951215744019, "avg_line_length": 31.68181800842285, "blob_id": "9a42a19aaab316a11c1053625253376cf9589ffa", "content_id": "1fc9e8f0bd89f78e6a5d31ced04a481719a90a16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6530, "license_type": "no_license", "max_line_length": 117, "num_lines": 198, "path": "/grad_desc_multi_houses_v2.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 22:56:37 2018\n\n@author: Everton Thomas e Gustavo Emmel\n\nDentro do solicitado pelo trabalho, o melhor score obtido foi:\n alpha = 0.1 # taxa de aprendizado\n threshold = 0.001 # diferença aceitável entre custos\n batch_size = 8 # tamanho do batch\n max_epoch = 10 # máximo número de iterações permitido\n \n Score obtido: 50.38\n Valor custo mínimo obtido: 26.0\n\nOutros testes foram realizados, e o melhor score obtido foi com as configurações:\n alpha = 0.1 # taxa de aprendizado\n threshold = 0.001 # diferença aceitável entre custos\n batch_size = 48 # tamanho do batch\n epoch = 0\n max_epoch = 50 # máximo número de iterações permitido\n \n Score obtido: 14.04\n Valor custo mínimo obtido: 11.0\n \n\nÉ possível verificar a ocorrência de underfiting quando utilizamos valores\nbaixos para o batch (entre 1 e 5), já que os valores preditos não se assemelham\ncom os valores reais.\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_squared_error\n\ndef nan_to_average(X):\n if X.isnull().values.any():\n cols = X.columns.tolist()\n for x in cols:\n col_mean = np.mean(X[x])\n X[np.isnan(X[x])] = col_mean\n \n return X\n \ndef normalize(X, columns):\n columns[:-1]\n\n scaled_features = StandardScaler().fit_transform(X.iloc[:,:-1].values)\n scaled_features_df = pd.DataFrame(scaled_features, index=X.index, columns=columns[:-1])\n scaled_features_df['MEDV'] = X['MEDV']\n X = scaled_features_df\n return X\n\ndef add_x0(X):\n x0 = np.ones(X.shape[0]).reshape(X.shape[0], 1)\n X['x0'] = x0\n #reposiciona a última coluna (x0) para a primeira posição\n cols = X.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n X = X[cols]\n return X\n\ndef scatter_features(x, fx, lblx, lbly, color):\n plt.scatter(x, fx, c=color)\n plt.xlabel('x -> {0}'.format(lblx))\n plt.ylabel('fx -> {0}'.format(lbly))\n plt.show()\n\ndef hypothesis(X, thetas):\n return np.dot(X, thetas.T)\n\ndef computeCost(X,y,theta):\n tobesummed 
= np.power(((X @ theta.T)-y),2)\n return np.sum(tobesummed)/(2 * len(X))\n\ndef cost_function(X, y, h, theta):\n N = len(X)\n \n #Soma vetorizada\n soma = np.sum((h(X, theta) - y) ** 2.)\n \n return (1./(2. * float(N))) * soma\n\ndef update_theta(X, fx, h, thetas, alpha, idx_theta):\n N = len(X)\n soma = 0.\n \n for i in range(N):\n if idx_theta == 0:\n soma += (h(X[i], thetas) - fx[i])\n else:\n soma += (h(X[i], thetas) - fx[i]) * X[i]\n \n if idx_theta == 0:\n return thetas[idx_theta] - ((alpha * (1./float(N))) * soma)\n else:\n return (thetas[idx_theta] - ((alpha * (1./float(N))) * soma))[idx_theta]\n\ndef gradient_descent(X, y, thetas, alpha, max_epoch, threshold, batch_size):\n costs = []\n epoch = 0\n prev = np.inf # custo anterior\n curr = cost_function(X, y, hypothesis, thetas) # custo atual\n\n J = len(thetas)\n\n while (abs(curr - prev) > threshold) and (epoch < max_epoch):\n bc = 0 # contador de quantas instâncias passaram pelo batch\n tmp = np.zeros(len(thetas), dtype=np.float64)\n \n for i in range(batch_size):\n X_local = X[bc:(bc + batch_size)]\n fx_local = y[bc:(bc + batch_size)]\n\n for j in range(J):\n tmp[j] = update_theta(X_local, fx_local, hypothesis, thetas, alpha, j)\n\n thetas = tmp\n\n bc += 1\n\n prev = curr\n curr = cost_function(X, y, hypothesis, thetas)\n costs.append(curr)\n epoch += 1\n\n return thetas, costs, np.min(costs)\n\ncolumns = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']\n\ndf = pd.read_csv('datasets/casas/house.data.csv', sep='\\t', names=columns)\n\n#1. Seta os valores NAN para a média da coluna\ndf = nan_to_average(df)\n\n#2. Normaliza os dados, exceto a classe\ndf = normalize(df, columns)\n\n#3. adiciona x0 ao dataset para viabilizar o algoritmo\ndf = add_x0(df)\n\n#4. Terminado o préprocessamento, divide DS em treino e teste\ntrainset, testset = train_test_split(df, test_size=0.2)\n\n#5. Avaliamos algumas features e sua relação com a classe\nscatter_features(df['CRIM'], df['MEDV'], 'Taxa crimes per capta', 'Valor médio casas', 'red')\nscatter_features(df['AGE'], df['MEDV'], 'Ocupação casas constr. antes 1940', 'Valor médio casas', 'red')\nscatter_features(df['RAD'], df['MEDV'], 'Indice acessib. a rodovias', 'Valor médio casas', 'red')\nscatter_features(df['DIS'], df['MEDV'], 'Dist. 5 centros empregatícios em Boston', 'Valor médio casas', 'red')\nscatter_features(df['RM'], df['MEDV'], 'Número médio de quartos', 'Valor médio casas', 'red')\n\n#6. Aplicamos o Gradiente Descendente no conjunto de treino\nX = np.array(trainset.iloc[:,:-1], dtype=np.float32)\ny = np.array(trainset.iloc[:,-1], dtype=np.float32)\n\nalpha = 0.1 # taxa de aprendizado\nthreshold = 0.001 # diferença aceitável entre custos\nbatch_size = 8 # tamanho do batch\nepoch = 0\nmax_epoch = 10 # máximo número de iterações permitido\n \nthetas = np.ones(X.shape[1])\n\ntheta_final, costs, min_cost = gradient_descent(X, y, thetas, alpha, max_epoch, threshold, batch_size)\n\n#7. 
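Antes do plot, um esboço hipotético (mantido em comentário) do custo final sobre o treino, comparável a min_cost:\n#    custo_final_treino = cost_function(X, y, hypothesis, theta_final)\n\n#7. 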
Plot dos custos e do mínimo global (para a quantidade de iterações)\nm = np.vstack((np.arange(len(costs)), costs))\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nline, = ax.plot(m[0], m[1], c='r')\npoint, = ax.plot(m[0][np.argmin(m[1])], m[1][np.argmin(m[1])], 'bo')\nax.set_xlabel('Épocas')\nax.set_ylabel('Custo')\n\nax.annotate('mínimo global (para as iterações)', \n xy=(m[0][np.argmin(m[1])], m[1][np.argmin(m[1])]), \n xytext=(m[0][np.argmin(m[1])] + 2, m[1][np.argmin(m[1])]),\n arrowprops=dict(facecolor='black', shrink=0.05),\n )\nax.annotate('Valor custo mínimo: {0}'.format(round(m[1][np.argmin(m[1])]),3),xy=(len(costs)/2,m[1][np.argmax(m[1])]))\n\n#8. Faz as predições sobre o dataset de teste com a função aprendida\npredicted = []\nX_test = np.array(testset.iloc[:,:-1], dtype=np.float32)\ny_test = np.array(testset.iloc[:,-1], dtype=np.float32)\n\nfor p in range(len(X_test)):\n predicted.append(hypothesis(X_test[p], theta_final))\n\n#9. Calcula o score\nscore = mean_squared_error(y_test, predicted)\n\nprint('Score: ', score)\n\n\n" }, { "alpha_fraction": 0.6349999904632568, "alphanum_fraction": 0.6809090971946716, "avg_line_length": 25.202381134033203, "blob_id": "b36bda354cbfc7155bc628730fc93c45224a6603", "content_id": "1092ba3bbd39dde6317d0c8ce97fcf121fde4bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2211, "license_type": "no_license", "max_line_length": 91, "num_lines": 84, "path": "/pandas_basico.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 11 20:35:47 2018\n\n@author: alu2015111446\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ns = pd.Series([1,3,5,np.nan,6,8], dtype = np.float32)\nprint s\n\ndates = pd.date_range('20130101', periods=6)\nprint dates\n\n#Não informando o Index o Pandas vai criar a coluna com 1, 2, 3, 4, ...\ndf = pd.DataFrame(np.random.randn(6,4), columns=list('ABCD'))\nprint df\n\n#Agora com o Index sendo das datas\ndf = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))\nprint df\n\nprint df.head() #primeiros n-1\nprint df.tail(3) #cauda (os últimos)\nprint df.index\nprint df.columns\nprint df.values #O Numpy array interno do DataFrame (sem o index e os nomes de colunas)\n\nprint df.T #transposição do DF\n\nprint df.describe()\n\nprint df.sort_index(axis = 0, ascending=False) #ordena as linhas em ordem descrescente\nprint df.sort_index(axis = 1, ascending=False) #ordena as colunas em ordem descrescente\nprint df.sort_values(by=['B'], ascending=False) #ordena pela coluna B em ordem descrescente\n\nprint df['A'] #somente os valores da coluna A\nprint df[['A','B']] #somente os valores das colunas A e B\nprint df[0:3] #pega da linha 0 a 3 exclusive\n\n#Utiliza os valores do Index e das colunas. 
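O .loc é inclusivo nas duas pontas; esboço: df.loc[dates[0]:dates[2], 'A'] devolve 3 linhas,\n#enquanto df.iloc[0:2] devolveria 2.\n#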
Por isso não é exclusive\nprint df.loc[dates[0]]\nprint df.loc[:,['A','B']]\nprint df.loc['20130101':'20130103', ['A','B']]\nprint df.loc['20130101', ['A','B']]\n\n#Utiliza o index (zero-based), por isso é xclusivo\nprint df.iloc[3]\nprint df.iloc[3:5]\n\n#Filtra o DF onde todo o valor da coluna A for maior que zero\nprint df[df.A > 0]\nprint df[df.A > 0]['A']\nprint df[df > 0]\n\ndf2 = df.copy()\nprint df2\ndf2['E'] = ['one', 'two', 'three', 'four', 'five', 'six']\n\nprint df2[df2['E'].isin(['two', 'three'])]\n\nprint df[(df['A'] > 0) & df['D'] > 0][['B','C']]\nprint df[(df['A'] > 0) | df['D'] > 0]\n\n#Estatística\nprint df.mean() #média de todas as colunas\nprint df.mean(0) #média de todas as colunas\nprint df.mean(1) #média de todas as linhas\n\ndf.apply(np.cumsum) #soma os valores de forma cumulativa\n\ns = pd.Series(np.random.randint(0,7, size=10))\nprint s\ns.value_counts\n\nprint df.groupby('A').sum()\n\ns.plot()\n\ndf.to_csv('x.csv')\nx = pd.read_csv('x.csv')" }, { "alpha_fraction": 0.6788285970687866, "alphanum_fraction": 0.7009121179580688, "avg_line_length": 24.728395462036133, "blob_id": "e1a12a00a5153b784ff84f2d459459177d780375", "content_id": "0dac272287db84a6725c0b897fdabb1b73c8b150", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2088, "license_type": "no_license", "max_line_length": 104, "num_lines": 81, "path": "/k_means_implementation.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 20:25:28 2018\n\n@author: alu2015111446\n\nK-means algorithm\n\"\"\"\n\nfrom sklearn.datasets import load_iris\nimport numpy as np\nimport pandas as pd\n\ndata = load_iris()\ndf = pd.DataFrame(data['data'],\n columns=data['feature_names'])\n\nfrom sklearn.cluster import KMeans\ncl = KMeans(n_clusters=3, init='k-means++', n_init=10, max_iter=300)\ncl = cl.fit(df)\n\nlabels = cl.labels_\ncentroids = cl.cluster_centers_\n\nprint centroids\nprint labels\n\n\n#Para testar medidas de similaridade\nfrom sklearn.metrics import silhouette_score, calinski_harabaz_score\n\nfor k in xrange(2,11):\n cl = KMeans(n_clusters=k, init='k-means++', n_init=10, max_iter=300)\n cl = cl.fit(df)\n labels = cl.labels_\n score_k_means = silhouette_score(df, labels)\n score_km_ca = calinski_harabaz_score(df, labels)\n print k, score_k_means, score_km_ca\n \n \n\n#------ CLUSTER HIERÁRQUICO NO SCIPY\n \nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.cluster.hierarchy import fcluster\nfrom matplotlib import pyplot as plt\n\nZ = linkage(df, 'single') #single, complete, ward\nprint Z\n\ndendrogram(Z)\nplt.plot\n\nk=3\nclusters = fcluster(Z, k, criterion='maxclust')\nprint clusters\n\n#Como avaliar os clusters\n#========================\n\n#Normalmente existem 3 tipos de medidas\n\n#Externos: avalia o grau entre a estrutura encontrada e a estrutura conhecida (se já existem as classes)\n# Rand Index, Jaccard, Fowlkes-Mallows\n#Internos: avalia o grau entre a estrutura encontrada e a estrutura dos dados\n# SSE (soma dos erros ao quadrado)\n#Relativos: avalia entre duas ou mais estruturas qual a melhor\n# Silhueta, Davis-Bouldin, ...\n\n#Silhueta - varia entre -1 e 1\n# -1 = cluster ruim\n# 0 e 0.5 = clusters não definidos\n# 1 = cluster bem definidos e compactos\n\n# --- Comparando duas partições com a silhueta ---\nfrom sklearn.metrics import silhouette_score\nscore_k_means = silhouette_score(df, labels)\nprint score_k_means\n\nscore_hierarquico = 
silhouette_score(df, clusters)\nprint score_hierarquico" }, { "alpha_fraction": 0.5888564586639404, "alphanum_fraction": 0.6045832633972168, "avg_line_length": 26.121952056884766, "blob_id": "8f1d9b5085f1817d1eeedd149325ab384c4591e3", "content_id": "5feafcda95b9db0b28c5648c64a6771e32dca96d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4470, "license_type": "no_license", "max_line_length": 110, "num_lines": 164, "path": "/entregas/entrega_reg_linear_multi.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 1 22:05:48 2018\n\n@author: Everton Thomas e Gustavo Emmel\n\nRegressão multivariada\n\n\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\ndef nan_to_average(X):\n if X.isnull().values.any():\n cols = X.columns.tolist()\n for x in cols:\n col_mean = np.mean(X[x])\n X[np.isnan(X[x])] = col_mean\n \n return X\n \ndef normalize(X, columns):\n columns[:-1]\n\n scaled_features = StandardScaler().fit_transform(X.iloc[:,:-1].values)\n scaled_features_df = pd.DataFrame(scaled_features, index=X.index, columns=columns[:-1])\n scaled_features_df['MEDV'] = X['MEDV']\n X = scaled_features_df\n return X\n\ndef add_x0(X):\n x0 = np.ones(X.shape[0]).reshape(X.shape[0], 1)\n X['x0'] = x0\n #reposiciona a última coluna (x0) para a primeira posição\n cols = X.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n X = X[cols]\n return X\n\ndef scatter_features(x, fx, lblx, lbly, color):\n plt.scatter(x, fx, c=color)\n plt.xlabel('x -> {0}'.format(lblx))\n plt.ylabel('fx -> {0}'.format(lbly))\n plt.show()\n\ndef hypothesis(X, theta):\n return np.dot(theta.T, X)\n\ndef cost_function(X, fx, h, theta):\n soma = 0.\n N = len(X)\n \n for i in range(N):\n soma += (h(X.iloc[i], theta) - fx.iloc[i]) ** 2.\n \n return (1./(2. * float(N))) * soma\n\ndef update_t(X, fx, h, theta, alpha):\n tethas_tmp = [None] * len(thetas)\n \n N = len(X)\n \n soma = 0.\n for j in range(len(theta)):\n for i in range(N):\n #if j == 0:\n # soma += (h(X.iloc[i], theta) - fx.iloc[i]) * 1\n #else:\n soma += (h(X.iloc[i], theta) - fx.iloc[i]) * X.iloc[i,j]\n \n tethas_tmp[j] = theta[j] - ((alpha * (1./float(N))) * soma)\n \n return tethas_tmp\n\n\ncolumns = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']\n\ndf = pd.read_csv('../datasets/casas/house.data.csv', sep='\\t', names=columns)\n\n#1. Seta os valores NAN para a média da coluna\ndf = nan_to_average(df)\n\n#2. Normaliza os dados, exceto a classe\ndf = normalize(df, columns)\n\n#3. adiciona x0 ao dataset para viabilizar o algoritmo\ndf = add_x0(df)\n\n#4. Terminado o préprocessamento, divide DS em treino e teste\ntrainset, testset = train_test_split(df, test_size=0.2, shuffle=True)\n\n#5. Avaliamos algumas features e sua relação com a classe\nscatter_features(df['CRIM'], df['MEDV'], 'Taxa crimes per capta', 'Valor médio casas', 'red')\nscatter_features(df['AGE'], df['MEDV'], 'Ocupação casas constr. antes 1940', 'Valor médio casas', 'red')\nscatter_features(df['RAD'], df['MEDV'], 'Indice acessib. a rodovias', 'Valor médio casas', 'red')\nscatter_features(df['DIS'], df['MEDV'], 'Dist. 
5 centros empregatícios em Boston', 'Valor médio casas', 'red')\nscatter_features(df['RM'], df['MEDV'], 'Número médio de quartos', 'Valor médio casas', 'red')\n\nX = trainset.iloc[:,:-1]\nfx = trainset.iloc[:,-1]\n\nthetas = np.ones(X.columns.shape[0])\nalpha = 0.5\n\nthreshold = 0.01\nbatch_size = 8\nepoch = 0.\nmax_epoch = 10\n\nprev = np.inf\ncurr = cost_function(X, fx, hypothesis, thetas)\n\nepochs_cost = np.zeros(max_epoch)\nepoch_cnt = 0\nwhile (abs(curr - prev) > threshold) and (epoch < max_epoch):\n    bc_cnt = 0 #contador de batch\n    \n    for i in range(batch_size):\n        X_local = X.iloc[bc_cnt:(bc_cnt + batch_size)]\n        fx_local = fx.iloc[bc_cnt:(bc_cnt + batch_size)]\n\n        tmp_thetas = update_t(X_local, fx_local, hypothesis, thetas, alpha)\n        \n        thetas = np.array(tmp_thetas)\n        \n        bc_cnt += 1\n    \n    prev = curr\n    #custo da época medido sobre todo o conjunto de treino\n    curr = cost_function(X, fx, hypothesis, thetas)\n    print('custo na época ', epoch, ': ', curr)\n    \n    epochs_cost[epoch_cnt] = curr\n    \n    epoch += 1\n    epoch_cnt += 1\n\nprint('>>> thetas: ', thetas)\n\n#Aplicando sobre os dados a serem preditos\nX_t = testset.iloc[1, :-1]\nfx_t = testset.iloc[1, -1]\n\n#predict = []\nval = hypothesis(X_t, thetas)\n\nprint('Valor real: ', fx_t, ' / Valor predito: ', val)\n#val = cost_function()\n#for i in range(len(X_t)):\n#    predict.append(hypothesis(X_t.iloc[i], thetas))\n\nu = np.arange(len(epochs_cost))\n\nplt.plot(u, epochs_cost, 'r') \nplt.plot(u, epochs_cost, 'b') \nplt.xlabel('Iterations') \nplt.ylabel('Cost') \nplt.title('Error vs. Training Epoch') \n\n\n" }, { "alpha_fraction": 0.625987708568573, "alphanum_fraction": 0.6575943827629089, "avg_line_length": 26, "blob_id": "2a062390264ccc07a5ebd0befb27eb392ccd0c6d", "content_id": "117bb98d0fffe33155a13277c68ef5af721334df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 81, "num_lines": 42, "path": "/regressao_linerar_multi.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 30 21:32:12 2018\n\n@author: ALU2015111446\n\"\"\"\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('datasets/casas/ex1data2.csv', names=['size', 'rooms', 'value'])\n\ntrainset, testset = train_test_split(df, test_size=0.2)\n\nclf = linear_model.LinearRegression(normalize=True)\n\nxs = trainset[['size', 'rooms']]\nfx = trainset[['value']]\n\n#a avaliação usa o conjunto de teste\ntest_xs = testset[['size', 'rooms']]\ntest_fx = testset[['value']]\n\nclf.fit(xs, fx)\npredictions = clf.predict(test_xs)\nscore = clf.score(test_xs, test_fx)\nprint('Intercept (theta 0)...: ', clf.intercept_)\nprint('Thetas....: ', clf.coef_)\nprint('Score..: ', score)\n\nx1 = plt.scatter(test_xs[['size']], test_fx, c='red')\nx2 = plt.scatter(test_xs[['size']], predictions, c='blue')\nplt.legend((x1,x2),('Real', 'Predito'))\nplt.xlabel('x')\nplt.ylabel('f(x)')\n\nx1 = plt.scatter(test_xs[['rooms']], test_fx, c='red')\nx2 = plt.scatter(test_xs[['rooms']], predictions, c='blue')\nplt.legend((x1,x2),('Real', 'Predito'))\nplt.xlabel('x')\nplt.ylabel('f(x)')\n\n\n\n\n\n" }, { "alpha_fraction": 0.6784719228744507, "alphanum_fraction": 0.712693989276886, "avg_line_length": 37.09090805053711, "blob_id": "6d03f5dce090a4e256e12a6d7bf8f615874b4801", "content_id": "61d40f040cdcc49dd03340a1ea7c9a984c29b639", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2531, "license_type": "no_license", "max_line_length": 108, "num_lines": 66, "path": "/entregas/apriori_Everton_Thomas_Gustavo_Emmel.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 30 14:36:49 2018\n\n@author: Everton Thomas e Gustavo Emmel\n\nSuporte: Fração das transações que contém X e Y\n\nConfiança: Frequência que itens em Y aparecem em transações que contem X\n\nLift: Probabilidade de ocorrência de X e Y independente de um ou outro\n 1 = Indica independêcia\n 1 < Indica correlação positiva\n 1 > Indica correção negativa\n\n\"\"\"\n\nimport pandas as pd\nfrom mlxtend.frequent_patterns import apriori\nfrom mlxtend.frequent_patterns import association_rules\n\nretail = pd.read_csv('../datasets/online_retail/online_retail.csv', sep=';')\n\nretail_fr = retail[retail['Country'] == 'France']\n\ndummies = pd.get_dummies(retail_fr['StockCode'])\n\ncombine = pd.concat([retail_fr['InvoiceNo'], dummies], axis=1)\n\ntransactions = combine.groupby(['InvoiceNo']).sum().reset_index()\n\ntransactions = transactions.drop(['InvoiceNo'], axis=1)\n\n#Alguns itens aparecem com indicador 2, ao invés de 0 e 1.\n#Então normalizo tudo para 1 quando \ntransactions[~transactions.isin([0,1])] = 1\n\n#encontrando regras com no mínimo 5% de suporte\nfrequent_items = apriori(transactions, min_support=0.05, use_colnames=True)\n\n#encontrar regras com lift maior que 1\nrules = association_rules(frequent_items, metric='lift', min_threshold=1)\n\n#10 primeiras regras com maior suporte\nrules.sort_values('support', ascending= False).head(10)\n\n#10 primeiras regras com maior confianca\nrules.sort_values('confidence', ascending= False).head(10)\n\n#10 primeiras regras com maior lift\nrules.sort_values('lift', ascending= False).head(10)\n\n#selecionando regras com lift maior que 2, confianca maior que 0.6 e suporte maior que 0.1\nrules[(rules['lift'] >= 2) & (rules['confidence'] >= 0.6) & (rules['support'] >= 0.1 )]\n\n# pegamos os dados de vendas da Franca agrupando os itens por stock code e chegamos nas conclusoes a seguir:\n\n#selecionando regras com lift maior que 2, confianca maior que 0.6 e suporte maior que 0.1\nprint \"melhores combos com lift maior que 2\"\nprint rules[(rules['lift'] >= 2) & (rules['confidence'] >= 0.6) & (rules['support'] >= 0.1 )]\n# so possuem 2 combos com lift maior que 2\nprint rules[(rules['lift'] >= 1) & (rules['confidence'] >= 0.6) & (rules['support'] >= 0.1 )]\n# quando baixamos o lift para 1 temos 8 combinacaoes\nprint rules[(rules['lift'] >= 0.5) & (rules['confidence'] >= 0.9) & (rules['support'] >= 0.1 )]\n# percebemos que temos 2 combinacoes com alto lift e alta confianca\nprint rules[(rules['lift'] >= 0.9) & (rules['confidence'] >= 0.9) & (rules['support'] >= 0.1 )]" }, { "alpha_fraction": 0.44798657298088074, "alphanum_fraction": 0.5570470094680786, "avg_line_length": 19.586206436157227, "blob_id": "7fbc6d4ff2c475f03579b137fb01c4e9aa7e0784", "content_id": "3099901c58fccb66d72c60aa1f8b02f6f738bf1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 80, "num_lines": 29, "path": "/matplotlib_basico.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 11 21:39:46 2018\n\n@author: alu2015111446\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as 
plt\n\nplt.plot([1,2,3,4])\nplt.ylabel('Some numbers')\nplt.show()\n\n\nplt.plot(np.array([1,2,3,4]))\nplt.ylabel(u'números')\nplt.plot([1,2,3,4], [1,4,9,16])\nplt.show()\n\nplt.plot(np.array([1,2,3,4]), np.array([1,4,9,16]), 'ro') # c= 'r', marker = 'o'\nplt.axis([0,6,0,20]) #[xmin, xmax, ymin, ymax]\n\n\nt = np.arange(0.,5.,0.2)\nplt.plot(t, t, 'r--',\n t, t**2, 'bo',\n t, t**3, 'g^') # c= 'r', marker = 'o'\nplt.axis([0,6,0,20]) #[xmin, xmax, ymin, ymax]" }, { "alpha_fraction": 0.48778054118156433, "alphanum_fraction": 0.5371571183204651, "avg_line_length": 19.89583396911621, "blob_id": "adc171a9c408bc00fa927cec3952d315edff90f6", "content_id": "6f7e826a5870f3c906a60b8a51fba358e76a3785", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2005, "license_type": "no_license", "max_line_length": 83, "num_lines": 96, "path": "/grad_desc_multi.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 22:56:37 2018\n\n@author: evert\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef hypothesis(X, thetas):\n return np.dot(X, thetas.T)\n\ndef computeCost(X,y,theta):\n tobesummed = np.power(((X @ theta.T)-y),2)\n return np.sum(tobesummed)/(2 * len(X))\n\ndef cost_function(X, y, h, theta):\n N = len(X)\n \n #Soma vetorizada\n soma = np.sum((h(X, theta) - y) ** 2.)\n \n return (1./(2. * float(N))) * soma\n\ndef gradient_descent(X, y, thetas, alpha, max_iter):\n costs = np.zeros(max_iter, dtype=np.float64)\n N = len(X)\n J = len(thetas)\n soma = 0.\n thetas_tmp = thetas\n \n prev_cost = np.inf\n \n for i in range(max_iter):\n for j in range(J):\n for n in range(N):\n soma += (hypothesis(X[n], thetas) - y[n]) * X[n][j]\n \n thetas_tmp[j] = thetas_tmp[j] - (alpha/len(X)) * soma\n \n thetas = thetas_tmp\n #thetas = thetas - (alpha/len(X)) * np.sum((hypothesis(X, thetas) - y) * X)\n cost = cost_function(X, y, hypothesis, thetas)\n \n if cost < prev_cost:\n costs[i] = cost\n prev_cost = cost\n else:\n break\n \n\n return thetas, costs\n\nX = np.array([\n [1,21,3],\n [1,16,3],\n [1,24,3],\n [1,14,2],\n [1,30,4]], dtype=np.float32)\n\ny = np.array([399,\n 329,\n 369,\n 232,\n 539], dtype=np.float32)\n\n\n\nmax_iter = 10\nalpha = 0.0001\nthetas = np.ones(X.shape[1])\n\ncost = cost_function(X, y, hypothesis, thetas)\n\ntheta_final, costs = gradient_descent(X, y, thetas, alpha, max_iter)\n\nplt.plot(np.arange(max_iter), costs, c='r')\n\nto_predict = np.array([\n [1,19,4],\n [1,15,3],\n [1,14,3],\n [1,13,3]], dtype=np.float32)\n\ny_real = np.array([299,\n 314,\n 198,\n 212], dtype=np.float32)\n\npredicted = np.zeros(len(y_real))\n \nfor p in range(len(to_predict)):\n predicted[p] = hypothesis(to_predict[p], theta_final)" }, { "alpha_fraction": 0.6675165891647339, "alphanum_fraction": 0.7261601090431213, "avg_line_length": 25.513513565063477, "blob_id": "763d5172d0ef72dd4d6e54c7ec848a0fdd0d6dce", "content_id": "5d576c2f1697d7d8ec48335967fb540a3e996bc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2002, "license_type": "no_license", "max_line_length": 105, "num_lines": 74, "path": "/transformation.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 16 19:40:09 2018\n\n@author: alu2015111446\n\nAula 4\nTransformação\n\"\"\"\n\n\n#Binarização\nfrom sklearn import preprocessing\nimport numpy as 
np\nimport pandas as pd\n\n#Divide em dois valores, conforme o threshold\nX = np.array([[1.,0,-1],[1.,1.,-1],[0.,2.,3]])\nprint X\n\nbinarizer = preprocessing.Binarizer(threshold=0.0).fit(X)\nbinarizer.transform(X)\n\n#Cria novas variáveis binarizadas\nX = np.array([[1.,0,'A'],[1.,1.,'A'],[0.,2.,'B']])\nprint X\n\nbinarizer = preprocessing.MultiLabelBinarizer()\nbinarizer.fit_transform(X[:,-1])\n\n#Discretização Não supervisionada (não leva em consideração a classe)\n\n#Discretização em intervalos\n#Regras de associação, por exemplo, precisam separar valores contínuos em intervalos\ntempos = [1.,1.1,1.5,1.7,2.,3.,5.,7.,8.]\n\n#O atributo se transforma em categórico ordinal (com certa ordem)\n#Tipos de discretização: largura, frequencia e clusterização\na = np.array([0,1,2,6,6,9,10,10,10,13,18,20,21,21,25])\nx = [0,1,2,2,6,6,7,8,9,10,10,10,13,18,20,21,21,25]\n\nt, s = np.percentile(a, [33,66])\nprint t, s\n\n#Por largura\n#Divide o maior valor pelo numero de intervalos. Pode ser afetada por outliers, já que usa o máximo valor\n#Por frequencia\n#Conta qtd numeros existentes e divide pelo número de intervalos \n\n\n#Discretização supervisionada (leva em conta a classe)\n#Entropia (objetivo é escolher os intervalos onde será feito o corte, baseado na classe e\n#levando em consideração a minimização da entropia)\n\n#scipy.stats.entropy\n\n\n#Re-escalar: (Normalization, minmax (muito sensivel a outliers), )\na = np.arange(20).reshape(-1,1)\n\nfrom sklearn.preprocessing import MinMaxScaler as minmax\nscaler = minmax()\nc = scaler.fit_transform(a)\nprint c\n\n#Padronização: (standardization, z-score)\n#Os dados são padronizados cfe média e desvio padrão, portanto, tb sensíveis a outliers\n\nd = np.arange(20)\ne = np.array(map(lambda x: (x - d.mean()) / d.std(), d))\nprint e\n\n#transformação logarítmica\n#x = log(x)" }, { "alpha_fraction": 0.5226104855537415, "alphanum_fraction": 0.6058581471443176, "avg_line_length": 16.23008918762207, "blob_id": "685eb020dc1e2f9447309dcc9454fed4af6519fc", "content_id": "0d1114dea2df5b053d122592e71cb5fe4152d979", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1951, "license_type": "no_license", "max_line_length": 80, "num_lines": 113, "path": "/numpy_basico.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 11 19:27:54 2018\n\n@author: alu2015111446\n\"\"\"\n\nimport numpy as np\n\nprint '--- Array ----'\n\na = np.array([1,2,3,'a','b','c'])\nprint a.shape\nprint a\n\nprint '--- Matrix -----'\n\nb = np.array([[1,2,3],['a','b','c']])\nprint b.shape\nprint b\nprint b[1,1]\n\nprint '--- Teste ---'\n\nb = np.zeros((5,5))\nprint 'Zeros: ', b\n\nb = np.ones((5,5))\nprint 'Uns: ', b\n\nb = np.eye((5)) #matriz identidade\nprint 'Identidade: ', b\n\nb = np.full((5,5), 9.) 
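# esboço hipotético com dtype explícito: b2 = np.full((5,5), 9, dtype=np.float64)\n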
#o ponto é para tratar como float, senão gera com inteiro\nprint 'Preenche com o valor 9: ', b\n\nb = np.random.random((4,4)) #matriz randômica 4 x 4 randomizando entre 0 e 1\nprint b\n\nprint '--- Exercícios ---'\na = np.array([[10,20,30,40],[50,60,70,80],[90,100,110,120]])\nprint a\n\nb = a[:2, 1:3]\nprint b\nb[0, 0] = 77\nprint a #alterou o valor da matriz a tb\n\n#repare na diferença dos objetos\nrow1 = a[1, :]\nrow2 = a[1:2, :]\nprint row1, row1.shape\nprint row2, row2.shape\n\n#agora com reshape\nprint row1.reshape(1,4)\n\na = np.arange(9).reshape(3,3)\nprint a\nprint a*3 #multiplica tudo por 3\n\n#especificando o tipo do array\na = np.array([[10,20],[30,40]], dtype=np.float32)\nprint a\n\nb = np.array([[50,60],[70,80]], dtype=np.float32)\nprint b\n\nprint a + b\nprint np.add(a, b)\n\nprint a - b\nprint np.subtract(a, b)\n\nprint a * b\nprint np.multiply(a, b)\n\nprint a / b\nprint np.divide(a, b)\n\nprint np.sqrt(a, b)\n\n##########################################\n\nx = np.array([[10,20],[30,40]])\ny = np.array([[50,60],[70,80]])\nv = np.array([90,100])\nw = np.array([110,120])\n\nprint x.dot(v)\nprint np.dot(x,v)\n\nprint np.sum(x)\nprint np.sum(x, axis = 0) #Soma as colunas\nprint np.sum(x, axis = 1) #Soma as linhas\n\nprint x.T #Matriz transposta\n\nk = np.arange(9).reshape(3,3)\nprint k\nprint k.T\n\nv = np.array([10,20,30])\nprint v.T\nprint np.matrix(v).T\n\n#Concatenacao\ny = np.array([[1,2,3]])\nz = np.array([[1,2,3]])\nprint np.concatenate((y, z), axis = 0)\n\nprint np.vstack((y,z))\nprint np.hstack((y,z))" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.8125, "avg_line_length": 31, "blob_id": "35c05dada7d6900024acd1db4c5543667f5fe9e0", "content_id": "37792d009d05fa1d5da6178e459b08632473e837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 43, "num_lines": 2, "path": "/README.md", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# MachineLearning_1\nMaterial Pós-Graduação - Machine Learning 1\n" }, { "alpha_fraction": 0.46768060326576233, "alphanum_fraction": 0.4752851724624634, "avg_line_length": 20.875, "blob_id": "e1293eb40cbf3ba5151ce6db7924a91a02143bc9", "content_id": "9e1c75cb693c62abdb89b82f5de39001315067c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 43, "num_lines": 24, "path": "/entregas/entrega_censo.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"data/censo/adult.data\")\n\ncolumns = ['age',\n 'workclass',\n 'fnlwgt',\n 'education',\n 'education-num',\n 'marital-status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'capital-gain',\n 'capital-loss',\n 'hours-per-week',\n 'native-country',\n '>50K, <=50K']\n\ndf = pd.DataFrame(data, columns=columns)\ndf.shape()\nx = df.dropna()\n\n" }, { "alpha_fraction": 0.6707289218902588, "alphanum_fraction": 0.6849658489227295, "avg_line_length": 32.01128005981445, "blob_id": "4490e7cdd5f66db1e18582510dbc671e0fb9246c", "content_id": "42a8e9ae22dd8a9fdb62c0d027702f3c06ba5cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8898, "license_type": "no_license", "max_line_length": 113, "num_lines": 266, "path": "/entregas/Clustering_Everton_Thomas_Gustavo_Emmel.py", "repo_name": 
"ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 01:25:14 2018\n\n@authores: Everton Thomas e Gustavo Emmel\n\nEntrega 1\nClustering Censo 2005\n\nPasso-a-passo do pré-processamento:\n 1. retirada e dados inexistentes ou incompletos do dataset (?)\n 2. avaliação de features a procura de outliers\n 3. remoção de outliers da feature 'age'\n 4. binarização das features categóricas\n 5. investigação da correlação entre as features 'education' e 'education-enum'\n 6. remoção da feature 'education' por existir correlação com 'education-enum',\n visando diminuir a quantidade de features e o consequente estouro de memória\n 7. remoção das features originais que foram binarizadas no passo anterior\n 8. análise da utilização de k-means e algoritmos hierárquicos\n 9. normalização das features através de z-score para tentar melhorar os resultados\n 10. aplicação de k-means e algoritmos hierárquicos para validar nova estrutura e testar\n se houve melhora no processo\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ncolumns = ['age',\n 'workclass',\n 'fnlwgt',\n 'education',\n 'education-num',\n 'marital-status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'capital-gain',\n 'capital-loss',\n 'hours-per-week',\n 'native-country',\n '>50K, <=50K']\n\n#Carrega o dataframe\ndata = pd.read_csv(\"data/censo/adult.data\", names=columns)\n\n#limpando dados desconhecidos do dataset\ndata = data[data['workclass'] != ' ?'] \ndata = data[data['education'] != ' ?'] \ndata = data[data['marital-status'] != ' ?'] \ndata = data[data['occupation'] != ' ?'] \ndata = data[data['relationship'] != ' ?'] \ndata = data[data['race'] != ' ?'] \ndata = data[data['sex'] != ' ?'] \ndata = data[data['native-country'] != ' ?'] \n\n#avaliando feature AGE\nplt.boxplot(data['age'])\nplt.show()\nplt.hist(data['age'])\nplt.show()\nnp.percentile(data['age'], q=range(0,100,10))\n\n#retirada dos outliers da feature AGE\ndata = data.loc[data['age'] < 75]\n\n#avaliando feature AGE após a retirada dos outliers\nplt.boxplot(data['age'])\nplt.show()\nplt.hist(data['age'])\nplt.show()\nnp.percentile(data['age'], q=range(0,100,10))\n\n#discretização da feature workclass\ndf_dsc = pd.get_dummies(data['workclass'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature marital-status\ndf_dsc = pd.get_dummies(data['marital-status'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature occupation\ndf_dsc = pd.get_dummies(data['occupation'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature relationship\ndf_dsc = pd.get_dummies(data['relationship'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature race\ndf_dsc = pd.get_dummies(data['race'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature sex\ndf_dsc = pd.get_dummies(data['sex'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature native-country\ndf_dsc = pd.get_dummies(data['native-country'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#discretização da feature >50K, <=50K\ndf_dsc = pd.get_dummies(data['>50K, <=50K'])\n\ndata = pd.concat([data, df_dsc], axis=1)\n\n#Verifica a correlação entre as features 'education' e 'education-enum'\ndata['education'] = data['education'].astype('category')\ndata['education'] = data['education'].cat.codes\n\nplt.scatter(data['education-num'].values, 
data['education'].values)\nplt.xlabel('education')\nplt.ylabel('education-num')\nplt.show()\n\npc = np.corrcoef(data['education'].values, data['education-num'].values)\nprint(pc)\n\n#No passo anterior, apesar do coef. de correlação = 0.34309, \n#pelo gráfico é possível verificar que há somente 1 valor de 'education' para cada 'education-enum'\n#então optamos por excluir a feature categorica 'education'\ndata = data.drop(['education'], axis=1)\n\n#remove as colunas originais que foram discretizadas\ndata = data.drop(columns = ['workclass', \n 'marital-status', \n 'occupation', \n 'relationship', \n 'race', \n 'sex', \n 'native-country',\n '>50K, <=50K'])\n\n\n\n#limita o dataset para evitar erro de memória\ndf = data.sample(n = 5000)\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score, calinski_harabaz_score\n\nmax_k_silhoute = 0\nmax_k_calinski = 0\nmax_score_silhoute = 0\nmax_score_calinski = 0\n\n#K-Means\nfor k in range(2,20):\n cl = KMeans(n_clusters=k, init='k-means++', n_init=10, max_iter=300)\n cl = cl.fit(df)\n labels = cl.labels_\n score_km_silhoute = silhouette_score(df, labels)\n score_km_ca = calinski_harabaz_score(df, labels)\n \n if (score_km_silhoute > max_score_silhoute):\n max_score_silhoute = score_km_silhoute\n max_k_silhoute = k\n \n if (score_km_ca > max_score_calinski):\n max_score_calinski = score_km_ca\n max_k_calinski = k\n \n \nprint('Max. score silhoute: Clusters {} / Score {}'.format(max_k_silhoute, max_score_silhoute))\nprint('Max. score calinski: Clusters {} / Score {}'.format(max_k_calinski, max_score_calinski))\n\n#Análise:\n#Para o cenário atual com k-means, o método da silhueta sugeriu 2 clusters, enquanto o método calinski sugeriu\n#19 clusters. Além disso, o score da silhueta (~0.577...) 
demonstra que os clusters não estão muito bem definidos\n#Acreditamos que 19 clusters, conforme sugerido pelo método calinski, é um número excessivo de clusters para \n#segmentar os dados.\n\n#Cluster hierárquico\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.cluster.hierarchy import fcluster\nfrom matplotlib import pyplot as plt\n\nZ = linkage(df, 'single') #single, complete, ward\n\ndendrogram(Z)\n\nZ = linkage(df, 'ward') #single, complete, ward\n\ndendrogram(Z)\n\n\nk=20\nclusters = fcluster(Z, k, criterion='maxclust')\n\n#Análise:\n#Para os algoritmos de clusterização hierárquicos, acreditamos que a utilização de WARD se mostrou mais\n#eficiente que a 'Single' ou 'Complete', pois parece ter definido melhor os clusters.\n#É possível, através de WARD, selecionar entre 2 e 4 clusters bem distintos.\n\n##################################\n#Em uma tentativa de melhorar os resultados, tentamos normalizar as features:\n#   'age', \n#   'fnlwgt', \n#   'capital-gain'\n#   'capital-loss'\n#   'hours-per-week'\n#através do método z-score\n\ndf['age'] = (df.age - df.age.mean())/df.age.std(ddof=0)\ndf['fnlwgt'] = (df.fnlwgt - df.fnlwgt.mean())/df.fnlwgt.std(ddof=0)\ndf['capital-gain'] = (df['capital-gain'] - df['capital-gain'].mean())/df['capital-gain'].std(ddof=0)\ndf['capital-loss'] = (df['capital-loss'] - df['capital-loss'].mean())/df['capital-loss'].std(ddof=0)\ndf['hours-per-week'] = (df['hours-per-week'] - df['hours-per-week'].mean())/df['hours-per-week'].std(ddof=0)\n\n#E aplicamos novamente os algoritmos de clusterização\n\nmax_k_silhoute = 0\nmax_k_calinski = 0\nmax_score_silhoute = 0\nmax_score_calinski = 0\n\n#K-Means\nfor k in range(2,20):\n    cl = KMeans(n_clusters=k, init='k-means++', n_init=10, max_iter=300)\n    cl = cl.fit(df)\n    labels = cl.labels_\n    score_km_silhoute = silhouette_score(df, labels)\n    score_km_ca = calinski_harabaz_score(df, labels)\n    \n    if (score_km_silhoute > max_score_silhoute):\n        max_score_silhoute = score_km_silhoute\n        max_k_silhoute = k\n    \n    if (score_km_ca > max_score_calinski):\n        max_score_calinski = score_km_ca\n        max_k_calinski = k\n    \nprint('Resultado K-Means após normalização de variáveis:') \nprint('Max. score silhoute: Clusters {} / Score {}'.format(max_k_silhoute, max_score_silhoute))\nprint('Max. score calinski: Clusters {} / Score {}'.format(max_k_calinski, max_score_calinski))\n\n#Análise:\n#Em princípio, notamos que talvez as amplitudes e/ou diferentes dimensões entre as features poderiam estar\n#enviesando o resultado obtido pela medida de avaliação Calinski. Isto porque após normalizar as features,\n#ambas as medidas de avaliação (silhueta e calinski) sugeriram a adoção de 2 clusters. Entretanto, os scores\n#alcançados reduziram substancialmente, indicando 2 clusters bem indefinidos.\n\nZ = linkage(df, 'single') #single, complete, ward\n\ndendrogram(Z)\n\nZ = linkage(df, 'ward') #single, complete, ward\n\ndendrogram(Z)\n\n#Análise:\n#Já para a clusterização hierárquica, a normalização parece ter auxiliado quando utilizado o algoritmo WARD.\n#Já para 'Single', o uso da normalização demonstrou clusters muito mais indefinidos do que anteriormente.\n\n#Conclusão:\n#No cenário em questão, acreditamos que a melhor opção para clusterizar estes dados seja a adoção do algoritmo\n#de clusterização hierárquica WARD com normalização dos dados.\n#Utilizando esse método, e como demonstrado no dendrograma, sugerimos 2 ou 3 clusters."
}, { "alpha_fraction": 0.476644903421402, "alphanum_fraction": 0.5228528380393982, "avg_line_length": 22.987951278686523, "blob_id": "8adc8fa4122601a50b1cda78559f01f1a53ac946", "content_id": "0aa04faef1710dc8336daea2272238a49419e0f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 73, "num_lines": 83, "path": "/python_basico.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\n\ndef is_impar(x):\n if x % 2 == 0:\n print x, ' é par'\n else:\n print x, ' é impar'\n \ndef maiorNumero():\n lst = [ -10, 10, 0, 1, 1, 7, 11, 5, 4, 3 ]\n maior = lst[0]\n for x in lst:\n if x > maior:\n maior = x\n print 'Maior número da lista: ', maior\n\ndef somaNumerosLista():\n lst = [ -10, 10, 0, 1, 1, 7, 11, 5, 4, 3 ]\n soma = 0\n for x in lst:\n soma += x\n print 'Soma números da lista: ', soma\n\ndef fibonacci(max):\n vet = [0,1]\n for x in range(max):\n vet.append(vet[-1] + vet[-2])\n print 'Sequência Fibonacci: ', vet\n\ndef _99_bottles_of_beer():\n sequencia = ''\n for x in range(99, 0, -1):\n if x == 1:\n print x,' bootle of beer on the wall', x, 'bottle of beer.'\n sequencia = 'no more beer on the wall.'\n else:\n print x,' bootles of beer on the wall', x, 'bottles of beer.'\n sequencia = str(x - 1) +' bootles of beer on the wall'\n print \"Take one down, pass it around,\", sequencia\n\ndef np_maior_numero_array():\n lst = [ -10, 10, 0, 1, 1, 7, 11, 5, 4, 3 ]\n x = np.max(lst)\n print 'Maior número da lista usando NumPy: ', x\n\ndef np_soma_array():\n lst = [ -10, 10, 0, 1, 1, 7, 11, 5, 4, 3 ]\n x = np.sum(lst)\n print 'Soma números da lista usando NumPy: ', x\n \ndef calc_sd(arr):\n soma = np.sum(arr)\n cnt = np.size(arr)\n media = soma / float(cnt)\n\n somatorio = 0;\n \n for x in arr:\n somatorio += (x - media) ** 2\n \n print 'Variância: ', 1/float(cnt)*somatorio\n \n resultado = np.sqrt(1/float(cnt)*somatorio)\n \n print 'Desvio padrão: ', resultado\n\nis_impar(10)\nis_impar(5)\nprint '--------'\nmaiorNumero()\nprint '--------'\nsomaNumerosLista()\nprint '--------'\nfibonacci(15)\nprint '--------'\n_99_bottles_of_beer()\nprint '--------'\nnp_maior_numero_array()\nprint '--------'\nnp_soma_array()\nprint '--------'\ncalc_sd([ -10, 10, 0, 1, 1, 7, 11, 5, 4, 3 ])\n" }, { "alpha_fraction": 0.6333914399147034, "alphanum_fraction": 0.6931124925613403, "avg_line_length": 35.98387145996094, "blob_id": "053c478dfac67d5d052129c7d175fbf92c0b187f", "content_id": "736311a0aeaa4ee02fa5d356a483b685ffb0a4de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2319, "license_type": "no_license", "max_line_length": 122, "num_lines": 62, "path": "/entregas/entrega_knn.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 30 04:26:09 2018\n\n@author: Everton Thomas e Gustavo Emmel\n\n#Análise dataset:\n Os atributos são as colunas binarizadas quanto a característica do animal (index):\n - se possui penas, pelos, dentes, etc..\n A classe é a última coluna do dataset, e indica uma categoria a qual o respectivo animal pertence\n \n#Análise variação conjunto de teste:\n É possível observar uma relação entre a escolha do tamanho do conjunto de teste e a acurácia obtida.\n Quanto maior o conjunto de teste consequentemente o conjunto de treino será menor. 
In such cases there is a\n    considerable drop in model accuracy. This is mainly explained by the few instances left to train the algorithm\n    before applying it to the test cases.\n    *To evaluate these scenarios more fairly we removed SHUFFLE from the train_test_split method\n\n#Varying K from 1 to 5\n    We found that, on this dataset, K=1 and K=2 obtained the best accuracy (90.47619047619048)\n    As K grows, this value decreases:\n        K value: 1 | Accuracy: 90.47619047619048\n        K value: 2 | Accuracy: 90.47619047619048\n        K value: 3 | Accuracy: 85.71428571428571\n        K value: 4 | Accuracy: 85.71428571428571\n        K value: 5 | Accuracy: 76.19047619047619\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef getAccuracy(testset, predictions):\n    correct = 0\n    for id_test, test in testset.iterrows():\n        p = predictions.loc[id_test][0]\n        if test[-1] == p:\n            correct += 1\n    return (correct / float(len(testset))) * 100.0\n\n\n# prepare data\nzoo = pd.read_csv('../datasets/zoo/zoo.csv', nrows=None, header=0)\n\nzoo = zoo.drop(columns=['animal_name'])\n\ntrainset, testset = train_test_split(zoo, test_size=0.2, shuffle=False)\n\n# print trainset\n# print testset\n\n\n#k = 1\n\nfor k in range(1, 6):\n    knn = KNeighborsClassifier(n_neighbors=k)\n    knn.fit(trainset.iloc[:, 0:-1], trainset.iloc[:, -1])\n    predicted_knn = knn.predict(testset.iloc[:, 0:-1])\n    df_predict = pd.DataFrame(predicted_knn, index=testset.index.values)\n    accuracy = getAccuracy(testset, df_predict)\n    \n    print(\"K value: \", k, \" | Accuracy: \", accuracy)\n\n" }, { "alpha_fraction": 0.5279456377029419, "alphanum_fraction": 0.5664652585983276, "avg_line_length": 21.632478713989258, "blob_id": "bfcaf4130103844e3b2bfd96a6231abd638d39c97", "content_id": "f678072563a123b0d60b2760ce432a280711b95e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2657, "license_type": "no_license", "max_line_length": 87, "num_lines": 117, "path": "/regressao_linerar.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 30 20:01:32 2018\n\n@author: Linear Regression Exercise\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\ndef hypothesis(x, t0, t1):\n    return t0 + t1 * x\n\ndef cost_function(X, fx, h, t0, t1):\n    soma = 0.\n    N = len(X)\n    \n    for i in range(N):\n        soma += (h(X[i], t0, t1) - fx[i]) ** 2.\n    \n    return (1./(2. 
* float(N))) * soma\n\ndef update_t0(X, fx, h, t0, t1, alpha):\n \"\"\"\n Atualiza t0 com base nos N valores passados para esta função.\n \"\"\"\n \n N = len(X)\n soma = 0.\n \n for i in range(N):\n soma += (h(X[i], t0, t1) - fx[i])\n \n return t0 - ((alpha * (1./float(N))) * soma)\n\n\ndef update_t1(X, fx, h, t0, t1, alpha):\n \"\"\"\n Atualiza t1 com base nos N valores passados para esta função.\n \"\"\"\n N = len(X)\n \n soma = 0.\n for i in range(N):\n soma += (h(X[i], t0, t1) - fx[i]) * X[i]\n \n return t1 - ((alpha * (1./float(N))) * soma)\n\ndf = pd.read_csv('datasets/regressao_univariada/ex1data1.csv')\n\ndf = StandardScaler().fit_transform(df)\n\ntrainset, testset = train_test_split(df, test_size=0.5)\n\nX = trainset[:,0]\nfx = trainset[:,1]\n\n#plt.plot(X, [hypothesis(x, t0, t1) for x in X], c='blue')\nplt.scatter(X, fx, c='red')\nplt.xlabel('x')\nplt.ylabel('f(x)')\n#plt.title(u'Predições ' + r'para $\\theta_0=$' + str(t0) + r' e $\\theta_1=$' + str(t1))\nplt.show()\n\nt0 = 0.1\nt1 = 0.5\nalpha = 0.1\n\nthreshold = 0.001\nbatch_size = 2\nepoch = 0.\nmax_epoch = 10\n\nprev = np.inf\ncurr = cost_function(X, fx, hypothesis, t0, t1)\n\nwhile (abs(curr - prev) > threshold) and (epoch < max_epoch):\n bc_cnt = 0 #contador de batch\n \n for i in range(batch_size):\n X_local = X[bc_cnt:(bc_cnt + batch_size)]\n fx_local = fx[bc_cnt:(bc_cnt + batch_size)]\n\n temp0 = update_t0(X_local, fx_local, hypothesis, t0, t1, alpha)\n temp1 = update_t1(X_local, fx_local, hypothesis, t0, t1, alpha)\n \n t0 = temp0\n t1 = temp1\n \n bc_cnt += 1\n \n prev = curr\n curr = cost_function(X, fx, hypothesis, t0, t1)\n print('custo na época ', epoch, ': ', curr)\n epoch += 1\n\nprint('t0: ', t0)\nprint('t1: ', t1)\n\n#Aplicando sobre os dados a serem preditos\nX_t = testset[:, 0]\nfx_t = testset[:, 1]\n\npredict = []\n\nfor i in range(len(X_t)):\n predict.append(hypothesis(X_t[i], t0, t1))\n\nplt.scatter(X_t, fx_t, c='red')\nplt.plot(X_t, predict, c='blue')\nplt.xlabel('x')\nplt.ylabel('f(x)')\n#plt.title(u'Predições ' + r'para $\\theta_0=$' + str(t0) + r' e $\\theta_1=$' + str(t1))\nplt.show()\n" }, { "alpha_fraction": 0.6235214471817017, "alphanum_fraction": 0.6421087980270386, "avg_line_length": 25.639638900756836, "blob_id": "6be4534d0caee2faa589d1816a74d2f72d5b59ee", "content_id": "77aeb6357bc9328c312ed32bc06f39781927764d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2959, "license_type": "no_license", "max_line_length": 102, "num_lines": 111, "path": "/entregas/entrega_dt.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Mon Jul 30 14:36:49 2018\n@author: Gustavo Emmel e Everton Thomas\n\n\"\"\"\n\n\nimport random\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom IPython.display import display\n\nrandom.seed(0)\nnp.random.seed(0) # garante que o conjunto de dados seja sempre particionado da mesma forma\n\ncolumns = ['age',\n 'workclass',\n 'fnlwgt',\n 'education',\n 'education-num',\n 'marital-status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'capital-gain',\n 'capital-loss',\n 'hours-per-week',\n 'native-country',\n 'salary']\n\ndf = pd.read_csv(\n filepath_or_buffer='adult.csv',\n dtype={\n 'age': np.float32,\n 'workclass': 'category',\n 'fnlwgt': np.float32,\n 'education': 'category',\n 'education-num': np.float32,\n 'marital-status': 'category',\n 'occupation': 'category',\n 'relationship': 'category',\n 'race': 'category',\n 
'sex': 'category',\n 'capital-gain': np.float32,\n 'capital-loss': np.float32,\n 'hours-per-week': np.float32,\n 'native-country': 'category',\n 'salary': 'category',\n },\n na_values='?',\n skipinitialspace=True,\n names=columns\n\n)\n\n# display(df)\n\nlb_make = LabelEncoder()\ndf[\"native-country\"] = lb_make.fit_transform(df[\"native-country\"])\ndf[\"sex\"] = lb_make.fit_transform(df[\"sex\"])\ndf[\"race\"] = lb_make.fit_transform(df[\"race\"])\ndf[\"marital-status\"] = lb_make.fit_transform(df[\"marital-status\"])\ndf[\"education\"] = lb_make.fit_transform(df[\"education\"])\ndf[\"occupation\"] = lb_make.fit_transform(df[\"occupation\"])\ndf[\"relationship\"] = lb_make.fit_transform(df[\"relationship\"])\ndf[\"workclass\"] = lb_make.fit_transform(df[\"workclass\"])\n\nfact, class_labels = pd.factorize(df['salary'].astype(np.object))\n\ndf['salary'] = fact\ncolumns = df.columns.tolist()\ncolumns.pop(columns.index('salary'))\ncolumns.append('salary')\ndf = df.reindex(columns=columns)\ndisplay(df)\n\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nX, y = df[df.columns[:-1]], df[df.columns[-1]]\n\n# utiliza 25% do dataset para teste\nX_train, X_test, Y_train, Y_test = train_test_split(X, y, train_size=0.75, test_size=0.25, stratify=y)\n\n\ndt = tree.DecisionTreeClassifier(max_depth=3)\ndt.fit(X_train, Y_train)\n\npredictions = dt.predict(X_test)\n\nprint('accuracy score:', accuracy_score(Y_test, predictions))\n\n# accuracy score: 0.8339270359906645\n\nimport graphviz\nimport pydotplus\nfrom IPython.display import Image\n\ndot_data = tree.export_graphviz(\n dt, out_file=None,\n feature_names=df.columns[:-1], # ignora classe\n class_names=class_labels,\n filled=True, rounded=True,\n special_characters=True\n)\ngraph = graphviz.Source(dot_data)\ngraph = pydotplus.graphviz.graph_from_dot_data(dot_data)\nImage(graph.create_png())\n\n\n" }, { "alpha_fraction": 0.5755813717842102, "alphanum_fraction": 0.6007751822471619, "avg_line_length": 22.477272033691406, "blob_id": "af6a4a7d8bdcd2e091690af2fc81f7f639e556ed", "content_id": "903b163dd2655e8893fc716853de22d4ec3f6755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 71, "num_lines": 44, "path": "/tests/dt_entropy_inf_gain/df_entropy_inf_gain.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 6 16:01:05 2018\n\n@author: Automatize\n\nCálculo de entropia e Information Gain\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndef calc_entropy(df, feature, feature_class, value = None):\n\n df_tmp = df\n \n if value != None:\n df_tmp = df[df[feature] == value]\n \n cnt_total = len(df_tmp)\n \n cnt = len(df_tmp)/len(df)\n \n cnt_true = len(df_tmp[df_tmp[feature_class] == 'Yes'])\n cnt_false = len(df_tmp[df_tmp[feature_class] == 'No'])\n\n p_true = cnt_true / cnt_total\n p_false = cnt_false / cnt_total\n\n entropy = -p_true*np.log2(p_true) - (p_false*np.log2(p_false))\n\n return entropy, cnt, p_true, p_false \n\ndf = pd.read_csv('play_tenis.csv')\n\n#entropia = -p0.log2(p0) - p1.log2(p1)\n\n\nentropy, cnt,P0, P1 = calc_entropy(df, 'play', 'play')\n\nentropy_w, cnt_w, pw0, pw1 = calc_entropy(df, 'wind', 'play', 'Weak')\nentropy_s, cnt_s, ps0, ps1 = calc_entropy(df, 'wind', 'play', 'Strong')\n\nGain_Wind = entropy - (cnt_w * entropy_w) - (cnt_s * entropy_s)" }, { "alpha_fraction": 
0.5526191592216492, "alphanum_fraction": 0.5693723559379578, "avg_line_length": 24.672727584838867, "blob_id": "c5836198d9940da691cc41ff3db6fef20399331a", "content_id": "10b686ed42ed1953d9a5cc116ad91191d436a55d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4248, "license_type": "no_license", "max_line_length": 112, "num_lines": 165, "path": "/entregas/teste_simplificado_gradiente_descendente.py", "repo_name": "ThomasEA/MachineLearning_1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 1 22:05:48 2018\n\n@author: Everton Thomas e Gustavo Emmel\n\nRegressão multivariada\n\n\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\ndef nan_to_average(X):\n if X.isnull().values.any():\n cols = X.columns.tolist()\n for x in cols:\n col_mean = np.mean(X[x])\n X[np.isnan(X[x])] = col_mean\n \n return X\n \ndef normalize(X, columns):\n columns[:-1]\n\n scaled_features = StandardScaler().fit_transform(X.iloc[:,:-1].values)\n scaled_features_df = pd.DataFrame(scaled_features, index=X.index, columns=columns[:-1])\n scaled_features_df['MEDV'] = X['MEDV']\n X = scaled_features_df\n return X\n\ndef add_x0(X):\n x0 = np.ones(X.shape[0]).reshape(X.shape[0], 1)\n X['x0'] = x0\n #reposiciona a última coluna (x0) para a primeira posição\n cols = X.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n X = X[cols]\n return X\n\ndef scatter_features(x, fx, lblx, lbly, color):\n plt.scatter(x, fx, c=color)\n plt.xlabel('x -> {0}'.format(lblx))\n plt.ylabel('fx -> {0}'.format(lbly))\n plt.show()\n\ndef hypothesis(X, theta):\n return np.dot(theta.T, X)\n\ndef cost_function(X, fx, h, theta):\n soma = 0.\n N = len(X)\n \n for i in range(N):\n soma += (h(X.iloc[i], theta) - fx.iloc[i]) ** 2.\n \n return (1./(2. * float(N))) * soma\n\ndef update_t(X, fx, h, theta, alpha):\n tethas_tmp = [None] * len(thetas)\n \n N = len(X)\n \n soma = 0.\n for j in range(len(theta)):\n for i in range(N):\n #if j == 0:\n # soma += (h(X.iloc[i], theta) - fx.iloc[i]) * 1\n #else:\n soma += (h(X.iloc[i], theta) - fx.iloc[i]) * X.iloc[i,j]\n \n if (j == 0):\n tethas_tmp[j] = 1\n else:\n tethas_tmp[j] = theta[j] - ((alpha * (1./float(N))) * soma)\n \n return tethas_tmp\n\ndef gradient_descent(X, fx, h, theta, alpha):\n tethas_tmp = [None] * len(thetas)\n \n N = len(X)\n \n soma = 0.\n \n #for i in range(N):\n #if j == 0:\n # soma += (h(X.iloc[i], theta) - fx.iloc[i]) * 1\n #else:\n #soma += (h(X.iloc[i], theta) - fx.iloc[i]) * X.iloc[i,j]\n \n for j in range(len(theta)):\n tethas_tmp[j] = theta[j] - ((alpha * (1./float(N))) * np.sum(X.iloc[:,j] * (X @ theta.T - fx), axis=0))\n \n return tethas_tmp\n\n\ndf = pd.read_csv('../datasets/casas/ex1data2.csv', names=['size', 'rooms', 'value'])\n\n#3. adiciona x0 ao dataset para viabilizar o algoritmo\ndf = add_x0(df)\n\n#4. Terminado o préprocessamento, divide DS em treino e teste\ntrainset, testset = train_test_split(df, test_size=0.5, shuffle=False)\n\n#5. 
Avaliamos algumas features e sua relação com a classe\nscatter_features(df['size'], df['value'], 'Tamanho', 'Valor casas', 'red')\nscatter_features(df['rooms'], df['value'], 'Nº de quartos', 'Valor médio casas', 'red')\n\nX = trainset.iloc[:,:-1]\nfx = trainset.iloc[:,-1]\n\nthetas = np.ones(X.columns.shape[0])\nalpha = 0.5\n\nthreshold = 0.01\nbatch_size = 5\nepoch = 0.\nmax_epoch = 10\n\nprev = np.inf\ncurr = cost_function(X, fx, hypothesis, thetas)\n\nthetas_final = []\n\nwhile (abs(curr - prev) > threshold) and (epoch < max_epoch):\n bc_cnt = 0 #contador de batch\n \n for i in range(batch_size):\n X_local = X.iloc[bc_cnt:(bc_cnt + batch_size)]\n fx_local = fx.iloc[bc_cnt:(bc_cnt + batch_size)]\n\n tmp_thetas = gradient_descent(X_local, fx_local, hypothesis, thetas, alpha)\n \n thetas = np.array(tmp_thetas)\n \n bc_cnt += 1\n \n prev = curr\n curr = cost_function(X_local, fx_local, hypothesis, thetas)\n print('custo na época ', epoch, ': ', curr)\n \n if (len(thetas_final) == 0) or (curr < prev):\n thetas_final = thetas\n \n epoch += 1\n\nprint('>>> thetas: ', thetas_final)\n\n#Aplicando sobre os dados a serem preditos\nX_t = testset.iloc[1, :-1]\nfx_t = testset.iloc[1, -1]\n\n#predict = []\nval = hypothesis(X_t, thetas)\n\nprint('Valor real: ', fx_t, ' / Valor predito: ', val)\n#val = cost_function()\n#for i in range(len(X_t)):\n# predict.append(hypothesis(X_t.iloc[i], thetas))\n\n\n" } ]
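The mini-batch loop in teste_simplificado_gradiente_descendente.py above only ever slides over the first few training rows per epoch, so most samples never contribute an update. For reference, the same multivariate update can be written as one vectorized step, theta <- theta - (alpha/N) * X^T (X theta - y); a minimal sketch on synthetic rows standing in for ex1data2.csv:

# Sketch only: synthetic rows stand in for the housing data.
import numpy as np

rng = np.random.RandomState(0)
X = np.hstack([np.ones((50, 1)), rng.rand(50, 2)])   # leading x0 = 1 column, as add_x0() builds
true_theta = np.array([1.0, 2.0, -3.0])
y = X @ true_theta + rng.normal(0, 0.01, 50)

theta = np.zeros(3)
alpha = 0.5
for epoch in range(500):
    theta = theta - (alpha / len(y)) * (X.T @ (X @ theta - y))   # full-batch vectorized update

print(theta)   # converges close to [1, 2, -3]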
23
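A quick numeric check of the entropy/information-gain computation in df_entropy_inf_gain.py above. Since play_tenis.csv itself is not shown, the textbook play-tennis counts are assumed here (14 days, 9 Yes / 5 No overall; wind Weak on 8 days with 6 Yes, Strong on 6 days with 3 Yes):

import numpy as np

def entropy(p_yes, p_no):
    return -p_yes * np.log2(p_yes) - p_no * np.log2(p_no)

H_S      = entropy(9/14., 5/14.)   # ~0.940, overall entropy of 'play'
H_weak   = entropy(6/8., 2/8.)     # ~0.811
H_strong = entropy(3/6., 3/6.)     # 1.000
gain_wind = H_S - (8/14.) * H_weak - (6/14.) * H_strong
print(round(gain_wind, 3))         # ~0.048, the same weighting Gain_Wind uses above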
prjemian/ipython_otz
https://github.com/prjemian/ipython_otz
13a910cd268f368bb72839635972115bb77da8c7
999d62c40e8b0b6feb410c0730054eb94ff5218f
f0ff34efba6beef8b6b19c2841edf1b7094b92a5
refs/heads/master
2018-07-15T05:08:31.676941
2018-06-17T13:52:45
2018-06-17T13:52:45
110,611,430
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5969581604003906, "alphanum_fraction": 0.6375158429145813, "avg_line_length": 42.83333206176758, "blob_id": "0afa554425b8984e0574899e1289bb644d93f31c", "content_id": "dd232ca319f735b1c4a858d103fd3306bbb38e0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 789, "license_type": "no_license", "max_line_length": 82, "num_lines": 18, "path": "/profile_bluesky/startup/11-motors.py", "repo_name": "prjemian/ipython_otz", "src_encoding": "UTF-8", "text": "print(__file__)\n\n\"\"\"motors, stages, positioners, ...\"\"\"\n\n# m1 = MyEpicsMotorWithDial('gov:m1', name='m1')\n\nm1 = TunableEpicsMotor(\"gov:m1\", name=\"m1\", labels=(\"motor\", \"tunable\"))\nm2 = EpicsMotor('gov:m2', name='m2', labels=(\"motor\", \"general\"))\nm3 = EpicsMotor('gov:m3', name='m3', labels=(\"motor\", \"general\"))\nm4 = EpicsMotor('gov:m4', name='m4', labels=(\"motor\", \"general\", \"demo\"))\nm5 = EpicsMotor('gov:m5', name='m5', labels=(\"motor\", \"demo\"))\nm6 = EpicsMotor('gov:m6', name='m6', labels=(\"motor\", \"utility\"))\nm7 = EpicsMotor('gov:m7', name='m7', labels=(\"motor\", \"utility\"))\nm8 = EpicsMotor('gov:m8', name='m8', labels=(\"motor\", \"utility\"))\n\nshutter = EpicsMotorShutter(\"gov:m9\", name=\"shutter\", labels=(\"shutter\", \"motor\"))\nshutter.closed_position = 0.0\nshutter.open_position = 3.0\n" }, { "alpha_fraction": 0.7036713361740112, "alphanum_fraction": 0.7220279574394226, "avg_line_length": 31.685714721679688, "blob_id": "413d62ccd6adeecbd00d461cbe9da77423ffbf0e", "content_id": "f300535278515f224201a0d24b74d776aba7ab11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1144, "license_type": "no_license", "max_line_length": 83, "num_lines": 35, "path": "/profile_bluesky/startup/10-devices.py", "repo_name": "prjemian/ipython_otz", "src_encoding": "UTF-8", "text": "print(__file__)\n\n\"\"\"Set up default complex devices\"\"\"\n\nimport time\nfrom ophyd import Component, Device, DeviceStatus\nfrom ophyd import EpicsMotor, EpicsScaler\nfrom ophyd.scaler import ScalerCH\nfrom ophyd import EpicsSignal, EpicsSignalRO, EpicsSignalWithRBV\nfrom ophyd import PVPositioner, PVPositionerPC\nfrom ophyd import AreaDetector, PcoDetectorCam\nfrom ophyd import SingleTrigger, ImagePlugin, HDF5Plugin\nfrom ophyd.areadetector.filestore_mixins import FileStoreHDF5IterativeWrite\nfrom APS_BlueSky_tools.devices import *\n#from APS_BlueSky_tools.synApps_ophyd import *\nfrom APS_BlueSky_tools import *\nfrom APS_BlueSky_tools.plans import *\n\n\nclass TunableEpicsMotor(EpicsMotor, AxisTunerMixin):\n\t\"\"\"\n\tExample::\n\t\n def a2r_pretune_hook():\n # set the counting time for *this* tune\n yield from bps.abs_set(scaler.preset_time, 0.2)\n \n a2r = TunableEpicsMotor(\"xxx:m1\", name=\"a2r\")\n a2r.tuner = TuneAxis([scaler], a2r, signal_name=scaler.channels.chan2.name)\n a2r.tuner.width = 0.02\n a2r.tuner.num = 21\n a2r.pre_tune_method = a2r_pretune_hook\n RE(a2r.tune())\n\n\t\"\"\"\n" }, { "alpha_fraction": 0.6311745047569275, "alphanum_fraction": 0.6575192213058472, "avg_line_length": 28.387096405029297, "blob_id": "7b9bdce38ffe3a46245fbf0b72d0153fd98c481f", "content_id": "8ea706938315cda2c9e3fe26515c88b32f67399e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/profile_bluesky/startup/20-detectors.py", "repo_name": 
"prjemian/ipython_otz", "src_encoding": "UTF-8", "text": "print(__file__)\n\n\"\"\"various detectors and other signals\"\"\"\n\nfrom APS_BlueSky_tools.examples import SynPseudoVoigt\n\n\ndef use_EPICS_scaler_channels(scaler):\n \"\"\"\n configure scaler for only the channels with names assigned in EPICS \n \"\"\"\n read_attrs = []\n for ch in scaler.channels.component_names:\n _nam = epics.caget(\"{}.NM{}\".format(scaler.prefix, int(ch[4:])))\n if len(_nam.strip()) > 0:\n read_attrs.append(ch)\n scaler.channels.read_attrs = read_attrs\n\n\nnoisy = EpicsSignalRO('gov:userCalc1', name='noisy')\n#scaler = EpicsScaler('gov:scaler1', name='scaler')\nscaler = ScalerCH('gov:scaler1', name='scaler')\nuse_EPICS_scaler_channels(scaler)\n\n\nsynthetic_pseudovoigt = SynPseudoVoigt(\n 'synthetic_pseudovoigt', m1, 'm1', \n center=-1.5 + 0.5*np.random.uniform(), \n eta=0.3 + 0.5*np.random.uniform(), \n sigma=0.001 + 0.05*np.random.uniform(), \n scale=1e5)\n" }, { "alpha_fraction": 0.6800553798675537, "alphanum_fraction": 0.6952908635139465, "avg_line_length": 29.723403930664062, "blob_id": "210486a1fe546a717efb6e08de2968fbe2026adb", "content_id": "f695d6888526acbb9f28e88793cf1c3ec71b5d51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1444, "license_type": "no_license", "max_line_length": 75, "num_lines": 47, "path": "/profile_bluesky/startup/25-areadetector.py", "repo_name": "prjemian/ipython_otz", "src_encoding": "UTF-8", "text": "print(__file__)\n\nfrom ophyd import SingleTrigger, AreaDetector, SimDetector\nfrom ophyd import HDF5Plugin, ImagePlugin\nfrom ophyd.areadetector.trigger_mixins import SingleTrigger\nfrom ophyd.areadetector.filestore_mixins import FileStoreHDF5IterativeWrite\nfrom ophyd import Component, Device, EpicsSignalWithRBV\nfrom ophyd.areadetector import ADComponent\n\n\n# MUST, must, MUST have trailing \"/\"!!!\nimage_file_path = \"/tmp/simdet/%Y/%m/%d/\"\n\n\nclass MyHDF5Plugin(HDF5Plugin, FileStoreHDF5IterativeWrite):\n \"\"\"\n \"\"\"\n\n\nclass MySingleTriggerHdf5SimDetector(SingleTrigger, SimDetector): \n \n image = Component(ImagePlugin, suffix=\"image1:\")\n hdf1 = Component(\n MyHDF5Plugin,\n suffix='HDF1:', \n root='/', # for databroker\n write_path_template=image_file_path, # for EPICS AD\n )\n\ntry:\n _ad_prefix = \"otzSIM1:\"\n adsimdet = MySingleTriggerHdf5SimDetector(_ad_prefix, name='adsimdet')\n adsimdet.read_attrs.append(\"hdf1\")\n\nexcept TimeoutError:\n print(f\"Could not connect {_ad_prefix} sim detector\")\n\n\ndef count_AD(det, count_time=0.2, num=1, delay=None, *, md=None):\n det.cam.stage_sigs[\"acquire_time\"] = count_time\n yield from bp.count([det], num=num, delay=delay, md=md)\n\n\ndef ad_continuous_setup(det, acq_time=0.1, acq_period=0.005):\n det.cam.acquire_time.put(acq_time)\n det.cam.acquire_period.put(acq_period)\n det.cam.image_mode.put(\"Continuous\")\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7132353186607361, "avg_line_length": 24.5, "blob_id": "9ae7599ee6723b059970399d7963b0b43c8f8f8c", "content_id": "ce17accab2e75279380e57a938179ef3f84edba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/profile_bluesky/startup/20-signals.py", "repo_name": "prjemian/ipython_otz", "src_encoding": "UTF-8", "text": "print(__file__)\n\n\"\"\"other signals\"\"\"\n\naps_sr_current = EpicsSignalRO(\"S:SRcurrentAI\", 
name=\"aps_sr_current\")\n\n# always record this as secondary stream during scans\n# name: aps_sr_current_monitor\n# db[-1].table(\"aps_sr_current_monitor\")\nsd.monitors.append(aps_sr_current)\n\n\nscans = sscanDevice(\"gov:\", name=\"scans\")\ncalcs = userCalcsDevice(\"gov:\", name=\"calcs\")\ncalcs.enable.put(\"Enable\")\ncalc1 = calcs.calc1\n" }, { "alpha_fraction": 0.6015625, "alphanum_fraction": 0.671875, "avg_line_length": 17.285715103149414, "blob_id": "a0bd6a25925a3da587aa26bd3dd3efb750b703ae", "content_id": "849f74ffb7f1a53b76f4cf891113a2d1e941dff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 48, "num_lines": 7, "path": "/profile_bluesky/startup/21-tunable-motors.py", "repo_name": "prjemian/ipython_otz", "src_encoding": "UTF-8", "text": "print(__file__)\n\n\"\"\"tunable motors\"\"\"\n\nm1.tuner = TuneAxis([synthetic_pseudovoigt], m1)\nm1.tuner.width = 0.02\nm1.tuner.num = 21\n" } ]
6
dyuvaraaj/Learn_abc
https://github.com/dyuvaraaj/Learn_abc
f7bb10be86b547cf800d55bf818b534d3d86553c
a494894510a4c5c677afab5be79a6df0b76f9bff
8ba541542882e2462964f43f1d9cd5b846214bda
refs/heads/master
2020-04-08T21:44:45.923688
2018-11-30T02:33:02
2018-11-30T02:33:02
159,756,833
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.710241436958313, "alphanum_fraction": 0.731890082359314, "avg_line_length": 26.64285659790039, "blob_id": "90480538642b3e150b04a80cf7ac61edeaea07fd", "content_id": "612fe3179a14018af4f2dffd98a0342334ed073f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 88, "num_lines": 42, "path": "/MachineLearning/Exercise1_DataPreprocessing.py", "repo_name": "dyuvaraaj/Learn_abc", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 10 16:55:10 2018\r\n\r\n@author: v-yudura\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndataset = pd.read_csv(\"Data.csv\")\r\n\r\n#Split into independent and dependent variables.\r\nX = dataset.iloc[:,:-1].values\r\nY = dataset.iloc[:,3].values\r\n\r\n#Remove missing values using sklearn.imputer\r\nfrom sklearn.preprocessing import Imputer\r\nimputer = Imputer(missing_values=\"NaN\", strategy=\"mean\", axis=0)\r\nX[:,1:3] = imputer.fit_transform(X[:,1:3])\r\n\r\n#Encode values\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabelEncoder = LabelEncoder()\r\nX[:,0] = labelEncoder.fit_transform(X[:,0])\r\n\r\nfrom sklearn.preprocessing import OneHotEncoder\r\noneHotEncoder = OneHotEncoder(categorical_features=[0])\r\nX = oneHotEncoder.fit_transform(X).toarray()\r\n\r\nnp.set_printoptions(threshold=np.NaN)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size=0.2, random_state=0)\r\n\r\n\r\n#Feature scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nstandardScaler = StandardScaler()\r\nX_Train = standardScaler.fit_transform(X_Train)\r\nX_Test = standardScaler.transform(X_Test)" }, { "alpha_fraction": 0.6872682571411133, "alphanum_fraction": 0.7107540369033813, "avg_line_length": 20.52777862548828, "blob_id": "a9a1ae2a237f37edcb6833f0354bbb530373477d", "content_id": "d5e5d83f14ed03e2e14612263a0e6974645cf82e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 52, "num_lines": 36, "path": "/MachineLearning/Exercise3_PolynomialLinearRegression.py", "repo_name": "dyuvaraaj/Learn_abc", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 17 07:59:47 2018\r\n\r\n@author: v-yudura\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndataset = pd.read_csv(\"Position_Salaries.csv\")\r\n\r\nX = dataset.iloc[:,1:-1].values\r\nY = dataset.iloc[:,2].values\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nregression = LinearRegression()\r\nregression.fit(X,Y)\r\nY_Pred = regression.predict(X)\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(X,Y,color=\"red\")\r\nplt.plot(X,Y_Pred, color=\"blue\")\r\n\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\npolynomialRegression = PolynomialFeatures(degree=2)\r\nX_Poly = polynomialRegression.fit_transform(X)\r\npolynomialRegression.fit(X_Poly, Y)\r\n\r\nlinearReg2 = LinearRegression()\r\nlinearReg2.fit(X_Poly, Y)\r\nplt.scatter(X,Y, color=\"red\")\r\nplt.plot(X,Y,color=\"blue\")\r\n" }, { "alpha_fraction": 0.6837329864501953, "alphanum_fraction": 0.7116007804870605, "avg_line_length": 28.176469802856445, "blob_id": "d29bcd4b96b73060da85f29ec40039cda70b5d0d", "content_id": "caf5ae79c5dfb92820555bde51d76ece7bbf4a88", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1543, "license_type": "no_license", "max_line_length": 87, "num_lines": 51, "path": "/MachineLearning/Exercise3_MultipleLinearRegression.py", "repo_name": "dyuvaraaj/Learn_abc", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 17 05:54:31 2018\r\n\r\n@author: v-yudura\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndataset = pd.read_csv(\"50_Startups.csv\")\r\nX = dataset.iloc[:,:-1].values\r\nY = dataset.iloc[:,4].values\r\n\r\n#Split into training and test dataset\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabelEncoder = LabelEncoder()\r\nX[:,3] = labelEncoder.fit_transform(X[:,3])\r\n\r\nfrom sklearn.preprocessing import OneHotEncoder\r\noneHotEncoder = OneHotEncoder(categorical_features=[3])\r\nX = oneHotEncoder.fit_transform(X).toarray()\r\nX = X[:,1:]\r\n\r\nimport statsmodels.formula.api as sm\r\nX= np.append(arr=np.ones((50,1)).astype(int) , values=X,axis=1)\r\nX_Opt = X[:,[0,1,2,3,4,5]]\r\nregressor_BackwardElimination = sm.OLS(endog=Y,exog=X_Opt).fit()\r\nregressor_BackwardElimination.summary()\r\n\r\nX_Opt = X[:,[0,1,3,4]]\r\nregressor_BackwardElimination = sm.OLS(endog=Y,exog=X_Opt).fit()\r\nregressor_BackwardElimination.summary()\r\n\r\nX_Opt = X[:,[0,3,4]]\r\nregressor_BackwardElimination = sm.OLS(endog=Y,exog=X_Opt).fit()\r\nregressor_BackwardElimination.summary()\r\n\r\n\r\nX_Opt = X[:,[0,3]]\r\nregressor_BackwardElimination = sm.OLS(endog=Y,exog=X_Opt).fit()\r\nregressor_BackwardElimination.summary()\r\n\r\n#split train and test dataset\r\nfrom sklearn.model_selection import train_test_split\r\nX_Train,X_Test,Y_Train,Y_Test = train_test_split(X_Opt ,Y,test_size=0.2,random_state=0)\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(X_Train, Y_Train)\r\nY_Pred = regressor.predict(X_Test)\r\n\r\n\r\n" }, { "alpha_fraction": 0.6642441749572754, "alphanum_fraction": 0.7093023061752319, "avg_line_length": 23.55555534362793, "blob_id": "30c4f0fd48f6e2a7578b004194ca3aa256674d3e", "content_id": "208355691046e44a6811998bec3e839bf2035fbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 688, "license_type": "no_license", "max_line_length": 49, "num_lines": 27, "path": "/MachineLearning/Exercise2_SimpleLinearRegression.py", "repo_name": "dyuvaraaj/Learn_abc", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 10 19:35:38 2018\r\n\r\n@author: v-yudura\r\n\"\"\"\r\n\r\n#Simple Linear Regression\r\nimport pandas as pd\r\ndataset = pd.read_csv(\"Salary_Data.csv\")\r\n\r\n#Seperate dependent and independent variables\r\nX_Train = dataset.iloc[0:30,:-1].values\r\nY_Train = dataset.iloc[0:30,1].values\r\n\r\n\r\nX_Test = dataset.iloc[30:33,:-1].values\r\nY_Test = dataset.iloc[30:33,1].values\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nlinearRegression = LinearRegression()\r\nlinearRegression.fit(X_Train, Y_Train)\r\nY_Pred = linearRegression.predict(X_Train)\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(X_Test, Y_Test,color=\"red\")\r\nplt.plot(X_Train,Y_Pred, color=\"blue\")" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 35, "blob_id": "255f777fee12fc05bf21abc678507f1f5ce64d34", "content_id": "e896953f9c8ba6c323216c0e80066239e2edf206", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 59, "num_lines": 2, "path": "/README.md", "repo_name": "dyuvaraaj/Learn_abc", "src_encoding": "UTF-8", "text": "# Learn_abc\nThis project is to learn AI, Block Chain and Cyber security\n" } ]
5
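Note that Exercise3_PolynomialLinearRegression.py above fits linearReg2 on the degree-2 features but then plots X against Y twice, so the fitted polynomial curve is never drawn. A corrected, self-contained sketch (synthetic position/salary-style data assumed, since the CSV is not shown):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(1, 11).reshape(-1, 1)                 # position levels 1..10
y = 0.8 * X.ravel() ** 2 + np.random.RandomState(0).normal(0, 2, 10)

X_poly = PolynomialFeatures(degree=2).fit_transform(X)
reg = LinearRegression().fit(X_poly, y)

plt.scatter(X, y, color='red')
plt.plot(X, reg.predict(X_poly), color='blue')      # plot the fitted curve, not y again
plt.show()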
ilyaDubovtsev/support-heroku
https://github.com/ilyaDubovtsev/support-heroku
5b4929b9c7f93506a5c0c1654aa4c24cbe7819b4
9873b4182b928a2b32acdbc8a031720f5145d77b
4306b09fe5b88855853bbccc19c23cb0e774d634
refs/heads/master
2021-09-06T05:20:27.466103
2018-02-02T17:53:47
2018-02-02T17:53:47
120,006,668
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8030303120613098, "alphanum_fraction": 0.8030303120613098, "avg_line_length": 32, "blob_id": "8178aba4d0fb2c0e3e880b25fcb60b7d25b7b0c1", "content_id": "6615ae4d31c320ea026db6723db66bdf2eb3c8a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 48, "num_lines": 2, "path": "/README.md", "repo_name": "ilyaDubovtsev/support-heroku", "src_encoding": "UTF-8", "text": "# support-heroku\nA version of the learning support app for Heroku\n" }, { "alpha_fraction": 0.6034846901893616, "alphanum_fraction": 0.6335797309875488, "avg_line_length": 50.18918991088867, "blob_id": "b5e5238659a953b3a9b7d6299e7557377e4d9cd7", "content_id": "ab5780fe7138790461eff5c0edfb87ef69a2e1da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2703, "license_type": "no_license", "max_line_length": 117, "num_lines": 37, "path": "/support_heroku/supportHeroku.py", "repo_name": "ilyaDubovtsev/support-heroku", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello_world():\n names = ['Азиз Ринатович', 'Павел Муравьев', 'Алена Васильева', 'Math Help', 'Елена Константинова',\n 'Алина Бесконечная', 'Вадим Соколов']\n skils = ['Математический анализ, высшая математика, линейная алгебра, аналитическая геометрия, дифференциальные '\n 'уравнения, химия (общая, неорганическая, органическая)',\n 'Математический анализ, линейная алгебра, аналитическая геометрия, дифференциальные уравнения, '\n 'теория вероятности, статистика',\n 'Математический анализ, линейная алгебра, аналитическая геометрия, дифференциальные уравнения',\n 'Физика',\n 'Химия, английский (переводы, грамматика)',\n 'ВСЕ РАБОТЫ по экономическим дисциплинам (анализ, учет, статистика, экономика). Задачи и ОНЛАЙН - '\n 'математика, ТВиМС, ЛинАлг и др',\n 'Математический анализ, дифференциальные уравнения, теория вероятностей и математическая статистика, '\n 'линейная алгебра, аналитическая геометрия, общая алгебра и теория чисел, теория функций комплексного '\n 'переменного, функциональный анализ, методы оптимальных решений, вычислительная математика. С немного '\n 'большими сложностями математическая физика (уравнения в частных производных).']\n urls = ['https://vk.com/id373565147',\n 'https://vk.com/id282638202',\n 'https://vk.com/id123175948',\n 'https://vk.com/id283263189',\n 'https://vk.com/lena_constanta',\n 'https://vk.com/id426631125',\n 'https://vk.com/id415677041']\n avs = ['images/' + str(i + 1) + '.jpg' for i in range(7)]\n params = {'names': names, 'skils': skils, 'urls': urls, 'length': 7, 'avs': avs}\n\n return render_template('index.html', params=params)\n\n\nif __name__ == '__main__':\n app.run()\n" } ]
2
Rahima-web/Hamiltonien
https://github.com/Rahima-web/Hamiltonien
9f5c0db099dcc16d07ff3198e4c09177ee4c692c
cbf1fe8f70dba036df63913ee5557f003586384c
621b9ae446b5dac59efdc0aa2577d618b3501619
refs/heads/main
2023-01-14T08:50:09.563103
2020-11-23T00:52:20
2020-11-23T00:52:20
308,698,643
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44354191422462463, "alphanum_fraction": 0.4800366163253784, "avg_line_length": 20.062650680541992, "blob_id": "e640db5633999df265215be55e2311ee1d3b7032", "content_id": "87527c36ec607501e013e6594f86dab2d257e249", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8745, "license_type": "no_license", "max_line_length": 160, "num_lines": 415, "path": "/TP3/TP3.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 13 18:01:20 2020\n\n@author: rahimamohamed\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom datetime import datetime \nimport numpy as np\nimport pandas as pd\nfrom pyti.smoothed_moving_average import smoothed_moving_average as sma\nfrom pyti.bollinger_bands import lower_bollinger_band as lbb\n\n\n\" VARIABLE DECLARATION\" \n\n\n\" QUESTION 1: Download of the file\" \n\ndata = pd.read_csv(\"https://raw.githubusercontent.com/Rahima-web/Hamiltonien/67773738288686dd932eb87932fd551b65b45f5d/TP3/bank_of_america.csv\",sep=\",\",header=0)\ndata['Date']=[datetime.strptime(x, '%m/%d/%Y') for x in data['Date'].str.slice(0,11)]\ndata = data.drop(['Open', 'High','Low','Volume','Adj Close'], axis=1)\ndata=data.to_numpy()\n\n\n#data['fast_sma'] = sma(data['Close'].tolist(), 10)\n#data['slow_sma'] = sma(data['Close'].tolist(), 30)\n\n\n\" QUESTION 2: Separate the dataset in two parts: training and test datasets.\" \n\nnb = int(len(data)*0.70)\nTrain_date = data[0:nb,0]\nTrain_close = data[0:nb,1]\n\nTest_date = data[nb:len(data),0]\nTest_close = data[nb:len(data),1]\n\nplt.plot(Train_date,Train_close,color=\"blue\")\nplt.plot(Test_date,Test_close,color=\"red\")\nplt.show()\n\n\n\"\"\"\nQUESTION 3\n\"\"\"\n\nn = len(Train_date)\nNb_init = 0\nNb_share = 30\nCash_init = 5000\n\ndef state(p,Nb,Cash):\n pas = 5\n Price = []\n for i in range(1,pas):\n Price.append(p[0:i:1])\n for i in range(pas,n+1):\n Price.append(p[i-pas:i:1])\n S = [Price,Nb,Cash]\n return S\n\n#S = state(close_train,Nb_init,Cash_init) \n#print(S) \n\ndef reward(S,a,t):\n R = 0\n Cash = S[2]\n Nb = S[1]\n S = S[0]\n if (a == 1) & (Nb >= 30):\n if (S[t][-1] >= (np.mean(S[t]) * 0.99)): #vendre\n R = S[t][-1] * Nb_share\n Cash+= R\n Nb -= Nb_share\n else:\n R = 0\n Nb = Nb\n Cash = Cash\n \n if (a == 2) : \n if (Cash >= S[t][-1] * Nb_share):\n if (S[t][-1] <= (np.mean(S[t]) * 1.01)): #achat\n R = -S[t][-1] * Nb_share\n Cash+= R\n Nb += Nb_share\n else:\n R = 0\n Nb = Nb\n Cash = Cash\n \n if (a == 0):\n R = 0\n Nb = Nb\n Cash = Cash\n \n if (Nb==0):\n R = -S[t][-1] * Nb_share\n Cash+= R\n Nb += Nb_share\n \n return R,Nb,Cash\n\n#R = reward(state(close_train,90,2000),2,15)\n#print(R)\n\ndef portfolio(ptf_prec,S,a,t):\n R,Nb,Cash = reward(S,a,t)\n ptf = ptf_prec\n if t == 0:\n ptf = Cash_init \n else:\n ptf += Cash\n return ptf\n\n\"\"\"\nQUESTION 4\n\"\"\"\n\nalpha = 0.4\ngamma = 0.7\n\ndef maxi(cash_prec,S,t):\n A = np.zeros((3,1))\n Nb = S[1]\n if Nb == 0:\n a=2\n A[a] = portfolio(cash_prec,S,a,t)\n else:\n for j in range(0,3):\n A[j] = portfolio(cash_prec,S,j,t)\n index = np.where(A == np.max(A))\n a = index[0][0] \n return A[a],a\n\n#def Q_learning(data):\n# Q = np.zeros((n-1,3))\n# S = state(data,Nb_init,Cash_init)\n# for t in range(0,n-1):\n# for a in range(0,3):\n# R,Nb,Cash = reward(S,a,t)\n# Qmax = maxi(Cash,S,t+1)\n# for itera in range(0,20):\n# Q[t,a] = (1 - alpha) * Q[t,a] + alpha * (R + gamma * Qmax[0]) \n# \n# S = state(data,Nb,Cash)\n# _,Nb,Cash = 
reward(S,Qmax[1],t)\n# return Q\n#\n#Q = Q_learning(close_train)\n#print(Q)\n\ndef Q_learning(data):\n Q = np.zeros((n-1,3))\n Nb_prec,Cash_prec = Nb_init,Cash_init\n Cashs = [Cash_prec]\n for t in range(1,n):\n S = state(data,Nb_prec,Cash_prec)\n for a in range(0,3):\n \n R,Nb,Cash = reward(S,a,t-1)\n Qmax = maxi(Cash,S,t)\n \n for itera in range(0,20):\n Q[t-1,a] = (1 - alpha) * Q[t-1,a] + alpha * (R + gamma * Qmax[0]) \n \n R,Nb_new,Cash_new = reward(state(data,Nb,Cash),Qmax[1],t)\n Nb_prec,Cash_prec = Nb_new,Cash_new\n Cashs.append(Cash_new)\n \n return Q,Cashs\n\nQ,Cash = Q_learning(Train_close)\n\nPI=np.zeros((len(Q),1))\nfor i in range(0,len(Q)):\n PI[i]=np.argmax(Q[i])\n\n \n\nplt.plot(Train_date [:-1],Q[:,0], label = 'A = 0')\nplt.plot(Train_date [:-1],Q[:,1], label = 'A = 1')\nplt.plot(Train_date [:-1],Q[:,2], label = 'A = 2')\nplt.legend()\nplt.show()\n\nplt.plot(Train_date ,Cash, label = 'Cash')\nplt.show()\n\nplt.plot(Train_date [:-1],PI, label = 'policy')\nplt.legend()\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\" QUESTION 3: Portfolio function\" \n#\n#\n#def reward(init,A,S,i,Nb):\n# R = 0\n# ptf = np.zeros((nb,1))\n# ptf[0] = Cash\n# if i == 0: \n# ptf[0] = Cash\n# else:\n# if (A == -1) & (Nb >= 30): #vendre\n# R = S[i] * NB_share\n# Nb -= 30\n# \n# if (A == 1) & (init >= (S[i] * NB_share)) : #achète\n# R = -S[i] * NB_share \n# Nb += 30\n# \n# if A == 0:\n# R = 0 # on ne doit pas mettre une récompense négative ici (ex - 0,02) ?\n# ptf[i] = init + R\n# return ptf[i],Nb,R\n#\n#ptf = []\n#res,Nb,R = reward(0,1,Train_close,0,0)\n#ptf.append(res)\n#for i in range(1,nb):\n#\n# res,Nb,R = reward(res,np.random.choice([-1,0,1]),Train_close,i,Nb)\n# ptf.append(res)\n#\n##ptf = [ptfs(ptf[-1],close_train,1,i) for i in range(0,n)]\n#plt.plot(Train_date, ptf)\n#plt.show()\n#\n#alpha = 0.03\n#gamma = 0.8\n#\n#def maxi(init,S,i,Nb):\n# A = np.zeros((3,1))\n# for j in range(-1,2,1):\n# A[j] = reward(init,j,S,i,Nb)[0]\n# index = np.where(A == max(A))\n# a = index[0][0] - 1 \n# return a\n#\n#def Q_learning(S):\n# Q = np.zeros((nb,1))\n# res,Nb,_ = reward(0,1,S,0,0)\n# for t in range(1,nb):\n# \n# for itera in range(0,10):\n# \n# a = maxi(res,S,t-1,Nb)\n# at = np.random.choice([-1,0,1])\n# Q[t-1],_,R = reward(res,at,S,t-1,Nb)\n# Q[t-1] = (1 - alpha) * Q[t-1] + alpha * (R + gamma * reward(Q[t-1],a,S,t,Nb)[0])\n# \n# res,Nb,_ = reward(Q[t-1],a,S,t-1,Nb) \n# \n# return Q\n#\n#Q = Q_learning(Train_close)\n#\n#plt.plot(Train_date,Q)\n#plt.show()\n\n### Trading Strategy\n# '''If Price is 3% below Slow Moving Average, then Buy\n#\tPut selling order for 2% above buying price'''\n# nb = int(len(data)*0.70)\n# A = np.zeros((nb,1))\n# for i in range(1,nb):\n# if Data['slow_sma'][i] > Data['Close'][i] and (Data['slow_sma'][i] - Data['Close'][i]) > 0.03 * Data['Close'][i]:\n# A[i]=1 #achat\n# if Data['slow_sma'][i] < Data['Close'][i] and (Data['slow_sma'][i] - Data['Close'][i]) < 0.02 * Data['Close'][i]:\n# A[i]=-1 #vend\n# return A\n# \n\n#A=strategy(data)\n\n#NB_share = 30\n#Cash = 5000\n#nb = int(len(data)*0.70)\n\n#price_trans = 0.1*30*valeurclose\n#\n#def reward(A,S,i):\n# # ON VEUT MAXIMISER LE CASH DU PTF: si on vend on gagne du cash et si on achète on perd du cash\n# R = np.zeros((nb,1))\n# for j in range(0,nb):\n# if A[j] == -1:\n# R[j]= S[j] * NB_share - 0.1* S[j] * NB_share\n# if A[i] == 1:\n# R[j]= -S[j] * NB_share - 0.1* S[j] * NB_share\n# if A[j] == 0:\n# R[j]= 0\n# return R[i]\n#\n#\n#\n#R = [reward(A,Train_close,i) for i in range(0,nb)]\n#\n#\n#def ptfs(R):\n# ptf = 
np.zeros((nb,1))\n# ptf[0] = Cash\n# for j in range(0,nb-1):\n# ptf[j+1] = ptf[j] + R[j] \n# return ptf\n#\n# \n#ptf = ptfs(R)\n\n#alpha = 0.4\n#gamma = 0.9\n#\n#def Q_learnig(S,A):\n# Q = np.zeros((nb,1))\n# Q_prec = Q\n# for itera in range(10): \n# for i in range(0,nb):\n# \n# break\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#n = len(Train_date)\n\n#def policy(S): \n# A = np.zeros((n,1))\n# for i in range(0,n):\n# if S[i] > np.mean(S):\n# A[i] = -1 #sell\n# if S[i] < np.mean(S):\n# A[i] = 1 #buy\n# if S[i] == np.mean(S):\n# A[i] = 0 #hold\n# return A\n#\n#\n#A = policy(Train_close)\n#NB_share = 30\n#Cash = 5000\n\n#def reward(A,S,i):\n# R = np.zeros((n,1))\n# for j in range(0,n):\n# if A[j] == -1:\n# R[j]= -S[j] * NB_share\n# if A[i] == 1:\n# R[j]= S[j] * NB_share \n# if A[j] == 0:\n# R[j]= 0\n# return R[i]\n#\n#\n#R = [reward(A,Train_close,i) for i in range(0,n)]\n#\n#def ptfs(R,i):\n# ptf = np.zeros((n,1))\n# ptf[0] = Cash\n# if i == 0: \n# ptf[0] = Cash\n# else:\n# for j in range(0,n-1):\n# ptf[j+1] = ptf[j] + R[j]\n# return ptf[i]\n#\n# \n#ptf = [ptfs(R,i) for i in range(0,n)]\n#\n#alpha = 0.4\n#gamma = 0.9\n#\n#def Q_learnig(S,A):\n# Q = np.zeros((n,1))\n# Q_prec = Q\n# for itera in range(10): \n# for i in range(0,n):\n# \n# break\n" }, { "alpha_fraction": 0.6140350699424744, "alphanum_fraction": 0.6564798951148987, "avg_line_length": 27.063491821289062, "blob_id": "91eb604dfc78f0b4bffdff46adcd759a378f4ec7", "content_id": "aaaa5c3e7b6cc159884b3afaaf0bc5e8ebcb5383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "no_license", "max_line_length": 160, "num_lines": 63, "path": "/TP3/Brouillon.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 16 17:22:53 2020\n\n@author: rahimamohamed\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom datetime import datetime \nimport numpy as np\nimport pandas as pd\nfrom pyti.smoothed_moving_average import smoothed_moving_average as sma\nfrom pyti.bollinger_bands import lower_bollinger_band as lbb\n\n\n\" VARIABLE DECLARATION\" \n\n\n\" QUESTION 1: Download of the file\" \n\ndata = pd.read_csv(\"https://raw.githubusercontent.com/Rahima-web/Hamiltonien/67773738288686dd932eb87932fd551b65b45f5d/TP3/bank_of_america.csv\",sep=\",\",header=0)\ndata['Date']=[datetime.strptime(x, '%m/%d/%Y') for x in data['Date'].str.slice(0,11)]\n#data = data.drop(['Open', 'High','Low','Volume','Adj Close'], axis=1)\n#data=data.to_numpy()\n\n\n\ndata['fast_sma'] = sma(data['Close'].tolist(), 10)\ndata['slow_sma'] = sma(data['Close'].tolist(), 30)\n\n\n\n\n\n\" QUESTION 2: Separate the dataset in two parts: training and test datasets.\" \n\nnb = int(len(data)*0.70)\nTrain_date = data['Date'][:nb]\nTrain_close = data['Close'][:nb]\n\nTest_date = data['Date'][nb:]\nTest_close = data['Close'][nb:]\n\nplt.plot(Train_date,Train_close,color=\"blue\")\nplt.plot(Test_date,Test_close,color=\"red\")\nplt.show()\n\n\" QUESTION 3: Portfolio function\" \n\n## Trading Strategy\n\ndef strategy(Data):\t\n '''If Price is 3% below Slow Moving Average, then Buy\n\tPut selling order for 2% above buying price'''\n nb = int(len(data)*0.70)\n A = np.zeros((nb,1))\n for i in range(1, len(Train_close)):\n if data['slow_sma'][i] > data['Close'][i] and (data['slow_sma'][i] - data['Close'][i]) > 0.03 * data['Close'][i]:\n A[i]=1\n if data['slow_sma'][i] > data['Close'][i] and (data['slow_sma'][i] - data['Close'][i]) < 0.02 * 
data['Close'][i]:\n            A[i]=-1\n    return A" }, { "alpha_fraction": 0.5684889554977417, "alphanum_fraction": 0.5847665667533875, "avg_line_length": 26.95689582824707, "blob_id": "b317960617d08c60c7af3cae65a63e1c4f222935", "content_id": "2bebd55863ee3bad52cfc0df98fce67fc794d9fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3259, "license_type": "no_license", "max_line_length": 101, "num_lines": 116, "path": "/TP4/TP4.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 22 14:21:23 2020\n\n@author: rahimamohamed\n\nGROUP : Rahima Mohamed, Mahisha Uruthirasigamani, Danush Chandrarajah \n\nPROBLEM : You look for a parking space on the street. Each space is free with probability p = 1 − q. \nYou cannot tell if a space is free until you reach it. You cannot go backward. Once at a space, \nyou must decide to stop or to continue. \nFrom position s (i.e., s spaces from your destination), the cost of stopping is s. \nThe cost of passing your destination without parking is D. Construct the strategy\nthat will return the optimal parking space for a destination.\n\nNote: Fix yourself (at your will) the values that you need to solve the problem\n\n\"\"\"\n\nimport random as rd\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\"\"\" VARIABLE DECLARATION \"\"\"\n\n#number of parkings\n\nN = 3\np_max = 0.5\np_min = 0.2\nsize_min=40\nsize_max=300\n#p=0.3\n\n\"\"\" QUESTION 1 : Generate several maps with different parking-space distributions. \"\"\"\n\ndef Parking_Creation():\n    # The probability p that a space is free :\n    p = rd.uniform(p_min,p_max)\n    \n    # Define the size of the parking :\n    size = int(rd.uniform(size_min,size_max))\n    \n    # Create the parking : x=0 => space is not free && x=1 => space is free :\n    availability = rd.choices([0,1] , [1-p,p] , k=size)\n    \n    return availability,p,size\n\n\ndef create_all_parkings(n):\n#    print(\"please enter the desired number of parkings\")\n#    n=input()\n#    n=int(n)\n    Liste_of_Parking=[]\n    for i in range(0,n):\n        Liste_of_Parking.append(Parking_Creation())\n    return Liste_of_Parking\n\n\n\"\"\" QUESTION 2 : Implement the parking strategy shown in class for the generated maps.\"\"\"\n\ndef Parking_Strategy(availability,p,size):\n    D = size+1 #Cost of passing your destination without parking\n    S,X,STOP,A = [],[],[],[] #S: space; X: availability, STOP : cost of stopping; A: action to do \n    for index,x in enumerate(availability) :\n        s = size-index #space number\n        S += [s]\n        X += [x]\n        if x==0:\n            STOP += [float('nan')] #the space is not free\n            A += ['Continue']\n        else :\n            stop = (D*p+1) * ((1-p)**s)\n            STOP += [stop] \n            if stop >= 1:\n                A += ['PARKING']\n                return S,X,STOP,A\n            else:\n                A += ['Continue']\n    return S,X,STOP,A #never stopped: the destination was passed without parking\n\n\ndef All_parking_strategy():\n    H=[]\n    L=create_all_parkings(N)\n    for i in range (0,len(L)):\n        H.append(Parking_Strategy(L[i][0],L[i][1],L[i][2]))\n    \n    return H\n\n\n\"\"\" QUESTION 3 : Discuss your results. 
\"\"\"\n\n\nsns.set_style('darkgrid')\n\ndef traces(X,Y):\n plt.scatter(X,Y)\n plt.plot(X,[1]*len(X), 'b--')\n #plt.xlim(X[len(X)-40], 0) # decreasing S\n plt.title('STOP CONTROLLER EVOLUTION AS A FUNCTION OF PLACE S')\n plt.xlabel('S')\n plt.ylabel('STOP CONTROLER VALUE')\n plt.tight_layout()\n plt.show()\n \n###############################################################################\n\nL=create_all_parkings(N)\nfor i in range(0,len(L)):\n print (\" \\nPARKING NUMBER :\",(i+1),\"\\n\")\n L=create_all_parkings(N)\n H=All_parking_strategy()\n traces(H[i][0],H[i][2])\n \n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5382204651832581, "alphanum_fraction": 0.5688664317131042, "avg_line_length": 25.21917724609375, "blob_id": "65c111f7c6e5eebb645e36bb9fe84dab229ac456", "content_id": "fcc307f23000960d249591cd403b52af328598f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5743, "license_type": "no_license", "max_line_length": 160, "num_lines": 219, "path": "/TP3/Brouillon_2.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 18 19:42:30 2020\n\n@author: rahimamohamed\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom datetime import datetime \nimport numpy as np\nimport pandas as pd\n##from pyti.bollinger_bands import lower_bollinger_band as lbb\n\n\n\n\" QUESTION 1: Download of the file\" \n\nDATA = pd.read_csv(\"https://raw.githubusercontent.com/Rahima-web/Hamiltonien/67773738288686dd932eb87932fd551b65b45f5d/TP3/bank_of_america.csv\",sep=\",\",header=0)\nDATA['Date'] = [datetime.strptime(x, '%m/%d/%Y') for x in DATA['Date'].str.slice(0,11)]\nDATA = DATA.drop(['Open', 'High','Low','Volume','Adj Close'], axis=1)\nDATA = DATA.to_numpy()\n\n\n#data['fast_sma'] = sma(data['Close'].tolist(), 10)\n#data['slow_sma'] = sma(data['Close'].tolist(), 30)\n\n\n\" QUESTION 2: Separate the dataset in two parts: training and test datasets.\" \n\n#We are taking 70% of the DATA for the training and 30 % for the test\n\n\nNB_OF_TRAIN = int(len(DATA)*0.70) \nTrain_date = DATA[0:NB_OF_TRAIN,0]\nTrain_close = DATA[0:NB_OF_TRAIN,1]\n\nTest_date = DATA[NB_OF_TRAIN:len(DATA),0]\nTest_close = DATA[NB_OF_TRAIN:len(DATA),1]\n\nprint(\"\\n---------------QUESTION 2: SERATION OF THE DATASET-------------\\n\")\n\nplt.plot(Train_date,Train_close,label =\"Training\",color=\"blue\")\nplt.plot(Test_date,Test_close,label=\"Test\",color=\"red\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"Close Price\")\nplt.legend()\nplt.show()\n\n\n\n\" QUESTION 3 : state & reward function + portfolio function\"\n\n\" VARIABLE DECLARATION\" \n\nn_train = len(Train_date)\nInitial_nbshare = 0\nNB_of_Share = 30\nInitial_Cash = 5000\n\n### STATE FUNCTION ###\n\n\n\"\"\"\nHere we define a matrix S which describes each state: \neach state corresponds to the closing price for 5 days, \nthe number of shares owned, and the cash owned.\n\"\"\"\n\ndef State_function(data,Nb,Cash):\n pas = 5\n close_price = []\n for i in range(1,pas):\n close_price.append(data[0:i:1])\n for i in range(pas,n_train+1):\n close_price.append(data[i-pas:i:1])\n S = [close_price,Nb,Cash]\n return S\n\nS = State_function(Train_close,NB_of_Share,Initial_Cash) \n\n### REWARD FUNCTION ###\n\n\"\"\"\nFor the reward function: we calculate the 5-day simple moving average, then we set that: \n - if the close price is 3% below the 5-day simple Moving Average, then we buy \n - if the close price is 0.2% above the 5-day simple Moving Average, 
then we sell\n\"\"\"\n\ndef Reward_function(State,a,t):\n R = 0\n Cash = State[2]\n nb = State[1]\n S = State\n #SMA = np.mean(S[0][t])\n \n ## The case where we SELL\n if (a == 1) & (nb >= 30):\n if (S[0][t][-1] >= (np.mean(S[0][t])* 0.03)): \n R = S[0][t][-1] * NB_of_Share\n Cash+= R\n nb -= NB_of_Share\n \n #The case where where we have 0 share in our portfolio, so we can't sell \n if (nb==0): \n R = 0\n nb = nb\n Cash = Cash\n else:\n R = 0\n nb = nb\n Cash = Cash\n \n ## The case where we BUY, we verify that we have enough cash \n if (a == 2) : \n if (Cash >= S[0][t][-1] * NB_of_Share):\n if (S[0][t][-1] <= (np.mean(S[0][t]) * 1.02)): \n R = -S[0][t][-1] * NB_of_Share\n Cash+= R\n nb += NB_of_Share\n else:\n R = 0\n nb = nb\n Cash = Cash\n \n ## The case where we HOLD \n if (a == 0):\n R = 0\n nb = nb\n Cash = Cash\n \n return R,nb,Cash\n\nR = Reward_function(State_function(Train_close,0,5000),2,35)\n\n\n### PORTFOLIO FUNCTION ###\n\ndef Portfolio_function(old_ptf,S,a,t):\n R,Nb,Cash = Reward_function(S,a,t)\n ptf = old_ptf\n \n #the case where we at t=0, initale state\n if t == 0:\n ptf = Initial_Cash \n else:\n ptf += Cash\n return ptf\n\n\n\n\" QUESTION 4 : Implement the Q-Learning Algorithm \"\n\n\" VARIABLE DECLARATION\"\n\nalpha = 0.9\ngamma = 0.1\n\n### We first code how to find the A who maximise the portfolio\n\ndef Research_of_max(old_cash,S,t):\n Action = np.zeros((3,1))\n Nb = S[1]\n \n # If the number of owned shares is equal to zero, so we buy, then a=2 \n if Nb == 0:\n a=2\n Action[a] = Portfolio_function(old_cash,S,a,t)\n else:\n for k in range(0,3):\n Action[k] = Portfolio_function(old_cash,S,k,t)\n \n #We reseach the optimal action a\n index = np.where(Action == np.max(Action))\n a = index[0][0] \n return Action[a],a\n\ndef Q_learning_algorithm(data):\n Q = np.zeros((n_train-1,3))\n old_NB,old_Cash = NB_of_Share,Initial_Cash\n for t in range(1,n_train):\n S = State_function(data,old_NB,old_Cash)\n for a in range(0,3):\n \n R,Nb,Cash = Reward_function(S,a,t-1)\n Q_max = Research_of_max(Cash,S,t)\n \n for itera in range(0,50):\n Q[t-1,a] = (1 - alpha) * Q[t-1,a] + alpha * (R + gamma * Q_max[0]) \n \n R,new_NB,new_Cash = Reward_function(State_function(data,Nb,Cash),Q_max[1],t)\n old_NB,old_Cash = new_NB,new_Cash\n return Q\n\nQ = Q_learning_algorithm(Train_close)\n\n### POLICY PI: the sequence of optimal action ###\n\nPI=np.zeros((len(Q),1))\nfor i in range(0,len(Q)):\n PI[i]=np.argmax(Q[i])\n\nprint(\"\\n---------------QUESTION 4:-------------\\n\")\n\nplt.plot(Train_date [:-1],Q[:,0], label = 'A = 0')\nplt.plot(Train_date [:-1],Q[:,1], label = 'A = 1')\nplt.plot(Train_date [:-1],Q[:,2], label = 'A = 2')\nplt.xlabel(\"Time\")\nplt.ylabel(\"Cash Value\")\nplt.legend()\nplt.show()\n\n\nplt.plot(Train_date [:-1],PI, label = 'policy')\nplt.xlabel(\"Time\")\nplt.ylabel(\"Optimal action \")\nplt.title(\"Optimal sequence of action\")\nplt.legend()\nplt.show()\n\n" }, { "alpha_fraction": 0.5936273336410522, "alphanum_fraction": 0.6243361830711365, "avg_line_length": 18.561086654663086, "blob_id": "110b10be22981db2c12055b383f48297b634697e", "content_id": "827e61482d513549f7b4178acec4e5ea9d021701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4338, "license_type": "no_license", "max_line_length": 92, "num_lines": 221, "path": "/TP2/TP2.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 6 14:13:01 2020\n\n@author: 
rahimamohamed\n\"\"\"\n\n\"\"\"\nGROUPE : Rahima Mohamed, Mahisha Uruthirasigamani, Danush Chandrarajah \n\nPROBLEM :\n \nYou need to sell a car. At every time t = 0, · · · , T − 1, you set a price pt\n, and a customer then views the car. \nThe probability that the customer buys a car at price p is D(p). \nIf the car is not sold at time T, then it is sold for a fixed price WT , WT < 1.\nMaximize the reward from selling the car and find the recursion for the optimal \nreward, when D(p) = (1 − p)+.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\" VARIABLE DECLARATION \" \n\n# Maturity\nT = 500 \n\n# if the car is not sell until T\n\nW_T = 0.3\n\n# Cumulative Reward\n\nW=np.zeros((T,1))\n\n\n#Price matrix\nP = np.zeros((T,1))\n\n\ntime=[i for i in range(0,T)]\n\n\n\"\"\" QUESTION 1 : Find the optimal strategy and the wealth \ni.e., the optimal expected cumulative reward) over time using \nthe Bellman equation. \n\n\"\"\"\n\ndef Wealth():\n #W_T is fixed and W_0 is equal to zeros\n W[T-1]=W_T\n W[0]=0\n for i in range (T-1,0,-1):\n W[i-1]= ((1+W[i])/2.0)**2\n W[0]=0\n return W\n\n\nW=Wealth()\n\n\nprint( \"\\n------Question 1 : the optimal expected cumulative reward------\\n\")\nprint (W)\n\ndef Price(W):\n P[T-1]=W_T\n for i in range (T-1,0,-1):\n P[i-1]= (1+W[i])/2.0 \n return P\nP=Price(W)\n\n\n\"\"\" QUESTION 2 : Plot the resulting optimal strategy and the wealth over time. \"\"\"\n\nprint( \"\\n----------------Question 2---------------\\n\")\nplt.plot(time,W)\nplt.title('EVOLUTION OF WEALTH')\nplt.ylabel('W')\nplt.xlabel('Time')\nplt.show()\n\nplt.plot(time,P)\nplt.title('EVOLUTION OF PRICE')\nplt.ylabel('P')\nplt.xlabel('Time')\nplt.show()\n\n\"\"\" QUESTION 3 : Choose a couple of other strategies and see the corresponding revenues. 
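(For reference when comparing the alternative strategies below: the recursion coded in Wealth() above is the Bellman optimum. With D(p) = (1 - p)+, at each t the seller solves

    W_t = max_p [ p * D(p) + (1 - D(p)) * W_{t+1} ] = max_p [ p(1 - p) + p * W_{t+1} ],

and the first-order condition 1 - 2p + W_{t+1} = 0 gives p_t* = (1 + W_{t+1}) / 2; substituting back yields W_t = ((1 + W_{t+1}) / 2)^2, which is exactly the update W[i-1] = ((1+W[i])/2)**2 and the price rule P[i-1] = (1+W[i])/2 implemented above. No constant, increasing, decreasing, or random price path can exceed this value, as the comparisons below illustrate.)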
\"\"\"\n\n\ndef Wealth1(P):\n W=P**2\n return W\n\nprint( \"\\n----------------Question 3: other strategies---------------\\n\")\n\n# 1st Strategy: P is constant\n \nprint( \"\\n 1ST STRATEGY : P=0.5 \\n\")\nP1 = np.array([0.5 for i in range(0,T)])\nW1=Wealth1(P1)\n\nplt.plot(time,W1)\nplt.title('EVOLUTION OF WEALTH')\nplt.ylabel('W')\nplt.xlabel('Time')\nplt.show()\n\nplt.plot(time,P1)\nplt.title('EVOLUTION OF PRICE')\nplt.ylabel('P')\nplt.xlabel('Time')\nplt.show()\n \n# 2nd Strategy: P is inscreasing\n\nprint( \"\\n 2ND STRATEGY : P IS INCREASING \\n\")\nP2=np.linspace(0,1,T)\n\nW2=Wealth1(P2)\nprint (np.where(W2==max(W2)))\n\nplt.plot(time,W2)\nplt.title('EVOLUTION OF WEALTH')\nplt.ylabel('W')\nplt.xlabel('Time')\nplt.show()\n\nplt.plot(time,P2)\nplt.title('EVOLUTION OF PRICE')\nplt.ylabel('P')\nplt.xlabel('Time')\nplt.show()\n\n\n# 3rd Strategy: P is decreasing\n\nprint( \"\\n 3RD STRATEGY : P IS DECREASING \\n\")\n\nP3=np.ones((T,1))\n\nfor i in range (0,T):\n P3[i]=P3[i-1]-1/(T-1)\n\nW3=Wealth1(P3)\n\nplt.plot(time,W3)\nplt.title('EVOLUTION OF WEALTH')\nplt.ylabel('W')\nplt.xlabel('Time')\nplt.show()\n\nplt.plot(time,P3)\nplt.title('EVOLUTION OF PRICE')\nplt.ylabel('P')\nplt.xlabel('Time')\nplt.show()\n\n# 4th strategy : P is Random\n\nprint( \"\\n 4TH STRATEGY : P IS RANDOM\\n\")\nP4=np.zeros((T,1))\nfor i in range (0,T):\n P4[i]=random.uniform(0,1)\n\nW4=Wealth1(P4)\nplt.figure(figsize=(14,7))\n\nplt.plot(time,W4)\nplt.title('EVOLUTION OF WEALTH')\nplt.ylabel('W')\nplt.xlabel('Time')\nplt.show()\n\nplt.figure(figsize=(14,7))\nplt.plot(time,P4)\nplt.title('EVOLUTION OF PRICE')\nplt.ylabel('P')\nplt.xlabel('Time')\nplt.show()\n\n\nprint(\"\\n\")\nprint (\"-------------------COMPARISON--------------------\")\nprint(\"\\n\")\n\nprint (\" WITH BELLMAN \")\nprint(\"\\nThe max of optimal cumulative reward is:\" , max(W))\nprint(\"The max is reached at this index:\",np.where(W==max(W)))\n\nprint(\"\\n\")\n\nprint (\" WITH P=0.25 \")\nprint(\"\\nThe max of optimal cumulative reward is:\",max(W1))\nprint(\"The max is reached at this index:\",0)\n\nprint(\"\\n\")\n\nprint (\" WITH P INCREASING \")\nprint(\"\\nThe max of optimal cumulative reward is:\", max(W2))\nprint(\"The max is reached at this index:\",np.where(W2==max(W2)))\n\nprint(\"\\n\")\n\nprint (\" WITH P DECREASING \")\nprint(\"\\nThe max of optimal cumulative reward is:\",max(W3))\nprint(\"The max is reached at this index:\",np.where(W3==max(W3)))\n\nprint(\"\\n\")\n\nprint (\" WITH P RANDOM \")\nprint(\"\\nThe max of optimal cumulative reward is:\",max(W4))\nprint(\"The max is reached at this index:\",np.where(W4==max(W4)))\n\n\na=np.where(W4==max(W4))\nb=a[0][0]-1\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5308414101600647, "alphanum_fraction": 0.5645285248756409, "avg_line_length": 18.569276809692383, "blob_id": "b56bd93b63579d5f7e1bf224b43f5966b5d4cb97", "content_id": "ee00d12cbf7c03b72a2a6bb1407755f613b6685a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6507, "license_type": "no_license", "max_line_length": 79, "num_lines": 332, "path": "/TP1/TP1.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#testedanushpourrahima\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 30 18:33:37 2020\n\n@author: rahimamohamed\n\"\"\"\n\n\"\"\"\nGROUPE : Rahima Mohamed, Mahisha Uruthirasigamani, Danush Chandrarajah \n\nPROBLEM :\n \nAn investor has a fund. It has 1 million euros at time zero. 
\nIt pays 5% interest per year for T=50 years. \nThe investor cannot withdraw the invested money. \nBut, (s)he consumes a proportion (at) of the interest at time t and \nreinvests the rest. \nWhat should the investor do to maximize the consumption before T?\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\" VARIABLE DECLARATION \" \n\n# Interest rate\nr = 0.05 \n\n# Maturity\nT = 50 \n\n# fund at t=0\nX_0 = 1000000 \n\n# Invested Capital\nX_invest = [] \n\n# Consumed Capital\nX_consum = [] \n\n# Cumulative Reward/Total consumption\n\nW=0\n\n# Sequence of actions \nA=np.zeros((T,1))\n\n\n\"\"\" QUESTION 1 : Implement the Bang Bang Controller \"\"\"\n\n##----------------Bang Bang Controller--------------##\n\ndef BangBang_Controller():\n    rho=1 # value of rho at (T-1)=1\n    RHO=[rho]\n    A[0]=1 # Value of a at (T-1) =1\n    for i in range (1,T):\n        if r>= 1/rho:\n            rho=(1+r) * rho\n            RHO.append(rho)\n            A[i]=0\n        if r< 1/rho:\n            rho = 1 + rho\n            RHO.append(rho)\n            A[i]=1\n    RHO.reverse() # so we reverse the list of rho values\n    A[:] = A[::-1] # we also reverse A \n    return RHO,A\n\nprint(\"\\n\")\nprint (\"--------------- THE SEQUENCE OF ACTIONS a --------------------\")\n\nB=BangBang_Controller()\nprint(B[1])\n\nprint(\"\\n\")\n\n##----------------The invested capital--------------##\n\ndef plant_equation(A):\n    X_invest= [X_0]\n    for i in range (1,T):\n        X_invest.append( X_invest[i-1] + r*X_invest[i-1]*(1-A[i-1]))\n    return X_invest\n\n\n\nX = plant_equation(B[1]) #B[1] : The sequence of actions a\n\nprint (\"--------------- EVOLUTION OF THE INVESTED CAPITAL--------------------\")\nprint(\"\\n\")\n\nprint(X)\nprint(\"\\n\") \n\ntime=[i for i in range(0,T)]\n\nplt.plot(time,X,color = 'red')\nplt.title('INVESTED CAPITAL EVOLUTION')\nplt.ylabel('invested capital')\nplt.xlabel('Time')\nplt.show()\n\n\n \n\"\"\" QUESTION 2 : Compute the corresponding total consumption and \nfind the sequence of optimal actions.\n\"\"\"\n\ndef consumption(A,X):\n    X_consum=[]\n    S=0\n    for i in range (1,T+1):\n        X_consum.append(r*X[i-1]*A[i-1])\n        S+=X_consum[i-1]\n    return X_consum,S\n\nprint (\"--------------- CONSUMPTION ARRAY--------------------\")\nprint(\"\\n\")\n\nC=consumption(B[1],X) #B[1] : The sequence of actions a\nprint(C[0])\nprint(\"\\n\")\n\n\nprint (\"--------------- TOTAL CONSUMPTION --------------------\")\nprint(\"\\n\")\n\nprint(C[1])\nprint(\"\\n\")\n\n\n \n\"\"\" QUESTION 3 : Plot the consumption as a function of time\n\"\"\"\n\nprint (\"--------------- EVOLUTION OF CONSUMPTION --------------------\")\nprint(\"\\n\")\n\nplt.plot(time,C[0],color= 'green')\nplt.title('CONSUMPTION EVOLUTION')\nplt.ylabel('consumption')\nplt.xlabel('Time')\nplt.show()\n\n\n\"\"\" QUESTION 4 : Plot the action sequence as a function of time.\n\"\"\"\n\nprint(\"\\n\")\nprint (\"--------------- EVOLUTION OF ACTIONS --------------------\")\nprint(\"\\n\")\n\nplt.plot(time,B[1],color = 'purple')\nplt.title('PROPORTION (a) EVOLUTION')\nplt.ylabel('proportion a')\nplt.xlabel('Time')\nplt.show()\n\ngama = np.zeros((T,1))\nrho= BangBang_Controller()[0]\n\ndef gamma():\n    \n    for i in range(1,T):\n        gama[i-1] = (1+r) * rho[i] + (1+r*rho[i])*A[i-1]\n    \n    return gama\n\nY1 = []\nY2 = []\nfor i in range (0,T):\n    Y1.append (1+rho[i]) #a=1\n    \nfor i in range (0,T):\n    Y2.append ((1 + r)*rho[i]) #a=0\n\nprint(\"\\n\") \nprint (\"--------------- CONSUME AND SAVE --------------------\")\nprint(\"\\n\")\n\nG = gamma()\nplt.plot(rho,Y1,label = 'Y1',color = 'blue')\nplt.plot(rho,Y2, label = 'Y2', color
='magenta')\nplt.plot(rho,G, label = 'Gamma', color= 'orange')\nplt.legend()\nplt.show()\n\n\n\n\"\"\" QUESTION 5 : Choose a couple of other strategies (controllers) \nto compare their respective total consumption to that obtained using \nthe bang-bang approach.\n\"\"\"\n\nprint(\"\\n\")\nprint (\"--------------- OTHER STRATEGIES --------------------\")\nprint(\"\\n\")\n\n##----------1st strategy: if A is always equal to 1----------##\n\n\nprint (\"The total consumption with bang bang controller is:\")\nC=consumption(B[1],X)\nprint(C[1])\nprint(\"\\n\")\n\n\nprint (\" IF A IS ALWAYS EQUAL TO 1 : \")\n\nA1=np.ones((T,1))\nX1 = plant_equation(A1)\n\nprint (\"The total consumption is:\")\nC1=consumption(A1,X1)\nprint(C1[1])\n\nprint(\"\\n\")\n\n\n##----------2nd strategy: if A is increasing over time----------##\n\nprint (\" IF A IS INCREASING : \")\n\nA2= np.arange(0,1,1/(T-1))\nX2 = plant_equation(A2)\n\nprint (\"The total consumption is:\")\nC2=consumption(A2,X2)\n\nprint(C2[1])\n\nprint(\"\\n\")\n\n\n##----------3rd strategy: if A is decreasing over time----------##\n\nprint (\" IF A IS DECREASING : \")\n\nA3= np.zeros((T,1))\nA3[0]=1\nA3[T-1]=0\nfor i in range (1,T-1):\n    A3[i]=A3[i-1]-1/(T-1)\n\nX3 = plant_equation(A3)\n\nprint (\"The total consumption is:\")\nC3=consumption(A3,X3)\n\nprint(C3[1])\n\nprint(\"\\n\")\n\n\n\n##----------4th strategy: if A is random----------##\n\nprint (\" IF A IS RANDOM : \")\n\nA4=np.zeros((T,1))\nfor i in range (0,T):\n    A4[i]=random.uniform(0,1)\n    \nX4 = plant_equation(A4)\n\nprint (\"The total consumption is:\")\nC4=consumption(A4,X4)\n\nprint(C4[1])\n\nprint(\"\\n\") \n\n\n##----------5th strategy: if A is increasing then decreasing----------##\n\nprint (\" IF A IS INCREASING THEN DECREASING : \")\n\nA5= np.arange(0,1,1/49)\nA5[0]=0\n\nfor i in range (25,T):\n    A5[i]=abs(A5[i-1]-1/(T-1))\n    \nA5[T-1]=0\n\nX5 = plant_equation(A5)\n\nprint (\"The total consumption is:\")\nC5=consumption(A5,X5)\n\nprint(C5[1])\nprint(\"\\n\") \n\n\n\n##----------6th strategy: if A is equal to 0 and 1 at t=T-1----------##\n\nprint (\" IF A IS EQUAL TO 0 AND 1 AT t=T-1: \")\n\nA6=np.zeros((T,1))\nA6[T-1]=1\n\nX6 = plant_equation(A6)\n\nprint (\"The total consumption is:\")\nC6=consumption(A6,X6)\n\nprint(C6[1])\nprint(\"\\n\") \n\nprint (\" GRAPH OF A EVOLUTION: \")\n\nplt.figure(figsize=(7,7))\n\n\nplt.plot(time,A2, label = 'A increasing', color ='magenta')\nplt.plot(time,A3, label = 'A decreasing', color= 'orange')\nplt.plot(time,A6, label = 'A=0 and 1 at t=T-1', color= 'purple')\nplt.legend()\nplt.show()\nplt.figure(figsize=(7,7))\nplt.plot(time,A1,label = 'A=1',color = 'blue')\nplt.plot(time,A4,label = 'A random',color = 'red')\nplt.plot(time,A5, label = 'A increase and decrease', color ='green')\n\nplt.legend()\nplt.show()\n\n\n\n\n" }, { "alpha_fraction": 0.4600438177585602, "alphanum_fraction": 0.4883856773376465, "avg_line_length": 22.537931442260742, "blob_id": "c03280cb65657d5c7561dd1fe0beb797c791dce8", "content_id": "71fc8ea923c1e9511bb8ca6e4c0f4859f580a26f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6849, "license_type": "no_license", "max_line_length": 101, "num_lines": 290, "path": "/TP4/brouillon1.py", "repo_name": "Rahima-web/Hamiltonien", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 22 14:44:44 2020\n\n@author: rahimamohamed\n\"\"\"\n\nimport random as rd\nimport numpy as np\nimport pandas as pd\nimport
matplotlib.pyplot as plt\nimport seaborn as sns\n\n\"\"\" VARIABLE DECLARATION \"\"\"\n\n# number of parking lots\n\nN = 3\n#p_max = 0.5\n#p_min = 0.2\n#size_min=40\n#size_max=300\np=0.3\n#size = 400\n\n\"\"\" QUESTION 1 : Generate several maps with different parking-space distributions. \"\"\"\n\ndef Parking_Creation(size,p):\n    # The probability p that a space is free :\n    #p = rd.uniform(p_max,p_min)\n    \n    # Define the size of the parking :\n    #size = int(rd.uniform(size_min,size_max))\n    \n    # Create the parking : x=0 => space is not free && x=1 => space is free :\n    availability = rd.choices([0,1] , [1-p,p] , k=size)\n    \n    return availability,p,size\n\n\ndef create_all_parkings(n,p):\n#    print(\"please enter the desired number of parking lots\")\n#    n=input()\n#    n=int(n)\n    Liste_of_Parking=[]\n    size=50\n    #p=0.1\n    for i in range(0,n):\n        Liste_of_Parking.append(Parking_Creation(size,p))\n        size+=100\n        #p+=0.4    \n    return Liste_of_Parking\n\n#L=create_all_parkings(3,0.1)\n\n    \n    \n\n\n\"\"\" QUESTION 2 : Implement the parking strategy shown in class for the generated maps.\"\"\"\n\ndef Parking_Strategy(availability,p,size):\n    D = size+50 #Cost of passing your destination without parking\n    S,X,STOP,A = [],[],[],[] #S: space; X: availability, STOP : cost of stopping; A: action to do \n    for index,x in enumerate(availability) :\n        s = size-index #space number\n        S += [s]\n        X += [x]\n        if x==0:\n            STOP += [float('nan')] #the space is not free\n            A += ['Continue']\n        else :\n            stop = (D*p+1) * ((1-p)**s)\n            STOP += [stop] \n            if stop >= 1:\n                A += ['PARKING']\n                return S,X,STOP,A\n            else:\n                A += ['Continue']\n    \n\ndef All_parking_strategy(n,p):\n    H=[]\n    L=create_all_parkings(n,p)\n    for i in range (0,len(L)):\n        H.append(Parking_Strategy(L[i][0],L[i][1],L[i][2]))\n    \n    return H\n\nH=All_parking_strategy(N,p)\nfor i in range (0,len(H[0][3])):\n    if H[0][3][i]=='PARKING':\n        print(\"the index is:\",i)\n\n\"\"\" QUESTION 3 : Discuss your results.
\"\"\"\n\n\nsns.set_style('darkgrid')\n\ndef traces(X,Y):\n plt.scatter(X,Y)\n plt.plot(X,[1]*len(X), 'b--')\n #plt.xlim(X[len(X)-40], 0) # decreasing S\n plt.title('STOP CONTROLLER EVOLUTION AS A FUNCTION OF PLACE S')\n plt.xlabel('S')\n plt.ylabel('STOP CONTROLER VALUE')\n plt.tight_layout()\n plt.show()\n \nprint(\"\\n--------------------COMPARISON: FOR DIFFERENT P AND SIZE------------------\\n\")\n\n\"\"\" pour p =0.1 \"\"\"\n\nprint(\" -----------IF P=0.1----------- \\n\")\np=0.1\nL=create_all_parkings(N,p)\nH=All_parking_strategy(N,p)\nfor i in range(0,len(L)):\n print (\" \\nPARKING NUMBER :\",(i+1),\" , WITH SIZE:\",L[i][2],\"\\n\")\n# L=create_all_parkings(N,p)\n# H=All_parking_strategy(N,p)\n traces(H[i][0],H[i][2])\n for j in range (0,len(H[i][3])):\n if H[i][3][j]=='PARKING':\n print(\"\\n The parking place is:\",j+1,\"\\n\")\n \n \n\n\n\"\"\" pour p =0.5 \"\"\"\nprint(\" \\n -----------IF P=0.5----------- \\n\")\np=0.5\nL=create_all_parkings(N,p)\nH=All_parking_strategy(N,p)\nfor i in range(0,len(L)):\n print (\" \\nPARKING NUMBER :\",(i+1),\" , WITH SIZE:\",L[i][2],\"\\n\")\n# L=create_all_parkings(N,p)\n# H=All_parking_strategy(N,p)\n traces(H[i][0],H[i][2])\n for j in range (0,len(H[i][3])):\n if H[i][3][j]=='PARKING':\n print(\"\\n The parking place is:\",j+1,\"\\n\")\n\n\"\"\" pour p =0.9 \"\"\"\nprint(\" \\n -----------IF P=0.9----------- \\n\")\np=0.9\nL=create_all_parkings(N,p)\nH=All_parking_strategy(N,p)\nfor i in range(0,len(L)):\n print (\" \\nPARKING NUMBER :\",(i+1),\" , WITH SIZE:\",L[i][2],\"\\n\")\n# L=create_all_parkings(N,p)\n# H=All_parking_strategy(N,p)\n traces(H[i][0],H[i][2])\n for j in range (0,len(H[i][3])):\n if H[i][3][j]=='PARKING':\n print(\"\\n The parking place is:\",j+1)\n \n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#n=3\n#\n#def create_parking():\n# # Define probability p of a place is free :\n# p = rd.uniform(0.5,0.3)\n# \n# # Define the size of the parking :\n# size = int(rd.uniform(40,300))\n# \n# # Create the parking : x=0 => occupied / x=1 => free :\n# parking = rd.choices([0,1] , [1-p,p] , k=size)\n# \n# return parking,p,size\n#\n#\n#def nb_park(n):\n## print(\"veuillez entrer le nombre de parking souhaité\")\n## n=input()\n## n=int(n)\n# Liste_Parking=[]\n# for i in range(0,n):\n# Liste_Parking.append(create_parking())\n# return Liste_Parking\n# \n##\n##P1= create_parking()\n##P2=create_parking()\n##P3=create_parking()\n#\n#L=nb_park(n)\n#\n##Free =[]\n##Not_Free =[]\n##for i in range (0, len(L[0][0])):\n## if L[0][0][i]==1:\n## Free.append(L[0][0][i])\n## if L[0][0][i]==0:\n## Not_Free.append(L[0][0][i])\n##\n##X= [i for i in range (0,L[0][2])]\n##plt.scatter(X,L[0][0])\n##plt.plot(X[L[0][2]-40],L[0][0],colour = 'blue')\n##plt.xlim(X[len(X)-40], 0)\n##plt.tight_layout()\n##plt.show()\n#\n#\n#\n#def se_garer(parking,p,size):\n# D = size+1\n# S,X,STOP,A = [],[],[],[] \n# for index,x in enumerate(parking) :\n# s = size-index \n# S += [s]\n# X += [x]\n# if x==0:\n# STOP += [float('nan')]\n# A += ['Continue']\n# else :\n# stop = (D*p+1) * ((1-p)**s)\n# STOP += [stop] \n# if stop >= 1:\n# A += ['Se gare']\n# return S,X,STOP,A\n# else:\n# A += ['Continue']\n# \n#\n#def h():\n# H=[]\n# for i in range (0,len(L)):\n# H.append(se_garer(L[i][0],L[i][1],L[i][2]))\n# \n# return H\n#H=h()\n# \n#\n##se_garer(P1[0],P1[1],P1[2])\n##se_garer(P2[0],P2[1],P2[2])\n##se_garer(P3[0],P3[1],P3[2])\n#\n#\n#sns.set_style('darkgrid')\n#\n#def traces(X,Y):\n# plt.scatter(X,Y)\n# plt.plot(X,[1]*len(X), 'b--')\n# plt.xlim(X[len(X)-40], 0) # decreasing S\n# 
plt.title('Évolution du stop-controleur en fonction de S')\n# plt.xlabel('S')\n# plt.ylabel('Valeur du stop-controleur')\n# plt.tight_layout()\n# plt.show()\n# \n################################################################################\n#\n#for i in range(0,len(L)):\n# L=nb_park(n)\n# H=h()\n# traces(H[i][0],H[i][2])\n# \n# \n# \n# \n##if __name__ == '__main__':\n## \n## park,p,s = create_parking()\n## S,X,Stop,Action = se_garer(park,p,s)\n## \n## df = pd.DataFrame({'S':S,'État (x)':X,'Stop Controleur':Stop,'Action':Action})\n## traces(S,Stop)\n# \n# \n# \n \n \n \n" } ]
7
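The TP2 script in the record above iterates the Bellman recursion W[t-1] = ((1 + W[t]) / 2)**2 without spelling out where it comes from: posting price p at time t sells with probability D(p) = 1 - p for reward p, and otherwise (probability p) the continuation value W[t+1] applies, so the expectation p(1 - p) + p*W[t+1] is maximised at p* = (1 + W[t+1]) / 2 and equals p***2 there. Below is a minimal standalone sketch checking that recursion; it is a hypothetical helper, not a file from the repository, with parameter defaults mirroring the script.

# Value iteration for the car sale; assumes T = 500 and W_T = 0.3 as in TP2.
def optimal_rewards(T=500, W_T=0.3):
    W = [0.0] * T
    W[T - 1] = W_T
    for t in range(T - 1, 0, -1):
        # p* = (1 + W[t]) / 2 maximises p(1 - p) + p * W[t];
        # substituting p* back in gives the squared form below.
        W[t - 1] = ((1.0 + W[t]) / 2.0) ** 2
    return W

W = optimal_rewards()
# Backwards in time the value can only grow, and every optimal price
# p* = (1 + W[t]) / 2 stays in (0, 1], so D(p*) = 1 - p* is a valid probability.
assert all(W[t] >= W[t + 1] for t in range(len(W) - 1))
assert all(0.0 < (1.0 + w) / 2.0 <= 1.0 for w in W)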
piotr-cieslik/EpicReaderProcessor
https://github.com/piotr-cieslik/EpicReaderProcessor
320970d06c0171f0a4e8d8432079bbd949854c1d
8c2b470515b40b2809513638d7b4f4135c52f957
7137e00632b976d1dced33f60716027a95256a58
refs/heads/master
2022-03-14T02:30:45.497005
2019-11-06T03:58:18
2019-11-06T03:58:18
219,908,272
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7120419144630432, "alphanum_fraction": 0.713945746421814, "avg_line_length": 32.36507797241211, "blob_id": "25c595b288f8f31cfec7a018495a137a4b033bdf", "content_id": "2c8646b8413ec6ced9ea055a9a185ea21e145794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2101, "license_type": "no_license", "max_line_length": 85, "num_lines": 63, "path": "/epic-reader-processor.py", "repo_name": "piotr-cieslik/EpicReaderProcessor", "src_encoding": "UTF-8", "text": "import textract\nimport sys\nimport os\nimport time\nimport json\n\n# number of seconds between checks\npause = 15\n\n# define paths to directories\ndirectory_root = \"/tmp/epic-reader/\"\ndirectory_queued = os.path.join(directory_root, \"queued\")\ndirectory_processing = os.path.join(directory_root, \"processing\")\ndirectory_processed = os.path.join(directory_root, \"processed\")\ndirectory_result = os.path.join(directory_root, \"result\")\n\n# Doesn't work for Python 2.\n# Using Python 3 causes problems with the encoding of the textract result.\n# make sure that directories exist\n# os.makedirs(directory_queued, exist_ok=True)\n# os.makedirs(directory_processing, exist_ok=True)\n# os.makedirs(directory_processed, exist_ok=True)\n# os.makedirs(directory_result, exist_ok=True)\n\ndef process_files():\n    print(\"start processing files...\")\n    for file in os.listdir(directory_queued):\n        processing_start_time = int(time.time())\n        file_name = os.path.basename(file)\n        file_path_queued = os.path.join(directory_queued, file_name)\n        file_path_processing = os.path.join(directory_processing, file_name)\n        file_path_processed = os.path.join(directory_processed, file_name)\n        file_path_result = os.path.join(directory_result, file_name + \".json\")\n        \n        # Ensure that no one else uses this file.\n        # A move is an atomic operation on Linux.\n        os.rename(file_path_queued, file_path_processing)\n        text = textract.process(file_path_processing, method=\"tesseract\", language=\"pol\")\n        processing_end_time = int(time.time())\n\n        # Create result\n        result = {\n            \"status\": \"success\",\n            \"text\": text,\n            \"processingStartTime\": processing_start_time,\n            \"processingEndTime\": processing_end_time,\n        }\n\n        # Serialize result to json\n        json_result = json.dumps(result)\n        \n        # Write result to file\n        file_result = open(file_path_result, \"w\")\n        file_result.writelines(json_result)\n        file_result.close()\n        \n        # Mark file as processed by moving it to processed directory\n        os.rename(file_path_processing, file_path_processed)\n    print(\"end processing files.\")\n\nwhile True:\n    process_files()\n    time.sleep(pause)" } ]
1
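The processor above relies on the comment "A move is an atomic operation on Linux" to guarantee each queued file is claimed exactly once. A standalone sketch of that claim-by-rename step (hypothetical helper, not part of the repository): on POSIX systems, os.rename within a single filesystem is atomic, so if two workers race for the same queued file, exactly one rename succeeds and the loser gets FileNotFoundError.

import os

def try_claim(queued_path, processing_dir):
    # Atomically move the file out of the queue to claim it.
    target = os.path.join(processing_dir, os.path.basename(queued_path))
    try:
        os.rename(queued_path, target)  # atomic within one filesystem
        return target                   # this worker now owns the file
    except FileNotFoundError:
        return None                     # another worker claimed it first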
poojakhatri/DjangoQuiz
https://github.com/poojakhatri/DjangoQuiz
8a50e040da417d11665cc82244c64eccddb3a13a
0e27c2fd3a46cbadf340060e84474c4f3f7d95d1
400c7a81d6021882071026767363ba7bf9f91782
refs/heads/master
2020-08-22T09:44:29.602689
2019-10-21T01:44:41
2019-10-21T01:44:41
216,368,510
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7083029747009277, "alphanum_fraction": 0.7097596526145935, "avg_line_length": 29.5222225189209, "blob_id": "980fd497ed36b779ebcca8126ceb142cb3785b60", "content_id": "2b022e785bfffa35c5a3be294102aa5ee488836f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2746, "license_type": "no_license", "max_line_length": 77, "num_lines": 90, "path": "/djangoquiz/quiz/views.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom .models import *\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom .forms import SignUpForm\n\n# from django.core.urlresolvers import reverse\n# Create your views here.\n@login_required(login_url='/register/')\ndef home(request):\n\n\treturn render(request, 'quiz/home.html')\n\n\ndef about(request):\n\t\n\treturn render(request, 'quiz/about.html', {'title': 'About'})\n\n\ndef about1(request):\n\tobj = Stat.objects.all()\n\treturn render(request, 'quiz/about1.html',{'obj':obj})\n\n\ndef register(request):\n\n\tif request.method == \"POST\":\n\t\tform = SignUpForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tusername=form.cleaned_data.get('username')\n\t\t\tmessages.success(request, f'Account created for {username}!')\n\t\t\tform.save()\n\t\t\treturn redirect('quiz-register')\n\telse:\n\t\tform = SignUpForm()\n\treturn render(request, 'quiz/register.html', {'form': form})\n\n@login_required(login_url='/register/')\ndef math_view(request):\n\tif request.method == 'GET':\n\t\tmath_quiz_questions = Math.objects.all().order_by('?')\n\t\treturn render(request, 'quiz/math.html', {\"quizz\": math_quiz_questions})\n\n\tif request.method == 'POST':\n\t\tform_data = request.POST.dict()\n\t\tuser = request.user\n\t\tquestion_keys = filter(lambda ques: ques.isdigit(), list(form_data.keys()))\n\t\tfor question_no in question_keys:\n\t\t\t\n\t\t\tif \"ch_\" in form_data.get(question_no):\n\n\t\t\t\tmath_obj = Math.objects.get(si_no=question_no)\n\t\t\t\tmath_quiz_ans_obj = MathQuizAnswer()\n\t\t\t\tmath_quiz_ans_obj.user = user\n\t\t\t\tmath_quiz_ans_obj.mat_quiz = math_obj\n\t\t\t\tmath_quiz_ans_obj.answer = form_data.get(question_no)\n\t\t\t\tmath_quiz_ans_obj.save()\n\n\t\treturn redirect('quiz-math_view')\n\n\n@login_required(login_url='/register/')\ndef stat_view(request):\n\tif request.method == 'GET':\n\t\tstat_quiz_questions = Stat.objects.exclude(\n\t\t\t_id__in=list(StatQuizAnswer.objects.values_list('stat_quiz', flat=True))\n\t\t)[:10]\n\t\treturn render(request, 'quiz/stat.html', {\"quizz\": stat_quiz_questions})\n\n\tif request.method == 'POST':\n\t\tform_data = request.POST.dict()\n\t\tuser = request.user\n\t\tquestion_keys = filter(lambda ques: ques.isdigit(), list(form_data.keys()))\n\t\tfor question_no in question_keys:\n\t\t\t\n\t\t\tif \"ch_\" in form_data.get(question_no):\n\n\t\t\t\tstat_obj = Stat.objects.get(si_no=question_no)\n\t\t\t\tstat_quiz_ans_obj = StatQuizAnswer()\n\t\t\t\tstat_quiz_ans_obj.user = user\n\t\t\t\tstat_quiz_ans_obj.stat_quiz = stat_obj\n\t\t\t\tstat_quiz_ans_obj.answer = form_data.get(question_no)\n\t\t\t\tstat_quiz_ans_obj.save()\n\n\t\treturn redirect('quiz-stat_view')" }, { "alpha_fraction": 0.5406249761581421, 
"alphanum_fraction": 0.589062511920929, "avg_line_length": 26.826086044311523, "blob_id": "abfc3734c41d6adcf1cc78583cfb65e8e6f9af69", "content_id": "2bd4113ef882b885638f89103f2dea80dc1e6857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 108, "num_lines": 23, "path": "/djangoquiz/quiz/migrations/0005_auto_20191019_1535.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-19 15:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quiz', '0004_auto_20191019_1523'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='math',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='stat',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n ]\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5437219738960266, "avg_line_length": 26.030303955078125, "blob_id": "96dd799b0b887c845c68c79659db1515fa050f4d", "content_id": "c1fde4b8deb5ddc118af76fa4925fc0b4abb35da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "no_license", "max_line_length": 97, "num_lines": 33, "path": "/djangoquiz/quiz/migrations/0007_auto_20191019_1620.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-19 16:20\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quiz', '0006_auto_20191019_1613'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='math',\n name='test_id',\n ),\n migrations.RemoveField(\n model_name='stat',\n name='test_id',\n ),\n migrations.AddField(\n model_name='math',\n name='_id',\n field=models.CharField(default=1, max_length=100, primary_key=True, serialize=False),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='stat',\n name='_id',\n field=models.CharField(default=3, max_length=100, primary_key=True, serialize=False),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5022522807121277, "alphanum_fraction": 0.5439189076423645, "avg_line_length": 25.909090042114258, "blob_id": "20301b97c3c91649c9f38e1ca243b0e878877055", "content_id": "b3f2665f152d0f48a7c65d4216d43746e43eca02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 888, "license_type": "no_license", "max_line_length": 96, "num_lines": 33, "path": "/djangoquiz/quiz/migrations/0006_auto_20191019_1613.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-19 16:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quiz', '0005_auto_20191019_1535'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='math',\n name='id',\n ),\n migrations.RemoveField(\n model_name='stat',\n name='id',\n ),\n migrations.AddField(\n model_name='math',\n name='test_id',\n field=models.CharField(default=1, max_length=30, primary_key=True, serialize=False),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='stat',\n name='test_id',\n field=models.CharField(default=4, max_length=30, primary_key=True, 
serialize=False),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5047619342803955, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 21.105262756347656, "blob_id": "c28d2c98a4f3fe8905fd941ecada6e27c40c3d2c", "content_id": "07b87dc4dded9a93968874ac5c23226e7498f497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 85, "num_lines": 19, "path": "/djangoquiz/quiz/migrations/0003_auto_20191019_1518.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-19 15:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quiz', '0002_auto_20191013_0543'),\n ]\n\n operations = [\n \n migrations.AlterField(\n model_name='math',\n name='id',\n field=models.CharField(max_length=20, primary_key=True, serialize=False),\n ),\n ]\n" }, { "alpha_fraction": 0.6341991424560547, "alphanum_fraction": 0.661796510219574, "avg_line_length": 21.54878044128418, "blob_id": "d99c4e1c7e8f97d7f746051a0114f8ee3ce558ab", "content_id": "74806e0829b6e72081ca4aa361922a74f108e4f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 101, "num_lines": 82, "path": "/djangoquiz/quiz/models.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Ques(models.Model): # Collection name\n\t# Documents\n\t_id = models.CharField(max_length=100, primary_key=True)\n\tsi_no = models.IntegerField(null=True)\n\tlevel = models.IntegerField(null=True)\n\tquestion = models.TextField()\n\tch_1 = models.TextField()\n\tch_2 = models.TextField()\n\tch_3 = models.TextField()\t\n\tch_4 = models.TextField()\n\tanswer = models.CharField(max_length=10,null=True)\n\n\n\tclass Meta:\n\t\tabstract = True\n\n\tdef __str__(self):\n\t\treturn str(self.level)\n\n\t@property\n\tdef detail(self):\n\t\treturn {\n\t\t\t\"_id\": self._id,\n\t\t\t\"si_no\": self.si_no,\n\t\t\t\"level\": self.level,\n\t\t\t\"question\":self.question,\n\t\t\t\"ch_1\": self.ch_1,\n\t\t\t\"ch_2\": self.ch_2,\n\t\t\t\"ch_3\": self.ch_3,\n\t\t\t\"ch_4\": self.ch_4,\n\t\t\t\"answer\": self.answer\n\n\t\t}\n\t\n\[email protected]\n\tdef detail(self, any_):\n\t\traise AttributeError\n\t\n\t\n\nclass Math(Ques):\n\tpass\n\n\nclass Stat(Ques):\n\tpass\n\n\t\nclass MathQuizAnswer(models.Model):\n\tuser = models.ForeignKey(User, related_name=\"MathQuizAnswersAsUser\", on_delete=models.CASCADE)\n\tmat_quiz = models.ForeignKey(Math, related_name=\"MathQuizAnswersAsMath\", on_delete=models.CASCADE)\n\tCH_1 = \"ch_1\"\n\tCH_2 = \"ch_2\"\n\tCH_3 = \"ch_3\"\n\tCH_4 = \"ch_4\"\n\tCHOICES = (\n\t\t(CH_1, \"Answer 1\"),\n\t\t(CH_2, \"Answer 2\"),\n\t\t(CH_3, \"Answer 3\"),\n\t\t(CH_4, \"Answer 4\"),\n\t)\n\tanswer = models.CharField(max_length=6, default='')\n\n\nclass StatQuizAnswer(models.Model):\n\tuser = models.ForeignKey(User, related_name=\"StatQuizAnswersAsUser\",on_delete=models.CASCADE)\n\tstat_quiz = models.ForeignKey('Stat', related_name=\"StatQuizAnswersAsStat\",on_delete=models.CASCADE)\n\tCH_1 = \"ch_1\"\n\tCH_2 = \"ch_2\"\n\tCH_3 = \"ch_3\"\n\tCH_4 = \"ch_4\"\n\tCHOICES = (\n\t\t(CH_1, \"Answer 1\"),\n\t\t(CH_2, \"Answer 2\"),\n\t\t(CH_3, \"Answer 3\"),\n\t\t(CH_4, \"Answer 4\"),\n\t)\n\tanswer = 
models.CharField(max_length=6, default='')" }, { "alpha_fraction": 0.5145630836486816, "alphanum_fraction": 0.594660222530365, "avg_line_length": 21.88888931274414, "blob_id": "0e934d0c67225c245df6cab03a8f9c5eecb56dd6", "content_id": "6162b1201fa09e4a4017330a1fa2ee4cd7b4ad76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 85, "num_lines": 18, "path": "/djangoquiz/quiz/migrations/0004_auto_20191019_1523.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-19 15:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quiz', '0003_auto_20191019_1518'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='stat',\n name='id',\n field=models.CharField(max_length=20, primary_key=True, serialize=False),\n ),\n ]\n" }, { "alpha_fraction": 0.5018404722213745, "alphanum_fraction": 0.525153398513794, "avg_line_length": 23.696969985961914, "blob_id": "b80abc1a241019ffaa4f84538bd78b5badc70909", "content_id": "ba46478c4211230355dc6eac13205b16766ac6c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 49, "num_lines": 33, "path": "/djangoquiz/quiz/migrations/0002_auto_20191013_0543.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-13 05:43\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quiz', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='math',\n name='level',\n field=models.IntegerField(null=True),\n ),\n migrations.AlterField(\n model_name='math',\n name='si_no',\n field=models.IntegerField(null=True),\n ),\n migrations.AlterField(\n model_name='stat',\n name='level',\n field=models.IntegerField(null=True),\n ),\n migrations.AlterField(\n model_name='stat',\n name='si_no',\n field=models.IntegerField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.4655679762363434, "alphanum_fraction": 0.48852264881134033, "avg_line_length": 34.39583206176758, "blob_id": "66cf9c908c78980819ceec7c8f2d7dd141233b96", "content_id": "0cda8f20bc1c78ced86365b3b60abcb75a21244a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1699, "license_type": "no_license", "max_line_length": 114, "num_lines": 48, "path": "/djangoquiz/quiz/migrations/0001_initial.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-10-13 05:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Math',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('si_no', models.IntegerField(max_length=200, null=True)),\n ('level', models.IntegerField(max_length=200, null=True)),\n ('question', models.TextField()),\n ('ch_1', models.TextField()),\n ('ch_2', models.TextField()),\n ('ch_3', models.TextField()),\n ('ch_4', models.TextField()),\n ('answer', models.CharField(max_length=10, null=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Stat',\n fields=[\n ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')),\n ('si_no', models.IntegerField(max_length=200, null=True)),\n ('level', models.IntegerField(max_length=200, null=True)),\n ('question', models.TextField()),\n ('ch_1', models.TextField()),\n ('ch_2', models.TextField()),\n ('ch_3', models.TextField()),\n ('ch_4', models.TextField()),\n ('answer', models.CharField(max_length=10, null=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6614173054695129, "alphanum_fraction": 0.6771653294563293, "avg_line_length": 28.941177368164062, "blob_id": "0d9b353cf4e941ddb684a49ea4f55f5bd11971f0", "content_id": "58b7b450a98ad0d9f8633cb3add2066859fb7f7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 88, "num_lines": 17, "path": "/djangoquiz/quiz/admin.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Math\nfrom .models import Stat\n\n# Register your models here.\n\[email protected](Math)\nclass MathAdmin(admin.ModelAdmin):\n\tlist_display = ('_id','si_no', 'level','question','ch_1','ch_2','ch_3','ch_4','answer')\n\tordering = (\"si_no\",)\n\n# admin.site.register(Math)\n# admin.site.register(Stat)\[email protected](Stat)\nclass MathAdmin(admin.ModelAdmin):\n\tlist_display = ('_id','si_no', 'level','question','ch_1','ch_2','ch_3','ch_4','answer')\n\tordering = (\"si_no\",)" }, { "alpha_fraction": 0.6928701996803284, "alphanum_fraction": 0.6928701996803284, "avg_line_length": 48.727272033691406, "blob_id": "a65bdf6ba290cb8c38372dbd902ad5be6b06708d", "content_id": "f2a08425c26a9fac399c539f2fa264b9881a98fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 100, "num_lines": 11, "path": "/djangoquiz/quiz/urls.py", "repo_name": "poojakhatri/DjangoQuiz", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .import views\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [\n path('',views.home, name='quiz-home'),\n path('register/', views.register,name='quiz-register'),\n path('login/', auth_views.LoginView.as_view(template_name='quiz/login.html'), name='login'),\n path('logout/', auth_views.LogoutView.as_view(template_name='quiz/logout.html'), name='logout'),\n path('math_quiz/', views.math_view, name=\"quiz-math_view\"),\n path('stat_quiz/', views.stat_view, name=\"quiz-stat_view\"),\n]\n" } ]
11
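Both quiz views above reduce request.POST to its numeric keys (the question numbers) and keep only values naming one of the choice fields ch_1..ch_4. A compact standalone illustration of that extraction step, using hypothetical form data:

# Simulated Django POST payload; only digit keys carrying a ch_* choice survive.
form_data = {"csrfmiddlewaretoken": "abc", "3": "ch_2", "7": "ch_4", "note": "hi"}
answers = {int(k): v for k, v in form_data.items()
           if k.isdigit() and v.startswith("ch_")}
assert answers == {3: "ch_2", 7: "ch_4"}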
francis12/spider_base
https://github.com/francis12/spider_base
0bff786ef271322e70dd534dfbcee0fa8e63e632
52053f0ad29c2c9cfcd5e16e23e77b681ba28d7d
42b26fa8c651ae514df5d218efd607d0770545c5
refs/heads/master
2023-02-07T23:03:30.071542
2020-12-26T04:38:01
2020-12-26T04:38:01
324,481,536
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5437073707580566, "alphanum_fraction": 0.5468071699142456, "avg_line_length": 31.918367385864258, "blob_id": "ba3e4c23ba1818c83ab705323a220d62e9727fa3", "content_id": "cef2be63f1f21f20403e1ab583b9af353030fa98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1655, "license_type": "no_license", "max_line_length": 99, "num_lines": 49, "path": "/script/houniao/houniao_util.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom script.houniao.common import HNCommonParam\n\n\nclass DBUtil:\n    bp_pool = None\n    param = HNCommonParam\n\n    @classmethod\n    def get_session(cls):\n        # initialize the database connection pool:\n        cls.bp_pool = create_engine(cls.param.database_url, pool_size=cls.param.database_pool_size,\n                                    max_overflow=cls.param.database_max_overflow,\n                                    pool_recycle=3600 * 6) if cls.bp_pool is None else cls.bp_pool\n        # create the DBSession class\n        DBSession = sessionmaker(bind=cls.bp_pool)\n        session = DBSession()\n        return session\n\n\nclass BeanUtil:\n    @staticmethod\n    def copy_obj_properties(_from=None, to=None):\n        fields = dir(_from)\n        for field in fields:\n            if not (field.startswith(\"__\") or field.startswith(\"_\")):\n                if getattr(_from, field) is not None:\n                    if hasattr(to, field):\n                        # print(field)\n                        setattr(to, field, getattr(_from, field))\n                        # print(getattr(to, field))\n        return to\n\n    @staticmethod\n    def copy_dict_properties(_from=None, to=None):\n        for key in _from:\n            if _from[key] is not None:\n                to[key] = _from[key]\n        return to\n\n    @staticmethod\n    def item_to_bo(item, bo_class):\n        bo_instance = bo_class()\n        items = dict(item.items())\n        for key in items:\n            if hasattr(bo_class, key):\n                setattr(bo_instance, key, items[key])\n        return bo_instance\n" }, { "alpha_fraction": 0.5716741681098938, "alphanum_fraction": 0.5775439143180847, "avg_line_length": 48.37474060058594, "blob_id": "082e9a69a99af8b3eda8bc51febf404a068b5bc8", "content_id": "0385d75deba624e7df15feb1a51325ead80a2967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24306, "license_type": "no_license", "max_line_length": 148, "num_lines": 483, "path": "/script/houniao/houniao_script.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport hashlib\nimport os\nimport traceback\nimport json\nfrom urllib import parse\nimport math\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.cookies import RequestsCookieJar\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom script.houniao.common import HNCommonParam\nfrom script.houniao.houniao_bo import *\nfrom script.houniao.logging_utils import Logger\n\nclass DriverUtil():\n    base_dir = os.path.dirname(__file__)\n\n    def get_driver(self):\n        co = webdriver.ChromeOptions()\n        co.add_extension(os.path.join(self.base_dir, 'ivimm_chrome_proxyauth_plugin.zip'))\n        driver = webdriver.Chrome(os.path.join(self.base_dir, 'chromedriver.exe'), chrome_options=co)\n        return driver\n\n\nclass HouNiaoScript():\n\n    headers = {\n        'key': '00cad0fb5df34fbad99981b6c406d39e',\n        'Content-Type': 'application/x-www-form-urlencoded',\n        'accept': 'application/json',\n        'user-agent': 'MIX 2(Android/8.0.0) (com.houniao.hk/1.0.4) Weex/0.26.0 
1080x2030',\n }\n\n\n base_dir = os.path.dirname(__file__)\n base_url = 'http://www.houniao.hk'\n login_url = base_url + '/home/User/login'\n cookie_ts_key = 'Hm_lpvt_79b5f26a1df74b42929511d91ac22f32'\n download_path = os.path.join(HNCommonParam.script_download_path, time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n cookies_jar = None\n\n def login(self, driver):\n if driver is None or ('home/User/login' not in driver.current_url and 'data:,' != driver.current_url):\n return False\n try:\n driver.get(self.login_url)\n WebDriverWait(driver, 60 * 5, 3).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.userName')))\n time.sleep(1.6)\n driver.find_element_by_class_name('userName').send_keys(HNCommonParam.hn_username)\n time.sleep(1.6)\n driver.find_element_by_class_name('password').send_keys(HNCommonParam.hn_password)\n time.sleep(1.6)\n cnt = 0\n while driver.find_element_by_class_name('submit-form').is_displayed():\n if cnt > 10:\n self.login(driver)\n cnt = cnt+1\n driver.find_element_by_class_name('submit-form').click()\n time.sleep(1.8)\n if not driver.find_element_by_class_name('submit-form').is_displayed():\n WebDriverWait(driver, 60 * 1, 3).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.verify-code')))\n verify_exp = driver.find_element_by_class_name('verify-code').text.replace('=', '').replace('?', '').replace('×', '*')\n verify_exp_result = eval(verify_exp)\n Logger.log('验证码为:%s, 计算结果为:%s' % (verify_exp, verify_exp_result))\n driver.find_element_by_class_name('varify-input-code').send_keys(verify_exp_result)\n time.sleep(0.6)\n driver.find_element_by_class_name('cerify-code-button').click()\n WebDriverWait(driver, 60*2, 3).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.personal')))\n Logger.log('登录成功!')\n except Exception as err:\n traceback.print_exc()\n Logger.log('登录失败,重新登录: %r ' %(err))\n self.login(driver)\n return True\n\n def close_ad_window(self, driver):\n if driver.find_element_by_class_name('product-huodong-close').is_displayed():\n driver.find_element_by_class_name('product-huodong-close').click()\n\n\n def init_cookies(self):\n self.cookies_jar = RequestsCookieJar()\n with DriverUtil().get_driver() as driver:\n self.login(driver)\n cookies = driver.get_cookies()\n for cookie in cookies:\n self.cookies_jar.set(cookie['name'], cookie['value'])\n self.cookies_jar.set('productHuodongClose', 'yes')\n self.cookies_jar.set(self.cookie_ts_key, int(time.time()))\n\n # @retry(retry_on_exception=BaseException, wrap_exception=False, stop_max_attempt_number=5, wait_random_min=2000,\n # wait_random_max=5000)\n def request_get_proxy(self, url, params=None):\n Logger.log('request get:' + url)\n Logger.log(params)\n self.cookies_jar.set('productHuodongClose', 'yes')\n self.cookies_jar.set(self.cookie_ts_key, str(int(time.time())))\n while True:\n response = requests.get(url=url, params=params, cookies= self.cookies_jar, timeout= 60*5, verify=False)\n self.download_file(response)\n if response.status_code == 200:\n break\n else:\n Logger.log('%s请求失败:%d,重试'% (url, response.status_code))\n time.sleep(5)\n return response\n\n def request_proxy(self, url, params=None, headers=None, cookies=None):\n Logger.log('request get:' + url)\n Logger.log(params)\n while True:\n response = requests.get(url=url, params=params, headers=headers, cookies= cookies, timeout= 60*5, verify=False)\n self.download_file(response)\n if response.status_code == 200:\n break\n else:\n Logger.log('%s请求失败:%d,重试'% (url, response.status_code))\n time.sleep(5)\n return 
response\n\n\n def download_countdown_good_lists(self, url=None, activity_pid=None, activity_code=None):\n res = self.request_get_proxy(url)\n soup = BeautifulSoup(res.text)\n activity_goods = soup.select('div.activity-goodslist div.detail-wrap')\n for group_good in activity_goods:\n good = GoodBo()\n good.activity_pid = activity_pid\n good.activity_code = activity_code\n good.url = self.base_url + group_good.select_one('.goods-info .goods-link')['href']\n good.name = group_good.select_one('.goods-info .goods-link .goods-title').text\n good.code = self.get_code_from_href(group_good.select_one('.goods-info .goods-link')['href'])\n if self.download_pic_url(group_good.select_one('.goods-info .goods-link img.goods-image')['src'], file_name=good.code):\n good.pic_url = self.base_url + group_good.select_one('.goods-info .goods-link img.goods-image')['src']\n GoodDao.insert(good)\n\n\n def download_card_good_lists(self, url=None, activity_pid=None, activity_code=None):\n res = self.request_get_proxy(url)\n soup = BeautifulSoup(res.text)\n group_goods = soup.select('.result.module-floor')\n for group_good in group_goods:\n group_title = group_good.select_one('.floor-name')\n goods = group_good.select('.floor-detail div.goods-col')\n for good_item in goods:\n good = GoodBo()\n good.activity_pid = activity_pid\n good.activity_code = activity_code\n good.sub_title = group_title.text.strip()\n good.name = good_item.select_one('.goods-info .goods-name a')['title']\n good.url = self.base_url + good_item.select_one('.goods-info .goods-name a')['href']\n good.code = self.get_code_from_href(good_item.select_one('.goods-info .goods-name a')['href'])\n if self.download_pic_url( soup.select_one('.goods-img img')['data-original'], file_name=good.code):\n good.pic_url = self.base_url + soup.select_one('.goods-img img')['data-original']\n GoodDao.insert(good)\n\n def download_good_item_lists(self, list_url=None, activity_pid=None, activity_code=None):\n res = self.request_get_proxy(list_url)\n soup = BeautifulSoup(res.text)\n brand_title = soup.select_one('.brand-main .brand-title')\n brand_product_items = soup.select('.brand-product .product-main .product-items')\n for brand_product_item in brand_product_items:\n good = GoodBo()\n good.activity_pid = activity_pid\n good.activity_code = activity_code\n good.sub_sub_title = brand_title.text.strip()\n good.sub_title = self.base_url + brand_title.select_one('img')['src']\n # good.price = brand_product_item.select_one('.product-price').text.strip()\n product_info = brand_product_item.select_one('.product-name a')\n good.name = product_info.text.strip()\n good.url = self.base_url + product_info['href']\n good.code = self.get_code_from_href(product_info['href'])\n if self.download_pic_url(brand_product_item.select_one('.product-img img')['src'], file_name=good.code):\n good.pic_url = self.base_url + brand_product_item.select_one('.product-img img')['src']\n GoodDao.insert(good)\n\n\n def download_search_app_good_list(self, keyword=None, activity_pid=None, activity_code=None):\n url = 'https://www.houniao.hk/wxapi/goods/selectGoods'\n page = 1\n while True:\n params = {'page': page, 'limit': 8, 'keywords': keyword, 'sortType': '人气', 'tradeTypeId': 0, 'categoryIds': 0, 'categoryRank': 0}\n res = self.request_proxy(url=url, params=params, headers=self.headers).json()\n data_count = res['data']['dataCount']\n for data in res['data']['list']:\n good_bo = GoodBo()\n good_bo.activity_code = activity_code\n good_bo.activity_pid = activity_pid\n good_bo.name = 
data['goodsName']\n good_bo.code = data['goodsSku']\n pic_url = '/'+ data['goodsImg']\n if self.download_pic_url(pic_url, file_name=good_bo.code):\n good_bo.pic_url = self.base_url + pic_url\n GoodDao.insert(good_bo)\n page = page + 1\n if math.ceil(data_count / 8) < page:\n break\n\n def download_search_good_list(self, search_url=None, activity_pid=None, activity_code=None, limit_page_num=None):\n split_search_url = search_url[:search_url.index('?')]\n params = dict(parse.parse_qsl(search_url[search_url.index('?')+1:]))\n cur_num = 1\n first_run = True\n soup =None\n while first_run or (soup.select_one('#totalPage') is not None and cur_num < int(soup.select_one('#totalPage')['value'])):\n if first_run:\n first_run = False\n elif limit_page_num is not None and limit_page_num <= cur_num:\n return\n else:\n cur_num = cur_num+1\n params['index']=cur_num\n res = self.request_get_proxy(split_search_url, params= params)\n soup = BeautifulSoup(res.text)\n goods = soup.select('.result ul li.goods')\n for good in goods:\n good_bo = GoodBo()\n good_bo.activity_code = activity_code\n good_bo.activity_pid = activity_pid\n good_bo.name = good.select_one('.goods-name').text\n good_bo.url = self.base_url + good.select_one('.goods-name a')['href']\n good_img = good.select_one('.goods-img img')\n pic_url = good_img.get('data-original', good_img.get('src'))\n\n good_bo.code = self.get_code_from_href(self.base_url + good.select_one('.goods-name a')['href'])\n if self.download_pic_url(pic_url, file_name=good_bo.code):\n good_bo.pic_url = self.base_url + pic_url\n GoodDao.insert(good_bo)\n\n def download_good_detail(self, detail_url, activity_pid=None, activity_code=None):\n res = self.request_get_proxy(detail_url)\n soup = BeautifulSoup(res.text)\n sku_name = soup.select_one('.goods-detail-center .sku-name')\n label_name = sku_name.select_one('.label').text\n sku_name = sku_name.text.replace(label_name, '')\n good_bo = GoodBo()\n good_bo.activity_code = activity_code\n good_bo.activity_pid = activity_pid\n good_bo.name = sku_name\n good_bo.url = detail_url\n good_bo.code = self.get_code_from_href(detail_url)\n if self.download_pic_url(soup.select_one('.img-con img')['src'], file_name=good_bo.code):\n good_bo.pic_url = self.base_url + soup.select_one('.img-con img')['src']\n GoodDao.insert(good_bo)\n\n def download_goods(self):\n # App Banner页\n app_banner = 'https://www.houniao.hk/wxapi/index/carousel'\n app_banner_res = self.request_proxy(url=app_banner, headers=self.headers).json()\n if app_banner_res['code'] == 200:\n for data in app_banner_res['data']:\n ad_name = data['adName']\n ad_url = data['adURL']\n ad_img_file = \"/\" + data['adFile']\n activity = ActivtiyBO()\n activity.type = 'search'\n activity.activity_code = 'TOP'\n activity.name = ad_name\n activity.pic_url = self.base_url +ad_img_file\n activity_pid = ActivtiyDao.insert(activity)\n self.download_pic_url(img_url=ad_img_file, file_name='TOP#顶幅banner#' + str(activity_pid))\n # 下载详情数据\n self.download_search_app_good_list(keyword=ad_name, activity_pid=activity_pid, activity_code='TOP')\n\n\n # App 国际馆\n coutry_url = 'https://www.houniao.hk/wxapi/index/country'\n coutry_res = self.request_proxy(url=coutry_url, headers= self.headers).json()\n if coutry_res['code'] == 200:\n for data in coutry_res['data']:\n ad_name = data['adName']\n ad_url = data['adURL']\n ad_img_file = '/' + data['adFile']\n activity = ActivtiyBO()\n activity.activity_code = 'COUNTRY'\n activity.name = ad_name\n activity_pid = ActivtiyDao.insert(activity)\n 
self.download_pic_url(img_url=ad_img_file, file_name='COUNTRY#' + ad_name + \"#\" + str(activity_pid))\n for page in [1,2]:\n country_good_url = 'https://www.houniao.hk/wxapi/goods/selectGoods'\n country_good_params = {'originId': ad_url, 'page':page,'limit':8}\n country_good_res = self.request_proxy(url=country_good_url, params=country_good_params, headers= self.headers).json()\n if country_good_res['code'] ==200:\n for good_list_item in country_good_res['data']['list']:\n good_bo = GoodBo()\n good_bo.activity_code = 'COUNTRY'\n good_bo.activity_pid = activity_pid\n good_bo.code = good_list_item['goodsSku']\n good_id = good_list_item['goodsId']\n good_bo.name = good_list_item['goodsName']\n good_img = '/' + good_list_item['goodsImg']\n if self.download_pic_url(good_img, file_name=good_bo.code):\n good_bo.pic_url = self.base_url + good_img\n GoodDao.insert(good_bo)\n else:\n Logger.log('国际馆下载失败')\n self.init_cookies()\n res = self.request_get_proxy('http://www.houniao.hk/')\n soup = BeautifulSoup(res.text)\n soup.select_one('.nav-tabs .nav-item')\n\n self.download_trade_type_goods(soup, '保税直供', 'TRADETYPE', 1)\n self.download_trade_type_goods(soup, '完税进口', 'TRADETYPE', 1)\n self.download_trade_type_goods(soup, '国内贸易', 'TRADETYPE', 1)\n self.download_trade_type_goods(soup, '香港直邮', 'TRADETYPE', 1)\n self.download_trade_type_goods(soup, '海外直邮', 'TRADETYPE', 1)\n\n # 网红爆品+新品上市\n Logger.log('开始下载 网红爆品')\n hot_good = soup.select_one('.nav-tabs .nav-item:contains(\"网红爆品\")')\n activity = ActivtiyBO()\n activity.activity_code = 'HOT'\n activity.name = '网红爆品'\n activity.url = self.base_url+ hot_good.select_one('a')['href']\n activity_pid = ActivtiyDao.insert(activity)\n self.download_good_item_lists(activity.url, activity_pid, 'HOT')\n\n self.download_trade_type_goods(soup, '新品上市', 'NEW')\n # Logger.log('开始下载 新品上市')\n # new_good = soup.select_one('.nav-tabs .nav-item:contains(\"新品上市\")')\n # activity = ActivtiyBO()\n # activity.activity_code = 'NEW'\n # activity.name = '新品上市'\n # activity.url = self.base_url+ new_good.select_one('a')['href']\n # activity_pid = ActivtiyDao.insert(activity)\n # self.download_search_good_list(activity.url, activity_pid, 'NEW')\n\n # pc顶幅, 废弃,使用APP跑马灯\n # Logger.log('开始下载 顶幅跑马灯')\n # banner_items = soup.select('.banner-slide .items li')\n # for banner_item in banner_items:\n # activity = ActivtiyBO()\n # href = banner_item.select_one('a')['href']\n # activity.activity_code = 'TOP'\n # activity.name = '顶幅banner'\n # activity.url = href\n # if banner_item['_src'] is not None:\n # activity.pic_url = self.base_url + banner_item['_src'].replace('url(', '').replace(')', '')\n # # type: # detail: 商品详情, search: 搜索结果页,other: 其他\n # if 'product/detail' in href:\n # # 商品详情链接:\n # # if banner_item['style'] is not None:\n # # pass\n # activity.type = 'detail'\n # activity_pid = ActivtiyDao.insert(activity)\n # self.download_good_detail(href, activity_pid=activity_pid, activity_code='TOP')\n # elif 'product/search' in href:\n # activity.type = 'search'\n # activity_pid = ActivtiyDao.insert(activity)\n # self.download_search_good_list(href, activity_pid=activity_pid, activity_code='TOP')\n # else:\n # Logger.log('unknow type:' + href)\n # activity.type = 'other'\n # activity_pid = ActivtiyDao.insert(activity)\n # if not self.download_pic_url(banner_item['_src'].replace('url(', '').replace(')', ''), file_name='TOP#顶幅banner#' + str(activity_pid)):\n # activity.pid = activity_pid\n # activity.pic_url = None\n # ActivtiyDao.update_room_detail(activity)\n\n # 卡片活动\n Logger.log('开始下载 
卡片活动')\n floor_items = soup.select('.floor-items .floor-item')\n for floor_item in floor_items:\n href = floor_item.select_one('a')['href']\n name = floor_item.select_one('p.name').text.strip()\n desc = floor_item.select_one('p.desc').text.strip()\n img_url = floor_item.select_one('img')['src']\n activity = ActivtiyBO()\n activity.activity_code = 'CARD'\n activity.desc = desc\n activity.name = '卡片活动-' + name\n activity.url = self.base_url+href\n activity.pic_url = self.base_url +img_url\n activity_pid = ActivtiyDao.insert(activity)\n if not self.download_pic_url(img_url, file_name='CARD#' + activity.name+'#' + str(activity_pid)):\n activity.pid = activity_pid\n activity.pic_url = None\n ActivtiyDao.update_room_detail(activity)\n self.download_card_good_lists(activity.url, activity_pid, 'CARD')\n # 抢购\n Logger.log('开始下载 抢购')\n activity = ActivtiyBO()\n activity.activity_code = 'COUNTDOWN'\n activity.name = '抢购'\n activity.url = self.base_url+ soup.select_one('#flashsale .navbox.active')['href']\n activity_pid = ActivtiyDao.insert(activity)\n self.download_countdown_good_lists(activity.url, activity_pid=activity_pid, activity_code='COUNTDOWN')\n\n # 爬取商品分类第一页\n Logger.log('开始爬取 商品分类')\n list_items = soup.select('.site-category .catlist li.list-item')\n for list_item in list_items:\n # :奶粉辅食\n item_name_1 = list_item.select_one('a.item span.catname').text.strip()\n for list_item2 in list_item.select('div.sub-list dl.slblock'):\n # 婴儿奶粉\n item_name_2 = list_item2.select_one('dt.li-title').text.strip()\n for list_item3 in list_item2.select('dd.li-item'):\n item_name_3 = list_item3.text.strip()\n activity = ActivtiyBO()\n activity.activity_code = 'CATEGORY'\n activity.name = item_name_1 + '-' + item_name_2 + '-' + item_name_3\n activity.url = 'http:'+ list_item3.select_one('a')['href']\n activity_pid = ActivtiyDao.insert(activity)\n self.download_search_good_list(activity.url, activity_pid, 'CATEGORY', limit_page_num=1)\n\n\n def download_trade_type_goods(self, soup, target_title, activiy_code, limit_page_num=None):\n Logger.log('开始下载 '+ target_title)\n trade_good = soup.select_one('.nav-tabs .nav-item:contains(\"%s\")' % (target_title))\n activity = ActivtiyBO()\n activity.activity_code = activiy_code\n activity.name = target_title\n activity.url = self.base_url+ trade_good.select_one('a')['href']\n activity_pid = ActivtiyDao.insert(activity)\n self.download_search_good_list(activity.url, activity_pid, activiy_code, limit_page_num=limit_page_num)\n\n def download_file(self, response):\n pass\n # if not os.path.exists(self.download_path):\n # os.makedirs(self.download_path)\n # _hash = hashlib.md5()\n # _hash.update(response.url.encode('utf-8'))\n # md5_code = _hash.hexdigest()\n # with open(os.path.join(self.download_path, md5_code), 'w', encoding='utf-8') as f:\n # f.write(response.text)\n # return response\n\n def download_pic_url(self, img_url=None, file_name=None):\n try:\n full_img_url = self.base_url + img_url\n split_file_name = img_url.split('/')[-1]\n if file_name is None:\n file_name = split_file_name\n else:\n file_name = file_name + os.path.splitext(split_file_name)[-1]\n if img_url.startswith('/'):\n img_url = img_url[1:]\n download_path = os.path.join(HNCommonParam.script_download_path, img_url.replace(split_file_name, ''))\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n r = requests.get(full_img_url, stream=True, timeout=60*5)\n abs_file_path = os.path.join(download_path, file_name)\n # print('保存图片:' + abs_file_path)\n with open(abs_file_path, 'wb') as f:\n 
f.write(r.content)\n return True\n except Exception as err:\n Logger.log('img %s download err' % img_url)\n return False\n\n def get_code_from_href(self, href):\n if href is None:\n return None\n return href[href.index('itemSku=')+len('itemSku='):]\n\nscheduler = BlockingScheduler()\[email protected]_job(\"cron\", day_of_week='*', hour=HNCommonParam.job_hour, minute=HNCommonParam.job_min, second='00')\ndef rebate():\n script = HouNiaoScript()\n try:\n Logger.log('spider task start')\n script.download_goods()\n Logger.log('Spider task end')\n except BaseException as err:\n Logger.log('Spider task failed: ' + time.strftime('%Y-%m-%d', time.localtime()))\n traceback.print_exc()\n\nif __name__ == '__main__':\n try:\n Logger.log(\"statistic scheduler start\")\n scheduler.start()\n # HouNiaoScript().download_goods()\n Logger.log(\"statistic scheduler start success\")\n except (KeyboardInterrupt, SystemExit):\n scheduler.shutdown()\n Logger.log(\"statistic scheduler start-up fail\")\n\n\n\n" }, { "alpha_fraction": 0.5531914830207825, "alphanum_fraction": 0.707446813583374, "avg_line_length": 16.090909957885742, "blob_id": "560330673d22610bd4f9c693fefc30cf26734831", "content_id": "58fd2b3fc8759af2d2f3681f8d2246e79b5a6404", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 188, "license_type": "no_license", "max_line_length": 68, "num_lines": 11, "path": "/script/houniao/xkucun_common.ini", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "[download.path]\nwin_path = D:/download/xxkucun\n\n[database]\nurl = mysql+pymysql://root:Pa9090Db*@192.168.102.143:3306/xxkucun_db\npool_size = 50\nmax_overflow = 300\n\n[job]\nhour = 17\nmin = 58\n" }, { "alpha_fraction": 0.5806092619895935, "alphanum_fraction": 0.589211642742157, "avg_line_length": 28.85196304321289, "blob_id": "9d8d34d7ec54f4c05dbc8787158661575d833e18", "content_id": "a1984e46bfd049ba67356ec46921c32da0170cae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9961, "license_type": "no_license", "max_line_length": 157, "num_lines": 331, "path": "/script/houniao/xxkucun_bo.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "import datetime\nimport time\nfrom script.houniao.houniao_util import DBUtil, BeanUtil\nfrom sqlalchemy import Column, String\nfrom sqlalchemy.types import INTEGER,FLOAT, TEXT\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\n\nclass DistrictBO(Base):\n # 表的名字:\n __tablename__ = 't_district'\n\n # 表的结构:\n pid = Column(INTEGER, primary_key=True, autoincrement=True)\n city_id = Column(INTEGER)\n district_id = Column(INTEGER)\n district_name = Column(String(255))\n district_url = Column(String(255))\n\n\nclass DistrictDao:\n @staticmethod\n def insert(item: DistrictBO):\n session = DBUtil.get_session()\n # 插入\n session.add(item)\n session.commit()\n pid = item.pid\n session.close()\n return pid\n\n @staticmethod\n def select_all():\n session = DBUtil.get_session()\n res = session.query(DistrictBO)\n session.close()\n return res\n\n @staticmethod\n def select_by_district_id(district_id):\n session = DBUtil.get_session()\n res = session.query(DistrictBO).filter(DistrictBO.district_id ==district_id).first()\n session.close()\n return res\n\n @staticmethod\n def select_by_city_id(city_id):\n session = DBUtil.get_session()\n res = session.query(DistrictBO).filter(DistrictBO.city_id ==city_id).all()\n session.close()\n return res\n\n 
@staticmethod\n def update_detail(room: DistrictBO):\n session = DBUtil.get_session()\n item = session.query(DistrictBO).filter(DistrictBO.pid == room.pid).first()\n BeanUtil.copy_obj_properties(room, item)\n session.commit()\n session.close()\n\n\n\nclass CategoryBo(Base):\n # 表的名字:\n __tablename__ = 't_category'\n\n # 表的结构:\n pid = Column(INTEGER, primary_key=True, autoincrement=True)\n city_id = Column(INTEGER)\n category_id = Column(INTEGER)\n parent_category_pid = Column(INTEGER)\n category_name = Column(String(255))\n level = Column(String(255))\n\n\nclass CategoryDao:\n @staticmethod\n def insert(item: CategoryBo):\n session = DBUtil.get_session()\n # 插入\n session.add(item)\n session.commit()\n pid = item.pid\n session.close()\n return pid\n\n @staticmethod\n def select_all():\n session = DBUtil.get_session()\n res = session.query(CategoryBo)\n session.close()\n return res\n\n @staticmethod\n def select_by_city_id_level(city_id, level):\n session = DBUtil.get_session()\n res = session.query(CategoryBo).filter(CategoryBo.city_id ==city_id).filter(CategoryBo.level ==level).all()\n session.close()\n return res\n\n @staticmethod\n def select_by_city_id_level_name(city_id, level, name):\n session = DBUtil.get_session()\n res = session.query(CategoryBo).filter(CategoryBo.city_id ==city_id).filter(CategoryBo.level ==level).filter(CategoryBo.category_name ==name).first()\n session.close()\n return res\n\n @staticmethod\n def select_by_city_id_categoryid(city_id, cateory_id):\n session = DBUtil.get_session()\n res = session.query(CategoryBo).filter(CategoryBo.city_id ==city_id).filter(CategoryBo.category_id ==cateory_id).first()\n session.close()\n return res\n\n @staticmethod\n def update_detail(room: CategoryBo):\n session = DBUtil.get_session()\n item = session.query(CategoryBo).filter(CategoryBo.pid == room.pid).first()\n BeanUtil.copy_obj_properties(room, item)\n session.commit()\n session.close()\n\n\nclass ProductListBO(Base):\n # 表的名字:\n __tablename__ = 't_product_list'\n\n # 表的结构:\n pid = Column(INTEGER, primary_key=True, autoincrement=True)\n city_id = Column(INTEGER)\n product_id = Column(INTEGER)\n category_pid = Column(INTEGER)\n district_id = Column(INTEGER)\n img_url = Column(String(255))\n group_type = Column(String(255))\n name = Column(String(255))\n brand_name = Column(String(255))\n price = Column(String(255))\n market_price = Column(String(255))\n sale_time = Column(String(255))\n discount = Column(FLOAT)\n commission = Column(FLOAT)\n sale_qty = Column(INTEGER)\n total_qty = Column(INTEGER)\n pay_count = Column(INTEGER)\n total_seconds = Column(INTEGER)\n distance = Column(INTEGER)\n time_format = Column(String(255))\n sale_status = Column(String(255))\n create_time = Column(String(255))\n\n def __init__(self,\n city_id=None,\n product_id=None,\n img_url=None,\n group_type=None,\n name=None,\n brand_name=None,\n price=None,\n market_price=None,\n sale_time=None,\n discount=None,\n commission=None,\n sale_qty=None,\n total_qty=None,\n pay_count=None,\n total_seconds=None,\n distance=None,\n time_format=None,\n create_time=None):\n self.city_id = city_id\n self.product_id = product_id\n self.img_url = img_url\n self.group_type = group_type\n self.name = name\n self.brand_name = brand_name\n self.price = price\n self.market_price = market_price\n self.sale_time = sale_time\n self.discount = discount\n self.commission = commission\n self.sale_qty = sale_qty\n self.total_qty = total_qty\n self.pay_count = pay_count\n self.total_seconds = total_seconds\n self.distance = 
distance\n self.time_format = time_format\n self.create_time = datetime.datetime.now()\n\n\nclass ProductListDao:\n @staticmethod\n def insert(item: ProductListBO):\n session = DBUtil.get_session()\n # 插入\n session.add(item)\n session.commit()\n pid = item.pid\n session.close()\n return pid\n\n @staticmethod\n def select_all():\n session = DBUtil.get_session()\n res = session.query(ProductListBO)\n session.close()\n return res\n\n @staticmethod\n def update_detail(room: ProductListBO):\n session = DBUtil.get_session()\n item = session.query(ProductListBO).filter(ProductListBO.pid == room.pid).first()\n BeanUtil.copy_obj_properties(room, item)\n session.commit()\n session.close()\n\n\nclass ProductDetailBO(Base):\n # 表的名字:\n __tablename__ = 't_product_detail'\n\n # 表的结构:\n pid = Column(INTEGER, primary_key=True, autoincrement=True)\n product_id = Column(INTEGER)\n name = Column(String(255))\n imgs = Column(String(1024))\n price = Column(String(255))\n market_price = Column(String(255))\n brand_id = Column(INTEGER)\n brand_name = Column(String(255))\n brand_logo = Column(String(255))\n fxwa = Column(String(255))\n commission = Column(FLOAT)\n max_commission = Column(FLOAT)\n sale_qty = Column(INTEGER)\n total_qty = Column(INTEGER)\n sale_time = Column(String(255))\n offline_time = Column(String(255))\n total_seconds = Column(INTEGER)\n book_way = Column(String(255))\n stores = Column(TEXT)\n condition = Column(TEXT)\n remark = Column(TEXT)\n setmeal = Column(TEXT)\n condition_url = Column(String(255))\n remark_url = Column(String(255))\n setmeal_url = Column(String(255))\n create_time = Column(String(255))\n\n def __init__(self,\n product_id=None,\n name=None,\n imgs=None,\n price=None,\n market_price=None,\n brand_id=None,\n brand_name=None,\n brand_logo=None,\n fxwa=None,\n commission=None,\n max_commission=None,\n sale_qty=None,\n total_qty=None,\n sale_time=None,\n offline_time=None,\n total_seconds=None,\n book_way=None,\n stores=None,\n condition=None,\n remark=None,\n setmeal=None,\n condition_url=None,\n remark_url=None,\n setmeal_url=None,\n create_time=None):\n self.product_id = product_id\n self.name = name\n self.imgs = imgs\n self.price = price\n self.price = price\n self.market_price = market_price\n self.brand_id = brand_id\n self.brand_name = brand_name\n self.brand_logo = brand_logo\n self.fxwa = fxwa\n self.commission = commission\n self.max_commission = max_commission\n self.sale_qty = sale_qty\n self.total_qty = total_qty\n self.sale_time = sale_time\n self.offline_time = offline_time\n self.total_seconds = total_seconds\n self.book_way = book_way\n self.stores = stores\n self.condition = condition\n self.remark = remark\n self.setmeal = setmeal\n self.condition_url = condition_url\n self.remark_url = remark_url\n self.setmeal_url = setmeal_url\n self.create_time = datetime.datetime.now()\n\n\nclass ProductDetailDao:\n @staticmethod\n def insert(item: ProductDetailBO):\n session = DBUtil.get_session()\n # 插入\n session.add(item)\n session.commit()\n pid = item.pid\n session.close()\n return pid\n\n @staticmethod\n def select_all():\n session = DBUtil.get_session()\n res = session.query(ProductDetailBO)\n session.close()\n return res\n\n @staticmethod\n def update_detail(room: ProductDetailBO):\n session = DBUtil.get_session()\n item = session.query(ProductDetailBO).filter(ProductDetailBO.pid == room.pid).first()\n BeanUtil.copy_obj_properties(room, item)\n session.commit()\n session.close()\n" }, { "alpha_fraction": 0.4833333194255829, "alphanum_fraction": 
0.4833333194255829, "avg_line_length": 30.578947067260742, "blob_id": "ec6a9dfaf91c600413ef512c0905818770f10790", "content_id": "ef638b3d31bb8abf09628c9dbe875c0cf26b79f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "no_license", "max_line_length": 72, "num_lines": 19, "path": "/script/houniao/logging_utils.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "import time, os\n\n\nclass Logger:\n\n @staticmethod\n def log(log_info=None, this_dir = 'log'):\n if log_info is None:\n return False\n try:\n str(log_info)\n except:\n return False\n this_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n file_name = time.strftime('%Y-%m-%d', time.localtime()) + '.log'\n if os.path.exists('./' + this_dir) is False:\n os.mkdir('./' + this_dir)\n with open('./' + this_dir + '/' + file_name, 'a+') as f:\n f.write('[' + this_time + ']:' + str(log_info) + '\\n')\n" }, { "alpha_fraction": 0.6445595622062683, "alphanum_fraction": 0.6455958485603333, "avg_line_length": 33.42856979370117, "blob_id": "4a18e3633bd0953d01be7b988cc8191e22d4c9f7", "content_id": "af1fd3c05080c81da312b16e7285845eb2a100bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/script/houniao/common.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "import configparser\nimport os\nimport platform\n\n\n\nclass HNCommonParam():\n base_dir = os.path.dirname(__file__)\n config_file_path = os.path.join(base_dir, \"common.ini\") # 配置文件名称\n config = configparser.ConfigParser()\n config.read(config_file_path,encoding=\"utf-8-sig\")\n\n hn_username = config[\"houniao\"][\"user_name\"]\n hn_password = config[\"houniao\"][\"password\"]\n # 数据库链接\n database_url = config[\"database\"][\"url\"]\n database_pool_size = int(config[\"database\"][\"pool_size\"])\n database_max_overflow = int(config[\"database\"][\"max_overflow\"])\n job_hour = config[\"job\"][\"hour\"]\n job_min = config[\"job\"][\"min\"]\n # 下载根目录\n plat_form = platform.platform()\n if \"Linux\" in plat_form:\n script_download_path = config[\"download.path\"][\"linux_path\"]\n elif \"Windows\" in plat_form:\n script_download_path = config[\"download.path\"][\"win_path\"]\n else:\n script_download_path = config[\"download.path\"][\"linux_path\"]\n\n" }, { "alpha_fraction": 0.541198194026947, "alphanum_fraction": 0.5557667016983032, "avg_line_length": 51.325965881347656, "blob_id": "a657e3614058391cead545722b419183c1f13559", "content_id": "e0c4fbb4438002159c3c0be67ada265532a3fa3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19153, "license_type": "no_license", "max_line_length": 288, "num_lines": 362, "path": "/script/houniao/xxkucun_script.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport hashlib\nimport os\nimport traceback\nimport json\nimport requests\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom bs4 import BeautifulSoup\n\nfrom script.houniao.xxkucun_bo import *\nfrom script.houniao.logging_utils import Logger\nfrom script.houniao.xkucun_common import XKCCommonParam\n\n\nclass XXKuCunScript():\n\n log_path = 'xxkucun_log'\n base_dir = os.path.dirname(__file__)\n base_url = 'http://www.houniao.hk'\n login_url = base_url + '/home/User/login'\n cookie_ts_key = 
'Hm_lpvt_79b5f26a1df74b42929511d91ac22f32'\n download_path = os.path.join(XKCCommonParam.script_download_path, time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n def request_get_proxy(self, url, params=None):\n Logger.log('request get:' + url, this_dir=self.log_path)\n Logger.log(params)\n response = None\n retry_cnt = 1\n while True:\n response = requests.get(url=url, params=params, headers=self.get_now_header(), timeout= 60*5, verify=False)\n if response.status_code == 200:\n break\n else:\n retry_cnt = retry_cnt+1\n if retry_cnt > 10:\n Logger.log('%s 重试超过10次,跳出'%(url), this_dir=self.log_path)\n break\n Logger.log('%s请求失败:%d,重试第%d次'% (url, response.status_code, retry_cnt))\n time.sleep(5)\n return response\n\n def get_signature(self, src_id):\n md5 = hashlib.md5()\n md5.update(src_id.encode('utf-8'))\n return md5.hexdigest()\n\n def get_now_header(self):\n header = {\n \"version\": \"2.9\",\n \"appkey\": \"98aqnb\",\n \"timestamp\": \"1608101674\",\n \"sign\": \"4ec64958698e0e3929576cc2033dc905\",\n \"user-agent\": \"okhttp/4.7.2\"\n }\n # banner\n timestamp = int(time.time())\n sign_source = 'appkey=98aqnb&appsecret=BAD60AB6-F6D7-4F2F-9711-5EEF15287D4E&timestamp=' + str(timestamp)\n sign = self.get_signature(sign_source)\n header['timestamp'] = str(timestamp)\n header['sign'] = sign\n return header\n\n def download_goods(self):\n session = DBUtil.get_session()\n citys = session.execute('select * from t_city')\n for city in citys:\n self.download_city_goods(city)\n session.close()\n\n def download_city_goods(self, city):\n city_id = city[1]\n city_name = city[2]\n Logger.log('开始下载城市:'+ city_name, this_dir=self.log_path)\n\n banner_url = 'https://api.xxkucun.com/v1/product/banner/list?ver=1&city_id='+ str(city_id)\n banner_res = self.request_get_proxy(banner_url).json()\n\n # 下载区域\n Logger.log('开始下载区域, 条目基础数据', this_dir=self.log_path)\n district_url = 'https://api.xxkucun.com/v1/common/district/list?city_id='+ str(city_id)\n district_res = self.request_get_proxy(district_url).json()\n if 0 == district_res['err_code']:\n for district_data in district_res['data']:\n if DistrictDao.select_by_district_id(district_data['ID']) is None:\n district_bo = DistrictBO()\n district_bo.city_id = city_id\n district_bo.district_id = district_data['ID']\n district_bo.district_name = district_data['Name']\n DistrictDao.insert(district_bo)\n\n catetory_url = 'https://api.xxkucun.com/v1/product/category/list?num=8&city_id=' + str(city_id)\n catetory_res = self.request_get_proxy(catetory_url).json()\n if 0 == catetory_res['err_code']:\n for catetory_data in catetory_res['data']:\n db_category = CategoryDao.select_by_city_id_categoryid(city_id, catetory_data['ID'])\n if db_category is None:\n category_bo = CategoryBo()\n category_bo.city_id = city_id\n category_bo.level = 0\n category_bo.category_id = catetory_data['ID']\n category_bo.category_name = catetory_data['Name']\n category_id = CategoryDao.insert(category_bo)\n else:\n category_id = db_category.pid\n child_category_url = 'https://api.xxkucun.com/v1/product/catechild/list?category_id=%d&city_id=%d' % (catetory_data['ID'], city_id)\n child_catetory_res = self.request_get_proxy(child_category_url).json()\n if 0 == child_catetory_res['err_code']:\n for catetory_data in child_catetory_res['data']:\n db_category = CategoryDao.select_by_city_id_categoryid(city_id, catetory_data['ID'])\n if db_category is None:\n category_bo = CategoryBo()\n category_bo.city_id = city_id\n category_bo.level = 1\n category_bo.parent_category_pid = category_id\n 
category_bo.category_id = catetory_data['ID']\n category_bo.category_name = catetory_data['Name']\n CategoryDao.insert(category_bo)\n\n # 条目下商品\n Logger.log('开始下载条目商品数据', this_dir=self.log_path)\n distircts = DistrictDao.select_by_city_id(city_id)\n categories = CategoryDao.select_by_city_id_level(city_id, 2)\n for district in distircts:\n for category in categories:\n page_index = 1\n while True:\n category_district_search_url = 'https://api.xxkucun.com/v1/product/search/by/category?page_index=%d&district_id=%d&px=0&category_id=%d&type=0&lng=103.56358166666665&lat=33.00125&actiontype=&city_id=%d' % (page_index, district.district_id,category.category_id,city_id )\n category_district_search_res = self.request_get_proxy(category_district_search_url).json()\n if 0 == category_district_search_res['err_code']:\n if len(category_district_search_res['data']) == 0:\n break\n for rec_data in category_district_search_res['data']:\n product_list_bo = ProductListBO()\n product_list_bo.city_id = city_id\n product_list_bo.district_id = district.district_id\n product_list_bo.category_pid = category.pid\n product_list_bo.product_id = rec_data['ID']\n product_list_bo.name = rec_data['Name']\n product_list_bo.img_url = rec_data['Img']\n self.download_pic_url(rec_data['Img'], rec_data['ID'])\n product_list_bo.brand_name = rec_data['BrandName']\n product_list_bo.price = rec_data['Price']\n product_list_bo.market_price = rec_data['MarketPrice']\n product_list_bo.sale_qty = rec_data['SaleQty']\n product_list_bo.total_qty = rec_data['TotalQty']\n product_list_bo.discount = rec_data['Discount']\n product_list_bo.commission = rec_data['Commission']\n product_list_bo.group_type = 'SEARCH'\n product_list_bo.sale_status = rec_data.get('SaleStatus', None)\n ProductListDao.insert(product_list_bo)\n # self.down_good_detail(rec_data['ID'], city_id)\n else:\n break\n page_index = page_index + 1\n Logger.log('开始下载BANNER数据', this_dir=self.log_path)\n banner_url = 'https://api.xxkucun.com/v1/product/banner/list?ver=1&city_id='+ str(city_id)\n banner_res = self.request_get_proxy(banner_url).json()\n if 0 == banner_res['err_code']:\n for banner_data in banner_res['data']:\n product_list_bo = ProductListBO()\n product_list_bo.city_id = city_id\n product_list_bo.product_id = banner_data['ProdID']\n product_list_bo.img_url = banner_data['Img']\n self.download_pic_url(banner_data['Img'])\n product_list_bo.group_type = banner_data['type']\n product_list_bo.sale_status = banner_data.get('SaleStatus', None)\n ProductListDao.insert(product_list_bo)\n Logger.log('开始下载今日推荐', this_dir=self.log_path)\n rec_url = 'https://api.xxkucun.com/v1/product/today/rec/list?lng=103.56358166666665&lat=33.00125&city_id='+ str(city_id)\n rec_res = self.request_get_proxy(rec_url).json()\n if 0 == rec_res['err_code']:\n for rec_data in rec_res['data']:\n product_list_bo = ProductListBO()\n product_list_bo.city_id = city_id\n product_list_bo.product_id = rec_data['ID']\n product_list_bo.name = rec_data['Name']\n product_list_bo.img_url = rec_data['Img']\n self.download_pic_url(rec_data['Img'], rec_data['ID'])\n product_list_bo.brand_name = rec_data['BrandName']\n product_list_bo.price = rec_data['Price']\n product_list_bo.market_price = rec_data['MarketPrice']\n product_list_bo.commission = rec_data['Commission']\n product_list_bo.pay_count = rec_data['PayCount']\n product_list_bo.group_type = 'REC'\n product_list_bo.sale_status = rec_data.get('SaleStatus', None)\n ProductListDao.insert(product_list_bo)\n self.down_good_detail(rec_data['ID'], 
city_id)\n\n\n Logger.log('开始下载即将下线', this_dir=self.log_path)\n down_url = 'https://api.xxkucun.com/v1/product/pro/GetDownLineList?user_id=null&lng=103.56358166666665&lat=33.00125&city_id='+ str(city_id)\n down_res = self.request_get_proxy(down_url).json()\n if 0 == down_res['err_code']:\n for rec_data in down_res['data']:\n product_list_bo = ProductListBO()\n product_list_bo.city_id = city_id\n product_list_bo.product_id = rec_data['ID']\n product_list_bo.name = rec_data['Name']\n product_list_bo.img_url = rec_data['Img']\n self.download_pic_url(rec_data['Img'], rec_data['ID'])\n product_list_bo.brand_name = rec_data['BrandName']\n product_list_bo.price = rec_data['Price']\n product_list_bo.market_price = rec_data['MarketPrice']\n product_list_bo.sale_qty = rec_data['SaleQty']\n product_list_bo.total_qty = rec_data['TotalQty']\n product_list_bo.discount = rec_data['Discount']\n product_list_bo.commission = rec_data['Commission']\n product_list_bo.group_type = 'DOWN'\n product_list_bo.sale_status = rec_data.get('SaleStatus', None)\n ProductListDao.insert(product_list_bo)\n self.down_good_detail(rec_data['ID'], city_id)\n\n Logger.log('开始下载爆款推荐', this_dir=self.log_path)\n hot_url = 'https://api.xxkucun.com/v1/product/hot/list?page_index=1&lng=103.56358166666665&lat=33.00125&city_id='+ str(city_id)\n hot_res = self.request_get_proxy(hot_url).json()\n if 0 == hot_res['err_code']:\n for rec_data in hot_res['data']:\n product_list_bo = ProductListBO()\n product_list_bo.city_id = city_id\n product_list_bo.product_id = rec_data['ID']\n product_list_bo.name = rec_data['Name']\n product_list_bo.img_url = rec_data['Img']\n self.download_pic_url(rec_data['Img'], rec_data['ID'])\n product_list_bo.brand_name = rec_data['BrandName']\n product_list_bo.price = rec_data['Price']\n product_list_bo.market_price = rec_data['MarketPrice']\n product_list_bo.sale_qty = rec_data['SaleQty']\n product_list_bo.total_qty = rec_data['TotalQty']\n product_list_bo.discount = rec_data['Discount']\n product_list_bo.commission = rec_data['Commission']\n product_list_bo.group_type = 'HOT'\n product_list_bo.sale_status = rec_data.get('SaleStatus', None)\n ProductListDao.insert(product_list_bo)\n self.down_good_detail(rec_data['ID'], city_id)\n\n Logger.log('开始下载专栏', this_dir=self.log_path)\n category_url = 'https://api.xxkucun.com/v1/product/GetListArrayByCategory?lng=33.00125&lat=103.56358166666665&city_id='+ str(city_id)\n category_res = self.request_get_proxy(category_url).json()\n if 0 == category_res['err_code']:\n for rec_data in category_res['data']:\n type_name = rec_data['Name']\n db_category = CategoryDao.select_by_city_id_categoryid(city_id, rec_data['ID'])\n # category_bo = CategoryBo()\n # category_bo.category_name = type_name\n # category_bo.city_id = city_id\n # category_bo.level = 99\n # db_category = CategoryDao.select_by_city_id_level_name(city_id, 99, type_name)\n # if db_category is None:\n # category_pid = CategoryDao.insert(category_bo)\n # else:\n # category_pid = db_category.pid\n for item in rec_data['List']:\n product_list_bo = ProductListBO()\n product_list_bo.city_id = city_id\n product_list_bo.product_id = item['ID']\n product_list_bo.name = item['Name']\n product_list_bo.img_url = item['Img']\n self.download_pic_url(item['Img'], item['ID'])\n product_list_bo.brand_name = item['BrandName']\n product_list_bo.price = item['Price']\n product_list_bo.market_price = item['MarketPrice']\n product_list_bo.sale_qty = item['SaleQty']\n product_list_bo.total_qty = item['TotalQty']\n 
product_list_bo.discount = item['Discount']\n product_list_bo.commission = item['Commission']\n product_list_bo.group_type = 'CATEGORY_' + type_name\n product_list_bo.category_pid = db_category.pid\n product_list_bo.sale_status = rec_data.get('SaleStatus', None)\n ProductListDao.insert(product_list_bo)\n self.down_good_detail(item['ID'], city_id)\n\n def down_good_detail(self, product_id, city_id):\n detail_url = 'https://api.xxkucun.com/v1/product/detail/get?prod_id=%d&lng=103.56358166666665&lat=33.00125&city_id=%d' %(product_id, city_id)\n detail_res = self.request_get_proxy(detail_url).json()\n if 0 == detail_res['err_code']:\n rec_data = detail_res['data']\n detail_bo = ProductDetailBO()\n detail_bo.product_id = rec_data['ID']\n detail_bo.name = rec_data['Name']\n detail_bo.imgs = str(rec_data['Imgs'])\n num = 0\n for img in rec_data['Imgs']:\n self.download_pic_url(img, file_name=str(rec_data['ID']) + '_'+ str(num))\n num = num + 1\n detail_bo.price = rec_data['Price']\n detail_bo.market_price = rec_data['MarketPrice']\n detail_bo.brand_id = rec_data['BrandID']\n detail_bo.brand_name = rec_data['BrandName']\n detail_bo.brand_logo = rec_data['BrandLogo']\n detail_bo.fxwa = rec_data['FXWA']\n detail_bo.commission = rec_data['Commission']\n detail_bo.max_commission = rec_data['MaxCommission']\n detail_bo.sale_qty = rec_data['SaleQty']\n detail_bo.total_qty = rec_data['AvaQty']\n detail_bo.sale_time = rec_data['SaleTime']\n detail_bo.offline_time = rec_data['OfflineTime']\n detail_bo.total_seconds = rec_data['TotalSeconds']\n detail_bo.stores = json.dumps(rec_data['Stores'], ensure_ascii=False)\n # detail_bo.condition = rec_data['Condition']\n # detail_bo.remark = rec_data['Remark']\n # detail_bo.setmeal = rec_data['Setmeal']\n detail_bo.condition_url = rec_data['ConditionUrl']\n detail_bo.remark_url = rec_data['RemarkUrl']\n detail_bo.setmeal_url = rec_data['SetmealUrl']\n ProductDetailDao.insert(detail_bo)\n\n\n\n def download_pic_url(self, img_url=None, file_name=None):\n #todo 去除url中的参数串\n try:\n if '?' 
in img_url:\n img_url = img_url[:img_url.index('?')]\n split_file_name = img_url.split('/')[-1]\n path_arr = img_url.split('/')[3:-1]\n if file_name is None:\n file_name = split_file_name\n else:\n file_name = str(file_name) + os.path.splitext(split_file_name)[-1]\n if img_url.startswith('/'):\n img_url = img_url[1:]\n download_path = os.path.join(XKCCommonParam.script_download_path,*path_arr)\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n r = requests.get(img_url, stream=True, timeout=60*5)\n abs_file_path = os.path.join(download_path, file_name)\n # print('保存图片:' + abs_file_path)\n with open(abs_file_path, 'wb') as f:\n f.write(r.content)\n return True\n except Exception as err:\n traceback.print_exc()\n Logger.log('img %s download err' % img_url, this_dir=self.log_path)\n return False\n\n def get_code_from_href(self, href):\n if href is None:\n return None\n return href[href.index('itemSku=')+len('itemSku='):]\n\nscheduler = BlockingScheduler()\[email protected]_job(\"cron\", day_of_week='*', hour=XKCCommonParam.job_hour, minute=XKCCommonParam.job_min, second='00')\ndef rebate():\n script = XXKuCunScript()\n try:\n Logger.log('spider task start', this_dir=script.log_path)\n script.download_goods()\n Logger.log('Spider task end', this_dir=script.log_path)\n except BaseException as err:\n Logger.log('Spider task failed: ' + time.strftime('%Y-%m-%d', time.localtime()), this_dir=script.log_path)\n traceback.print_exc()\n # Logger.log(\"statistic scheduler execute success\" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\nif __name__ == '__main__':\n try:\n Logger.log(\"statistic scheduler start\", this_dir=XXKuCunScript.log_path)\n DBUtil.param = XKCCommonParam\n # scheduler.start()\n XXKuCunScript().download_goods()\n except (KeyboardInterrupt, SystemExit):\n scheduler.shutdown()\n Logger.log(\"statistic scheduler start-up fail\", this_dir=XXKuCunScript.log_path)\n\n\n\n" }, { "alpha_fraction": 0.5564516186714172, "alphanum_fraction": 0.7177419066429138, "avg_line_length": 15.533333778381348, "blob_id": "9344cfc59d016e2c04d2e694614d0f6afe43c79c", "content_id": "7274abb4f00d016ff7216beb6ec34137ee6965dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 248, "license_type": "no_license", "max_line_length": 71, "num_lines": 15, "path": "/script/houniao/common.ini", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "[download.path]\nwin_path = D:/download/houniao\n\n[database]\nurl = mysql+pymysql://root:Pa9090Db*@192.168.102.143:3306/houniao_goods\npool_size = 50\nmax_overflow = 300\n\n[houniao]\nuser_name = 15618834549\npassword = xzb87654321\n\n[job]\nhour = 19\nmin = 32\n" }, { "alpha_fraction": 0.5566983222961426, "alphanum_fraction": 0.5709388256072998, "avg_line_length": 26.882352828979492, "blob_id": "2f6097e94463b08f5dba9d535fcd6083ca981441", "content_id": "3d938535039719ec9f2ace8e1dd143c9fe785065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3832, "license_type": "no_license", "max_line_length": 83, "num_lines": 136, "path": "/script/houniao/houniao_bo.py", "repo_name": "francis12/spider_base", "src_encoding": "UTF-8", "text": "import datetime\nimport time\nfrom script.houniao.houniao_util import DBUtil, BeanUtil\nfrom sqlalchemy import Column, String\nfrom sqlalchemy.types import INTEGER\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\nclass ActivtiyBO(Base):\n # 表的名字:\n 
__tablename__ = 't_activity'\n\n # 表的结构:\n pid = Column(INTEGER, primary_key=True, autoincrement=True)\n activity_code = Column(String(255))\n name = Column(String(255))\n type = Column(String(255))\n url = Column(String(255))\n pic_url = Column(String(255))\n desc = Column(String(255))\n day = Column(String(255))\n create_time = Column(String(255))\n\n def __init__(self,\n activity_code=None,\n name=None,\n type=None,\n url=None,\n pic_url=None,\n day=None,\n desc=None):\n self.activity_code = activity_code\n self.name = name\n self.type = type\n self.url = url\n self.pic_url = pic_url\n self.day = time.strftime(\"%Y-%m-%d\", time.localtime())\n self.desc = desc\n self.create_time = datetime.datetime.now()\n\n\nclass ActivtiyDao:\n @staticmethod\n def insert(item: ActivtiyBO):\n session = DBUtil.get_session()\n # 插入\n session.add(item)\n session.commit()\n pid = item.pid\n session.close()\n return pid\n\n @staticmethod\n def select_all():\n session = DBUtil.get_session()\n res = session.query(ActivtiyBO)\n session.close()\n return res\n\n @staticmethod\n def update_room_detail(room: ActivtiyBO):\n session = DBUtil.get_session()\n item = session.query(ActivtiyBO).filter(ActivtiyBO.pid == room.pid).first()\n BeanUtil.copy_obj_properties(room, item)\n session.commit()\n session.close()\n\n\nclass GoodBo(Base):\n # 表的名字:\n __tablename__ = 't_good'\n\n # 表的结构:\n pid = Column(INTEGER, primary_key=True, autoincrement=True)\n name = Column(String(255))\n code = Column(String(255))\n price = Column(String(255))\n url = Column(String(255))\n sub_title = Column(String(255))\n sub_sub_title = Column(String(255))\n activity_code = Column(String(255))\n activity_pid = Column(INTEGER)\n day = Column(String(255))\n pic_url = Column(String(255))\n create_time = Column(String(255))\n\n def __init__(self,\n name=None,\n code=None,\n url=None,\n sub_title=None,\n sub_sub_title=None,\n activity_code=None,\n activity_pid=None,\n price=None,\n day=None,\n pic_url=None):\n self.name = name\n self.code = code\n self.url = url\n self.pic_url = pic_url\n self.sub_title = sub_title\n self.sub_sub_title = sub_sub_title\n self.activity_code = activity_code\n self.activity_pid = activity_pid\n self.day = time.strftime(\"%Y-%m-%d\", time.localtime())\n self.price = price\n self.create_time = datetime.datetime.now()\n\n\nclass GoodDao:\n @staticmethod\n def insert(item: GoodBo):\n session = DBUtil.get_session()\n # 插入\n session.add(item)\n session.commit()\n pid = item.pid\n session.close()\n return pid\n\n @staticmethod\n def select_all():\n session = DBUtil.get_session()\n res = session.query(GoodBo)\n session.close()\n return res\n\n @staticmethod\n def update_room_detail(room: GoodBo):\n session = DBUtil.get_session()\n item = session.query(GoodBo).filter(GoodBo.pid == room.pid).first()\n BeanUtil.copy_obj_properties(room, item)\n session.commit()\n session.close()\n" } ]
9
BrianIshii/foodies
https://github.com/BrianIshii/foodies
9a49eb769a76cb0c93948a6fe21d39745e297cd1
82632f2f78e2f51efab0c22244096b1da964ad47
757f9a5d7d228dd36e7c386735477e3a5d47218a
refs/heads/master
2021-05-08T16:06:55.517846
2018-02-04T19:22:39
2018-02-04T19:22:39
120,141,708
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6183802485466003, "alphanum_fraction": 0.6264417767524719, "avg_line_length": 32.4564323425293, "blob_id": "94ba81374262cbde804fa0f3bb05a1b0f43e664e", "content_id": "e607bc05a0107fce0d87811d74a9eea8d887e38e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8063, "license_type": "no_license", "max_line_length": 144, "num_lines": 241, "path": "/main.py", "repo_name": "BrianIshii/foodies", "src_encoding": "UTF-8", "text": "import base64\nimport os\nimport sys\n\nfrom flask import Flask, redirect, render_template, request\n\nfrom google.cloud import datastore\nfrom google.cloud import storage\nfrom google.cloud import vision\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef homepage():\n # Create a Cloud Datastore client.\n datastore_client = datastore.Client(\"foodies-194120\")\n\n # Use the Cloud Datastore client to fetch information from Datastore about\n # each photo.\n #query = datastore_client.query(kind='Photos')\n #image_entities = list(query.fetch()) \n\n # Return a Jinja2 HTML template.\n return render_template('homepage.html')\n\[email protected]('/upload_photo', methods=['GET', 'POST'])\ndef upload_photo():\n\n # Create a Cloud Storage client.\n storage_client = storage.Client()\n\n # Get the Cloud Storage bucket that the file will be uploaded to.\n bucket = storage_client.get_bucket(os.environ.get('CLOUD_STORAGE_BUCKET'))\n\n\n kind = 'Photo' \n # Create a new blob and upload the file's content to Cloud Storage.\n photo = request.files['file']\n blob = bucket.blob(photo.filename)\n blob.upload_from_string(\n photo.read(), content_type=photo.content_type)\n\n # Make the blob publicly viewable.\n blob.make_public()\n image_public_url = blob.public_url\n \n # Create a Cloud Vision client.\n vision_client = vision.ImageAnnotatorClient()\n\n # Retrieve a Vision API response for the photo stored in Cloud Storage\n source_uri = 'gs://{}/{}'.format(os.environ.get('CLOUD_STORAGE_BUCKET'), blob.name)\n response = vision_client.annotate_image({\n 'image': {'source': {'image_uri': source_uri}},\n })\n labels = response.label_annotations\n web_entities = response.web_detection.web_entities\n\n # Create a Cloud Datastore client\n datastore_client = datastore.Client()\n\n # The name/ID for the new entity\n name = blob.name\n\n # Create the Cloud Datastore key for the new entity\n key = datastore_client.key(kind, name)\n\n # Construct the new entity using the key. Set dictionary values for entity\n # keys image_public_url and label. 
If we are using python version 2, we need to convert\n # our image URL to unicode to save it to Datastore properly.\n entity = datastore.Entity(key)\n if sys.version_info >= (3, 0):\n entity['image_public_url'] = image_public_url\n else:\n entity['image_public_url'] = unicode(image_public_url, \"utf-8\")\n entity['label'] = labels[0].description\n\n # Save the new entity to Datastore\n datastore_client.put(entity)\n\n username = request.form['inputName']\n # TODO: Get user information through a form \n user_entity = create_user(username)\n\n labels = check_fruit(labels, user_entity)\n # Redirect to the home page.\n ingredients = print_ingredients(user_entity)\n recipes = check_recipes(ingredients)\n return render_template('homepage.html', name=username, labels=labels, public_url=image_public_url, ingredients=ingredients, recipes=recipes)\n\ndef create_user(username):\n datastore_client = datastore.Client(\"foodies-194120\")\n query = datastore_client.query(kind='Person')\n people = list(query.fetch())\n for p in people:\n print(p)\n if (p.key.name == username):\n return p\n datastore_client = datastore.Client()\n entity_kind = 'Person'\n key = datastore_client.key(entity_kind, username)\n entity = datastore.Entity(key)\n entity.update({\n 'name': username,\n 'ingredients': []\n })\n datastore_client.put(entity)\n return entity\n\[email protected](500)\ndef server_error(e):\n return \"\"\"\n An internal error occurred: <pre>{}</pre>\n See logs for full stacktrace.\n \"\"\".format(e), 500\n\[email protected]('/showSignUp')\ndef showSignUp():\n return render_template('signup.html')\n\ndef check_recipes(ingredients):\n recipes = []\n datastore_client = datastore.Client(\"foodies-194120\")\n query = datastore_client.query(kind='Recipe')\n Recipe = list(query.fetch())\n for r in Recipe:\n good = True\n for i in r['ingredients']:\n has_fruit = False\n for ing in ingredients:\n if (ing[0].key.name == i):\n has_fruit = True\n if (has_fruit is False):\n good = False\n if (good is True):\n recipes.append(r.key.name)\n\n return recipes\n\n\ndef create_new_recipe():\n datastore_client = datastore.Client()\n entity_kind = 'Recipe'\n key = datastore_client.key(entity_kind, \"Apple Banana Smoothie\")\n entity = datastore.Entity(key)\n entity.update({\n 'ingredients': [\"apple\", \"banana\"]\n })\n print(entity)\n datastore_client.put(entity)\n\ndef check_fruit(labels, user):\n fruit_labels = []\n fruits = [\"apple\",\"apricot\",\"avocado\",\"banana\",\"bell pepper\",\n \"bilberry\",\"blackberry\",\"blackcurrant\",\"blood orange\",\"blueberry\",\n \"boysenberry\",\"breadfruit\",\"canary melon\",\"cantaloupe\",\"cherimoya\",\n \"cherry\",\"chili pepper\",\"clementine\",\"cloudberry\",\"coconut\",\"cranberry\",\n \"cucumber\",\"currant\",\"damson\",\"date\",\"dragonfruit\",\"durian\",\"eggplant\",\n \"elderberry\",\"feijoa\",\"fig\",\"goji berry\",\"gooseberry\",\"grape\",\n \"grapefruit\",\"guava\",\"honeydew\",\"huckleberry\",\"jackfruit\",\"jambul\",\n \"jujube\",\"kiwi fruit\",\"kumquat\",\"lemon\",\"lime\",\"loquat\",\"lychee\",\n \"mandarine\",\"mango\",\"mulberry\",\"nectarine\",\"nut\",\"olive\",\"orange\",\n \"pamelo\",\"papaya\",\"passionfruit\",\"peach\",\"pear\",\"persimmon\",\"physalis\",\n \"pineapple\",\"plum\",\"pomegranate\",\"pomelo\",\"purple mangosteen\",\"quince\",\"raisin\",\n \"rambutan\",\"raspberry\",\"redcurrant\",\"rock melon\",\"salal berry\",\"satsuma\",\n \"star fruit\",\"strawberry\",\"tamarillo\",\"tangerine\",\"tomato\",\"ugli fruit\",\n \"watermelon\"]\n for label in 
labels:\n for fruit in fruits:\n if label.description == fruit:\n if(check_ingredients(label, user) is False):\n print(\"new ingredient\")\n print(label.description)\n create_ingredient(label.description, user)\n fruit_labels.append(label)\n return fruit_labels\n\ndef update_ingredients(username):\n datastore_client = datastore.Client(\"foodies-194120\")\n query = datastore_client.query(kind='Person')\n people = list(query.fetch())\n for p in people:\n if (p['name'] == username):\n print(\"update_ingred\")\n datastore_client.put(user)\n\ndef create_ingredient(name, user):\n datastore_client = datastore.Client()\n entity_kind = 'Ingredient'\n key = datastore_client.key(entity_kind, name)\n entity = datastore.Entity(key)\n entity.update({\n user.key.name: 1\n })\n datastore_client.put(entity)\n return entity \n\ndef check_ingredients(label, user):\n datastore_client = datastore.Client(\"foodies-194120\")\n query = datastore_client.query(kind='Ingredient')\n ingredients = list(query.fetch())\n for i in ingredients:\n if (i.key.name == label.description):\n print(\"same\")\n print(i.key.name)\n try:\n print(\"found old count\")\n count = i[user.key.name] + 1\n i.update({\n user.key.name: count\n })\n except:\n i.update({\n user.key.name: 1\n })\n datastore_client.put(i)\n return True\n return False\n\n\ndef print_ingredients(user):\n ingred_list = []\n datastore_client = datastore.Client(\"foodies-194120\")\n query = datastore_client.query(kind='Ingredient')\n ing = list(query.fetch())\n for i in ing:\n print(i)\n try:\n temp = i[user.key.name]\n ingred_list.append((i, temp))\n except:\n temp = 1\n print(ingred_list)\n return ingred_list\n\n\nif __name__ == '__main__':\n # This is used when running locally. Gunicorn is used to run the\n # application on Google App Engine. See entrypoint in app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n" } ]
1
awichmann-mintel/lpipe
https://github.com/awichmann-mintel/lpipe
3cf2cae72cec15e4571df1363455b215ca507c7f
0b0a58a85310c234134dad2395a7f63e7a5e86c2
4bd2bddecbec4c29525c36435f9596125b2061dc
refs/heads/master
2022-11-12T15:27:06.498742
2020-07-10T00:32:34
2020-07-10T00:32:39
265,680,448
0
0
Apache-2.0
2020-05-20T20:35:25
2020-05-20T20:26:23
2020-05-20T20:34:12
null
[ { "alpha_fraction": 0.6048387289047241, "alphanum_fraction": 0.6055532693862915, "avg_line_length": 35.2814826965332, "blob_id": "98784a94bc39eac0a7e06a0c945859085b4a7827", "content_id": "7572065314d8191b42692a392d78b5be063002a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19592, "license_type": "permissive", "max_line_length": 189, "num_lines": 540, "path": "/lpipe/pipeline.py", "repo_name": "awichmann-mintel/lpipe", "src_encoding": "UTF-8", "text": "import base64\nimport json\nimport warnings\nfrom collections import defaultdict, namedtuple\nfrom enum import Enum, EnumMeta\nfrom types import FunctionType\nfrom typing import Any, Union\n\nimport lpipe.exceptions\nimport lpipe.logging\nfrom lpipe import normalize, signature, utils\nfrom lpipe.action import Action\nfrom lpipe.contrib import kinesis, mindictive, sqs\nfrom lpipe.payload import Payload\nfrom lpipe.queue import Queue, QueueType\n\nPayloadEvent = namedtuple(\"Event\", [\"event\", \"context\", \"payload\"])\n\n\ndef build_event_response(n_records, n_ok, logger) -> dict:\n response = {\n \"event\": \"Finished.\",\n \"stats\": {\"received\": n_records, \"successes\": n_ok},\n }\n if hasattr(logger, \"events\") and logger.events:\n response[\"logs\"] = json.dumps(logger.events, cls=utils.AutoEncoder)\n return response\n\n\ndef process_event(\n event,\n context,\n queue_type: QueueType,\n paths: dict = None,\n path_enum: EnumMeta = None,\n default_path: Union[str, Enum] = None,\n call: FunctionType = None,\n logger=None,\n debug: bool = False,\n exception_handler: FunctionType = None,\n) -> dict:\n \"\"\"Process an AWS Lambda event.\n\n Args:\n event: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html\n context: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html\n queue_type (QueueType): The event source type.\n paths (dict): Keys are path names / enums and values are a list of Action objects\n path_enum (EnumMeta): An Enum class which define the possible paths available in this lambda.\n default_path (Union[str, Enum]): The path to be run for every message received.\n call (FunctionType): A callable which, if set and `paths` is not, will disable directed-graph workflow features and default to calling this\n logger:\n debug (bool):\n exception_handler (FunctionType): A function which will be used to capture exceptions (e.g. contrib.sentry.capture)\n \"\"\"\n logger = lpipe.logging.setup(logger=logger, context=context, debug=debug)\n logger.debug(f\"Event received. 
queue: {queue_type}, event: {event}\")\n\n try:\n assert isinstance(queue_type, QueueType)\n except AssertionError as e:\n raise lpipe.exceptions.InvalidConfigurationError(\n f\"Invalid queue type '{queue_type}'\"\n ) from e\n\n if isinstance(call, FunctionType):\n if not paths:\n default_path = \"AUTO_PATH\"\n paths = {default_path: [call]}\n else:\n raise lpipe.exceptions.InvalidConfigurationError(\n \"If you initialize lpipe with a function/callable, you cannot define paths, as you have disabled the directed-graph interface.\"\n )\n\n paths, path_enum = normalize.normalize_path_enum(path_enum=path_enum, paths=paths)\n\n successful_records = []\n records = get_records_from_event(queue_type, event)\n try:\n assert isinstance(records, list)\n except AssertionError as e:\n logger.error(f\"'records' is not a list {utils.exception_to_str(e)}\")\n return build_event_response(0, 0, logger)\n\n _output = []\n _exceptions = []\n for encoded_record in records:\n ret = None\n try:\n try:\n _payload = get_payload_from_record(\n queue_type=queue_type,\n record=encoded_record,\n validate=False if default_path else True,\n )\n _path = default_path if default_path else _payload[\"path\"]\n _kwargs = _payload if default_path else _payload[\"kwargs\"]\n _event_source = get_event_source(queue_type, encoded_record)\n payload = Payload(\n path=_path, kwargs=_kwargs, event_source=_event_source\n ).validate(path_enum)\n except AssertionError as e:\n raise lpipe.exceptions.InvalidPayloadError(\n \"'path' or 'kwargs' missing from payload.\"\n ) from e\n except TypeError as e:\n raise lpipe.exceptions.InvalidPayloadError(\n f\"Bad record provided for queue type {queue_type}. {encoded_record} {utils.exception_to_str(e)}\"\n ) from e\n\n with logger.context(bind={\"payload\": payload.to_dict()}):\n logger.log(\"Record received.\")\n\n # Run your path/action/functions against the payload found in this record.\n ret = execute_payload(\n payload=payload,\n path_enum=path_enum,\n paths=paths,\n logger=logger,\n event=event,\n context=context,\n debug=debug,\n )\n\n # Will handle cleanup for successful records later, if necessary.\n successful_records.append(encoded_record)\n except lpipe.exceptions.FailButContinue as e:\n # CAPTURES:\n # lpipe.exceptions.InvalidPayloadError\n # lpipe.exceptions.InvalidPathError\n logger.error(str(e))\n if exception_handler:\n exception_handler(e)\n continue # User can say \"bad thing happened but keep going.\" This drops poisoned records on the floor.\n except lpipe.exceptions.FailCatastrophically as e:\n # CAPTURES:\n # lpipe.exceptions.InvalidConfigurationError\n # raise (later)\n if exception_handler:\n exception_handler(e)\n _exceptions.append({\"exception\": e, \"record\": encoded_record})\n _output.append(ret)\n\n response = build_event_response(\n n_records=len(records), n_ok=len(successful_records), logger=logger\n )\n\n if _exceptions:\n # Handle cleanup for successful records, if necessary, before creating an error state.\n advanced_cleanup(queue_type, successful_records, logger)\n\n logger.info(\n f\"Encountered exceptions while handling one or more records. 
RESPONSE: {response}\"\n )\n raise lpipe.exceptions.FailCatastrophically(_exceptions)\n\n if any(_output):\n response[\"output\"] = _output\n if debug:\n response[\"debug\"] = json.dumps({\"records\": records}, cls=utils.AutoEncoder)\n\n return response\n\n\ndef execute_payload(\n payload: Payload,\n path_enum: EnumMeta,\n paths: dict,\n logger,\n event,\n context,\n debug: bool = False,\n exception_handler: FunctionType = None,\n) -> Any:\n \"\"\"Given a Payload, execute Actions in a Path and fire off messages to the payload's Queues.\n\n Args:\n payload (Payload):\n path_enum (EnumMeta): An Enum class which define the possible paths available in this lambda.\n paths (dict): Keys are path names / enums and values are a list of Action objects\n logger:\n event: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html\n context: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html\n debug (bool):\n exception_handler (FunctionType): A function which will be used to capture exceptions (e.g. contrib.sentry.capture)\n \"\"\"\n if not logger:\n logger = lpipe.logging.LPLogger()\n\n ret = None\n\n if payload.path is not None and not isinstance(payload.path, path_enum):\n payload.path = normalize.normalize_path(path_enum, payload.path)\n\n if isinstance(payload.path, Enum): # PATH\n paths[payload.path] = normalize.normalize_actions(paths[payload.path])\n\n for action in paths[payload.path]:\n ret = execute_action(\n payload=payload,\n path_enum=path_enum,\n paths=paths,\n action=action,\n logger=logger,\n event=event,\n context=context,\n debug=debug,\n exception_handler=exception_handler,\n )\n\n elif isinstance(payload.queue, Queue): # QUEUE (aka SHORTCUT)\n queue = payload.queue\n assert isinstance(queue.type, QueueType)\n if queue.path:\n record = {\"path\": queue.path, \"kwargs\": payload.kwargs}\n else:\n record = payload.kwargs\n with logger.context(\n bind={\n \"path\": queue.path,\n \"queue_type\": queue.type,\n \"queue_name\": queue.name,\n \"record\": record,\n }\n ):\n logger.log(\"Pushing record.\")\n put_record(queue=queue, record=record)\n else:\n logger.info(\n f\"Path should be a string (path name), Path (path Enum), or Queue: {payload.path})\"\n )\n\n return ret\n\n\ndef execute_action(\n payload: Payload,\n path_enum: EnumMeta,\n paths: dict,\n action: Action,\n logger,\n event,\n context,\n debug: bool = False,\n exception_handler: FunctionType = None,\n):\n \"\"\"Execute functions, paths, and queues (shortcuts) in an Action.\n\n Args:\n payload (Payload):\n path_enum (EnumMeta): An Enum class which define the possible paths available in this lambda.\n paths (dict): Keys are path names / enums and values are a list of Action objects\n action: (Action):\n logger:\n event: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html\n context: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html\n debug (bool):\n exception_handler (FunctionType): A function which will be used to capture exceptions (e.g. 
contrib.sentry.capture)\n \"\"\"\n assert isinstance(action, Action)\n ret = None\n\n # Build action kwargs and validate type hints\n try:\n dummy = [\"logger\", \"event\"]\n action_kwargs = build_action_kwargs(\n action, {**{k: None for k in dummy}, **payload.kwargs}\n )\n for k in dummy:\n action_kwargs.pop(k, None)\n except (TypeError, AssertionError) as e:\n raise lpipe.exceptions.InvalidPayloadError(\n f\"Failed to run {payload.path.name} {action} due to {utils.exception_to_str(e)}\"\n ) from e\n\n default_kwargs = {\n \"logger\": logger,\n \"event\": PayloadEvent(event=event, context=context, payload=payload),\n }\n\n # Run action functions\n for f in action.functions:\n assert isinstance(f, FunctionType)\n try:\n # TODO: if ret, set _last_output\n _log_context = {\"path\": payload.path.name, \"function\": f.__name__}\n with logger.context(bind={**_log_context, \"kwargs\": action_kwargs}):\n logger.log(\"Executing function.\")\n with logger.context(bind=_log_context):\n ret = f(**{**action_kwargs, **default_kwargs})\n ret = return_handler(\n ret=ret,\n path_enum=path_enum,\n paths=paths,\n logger=logger,\n event=event,\n context=context,\n debug=debug,\n )\n except lpipe.exceptions.LPBaseException:\n # CAPTURES:\n # lpipe.exceptions.FailButContinue\n # lpipe.exceptions.FailCatastrophically\n raise\n except Exception as e:\n logger.error(\n f\"Skipped {payload.path.name} {f.__name__} due to unhandled Exception. This is very serious; please update your function to handle this. Reason: {utils.exception_to_str(e)}\"\n )\n if exception_handler:\n exception_handler(e)\n if debug:\n raise lpipe.exceptions.FailCatastrophically(\n utils.exception_to_str(e)\n ) from e\n\n payloads = []\n for _path in action.paths:\n payloads.append(\n Payload(\n path=normalize.normalize_path(path_enum, _path),\n kwargs=action_kwargs,\n event_source=payload.event_source,\n ).validate(path_enum)\n )\n\n for _queue in action.queues:\n payloads.append(\n Payload(\n queue=_queue, kwargs=action_kwargs, event_source=payload.event_source\n ).validate()\n )\n\n for p in payloads:\n ret = execute_payload(p, path_enum, paths, logger, event, context, debug)\n\n return ret\n\n\ndef return_handler(\n ret: Any, path_enum: EnumMeta, paths: dict, logger, event, context, debug: bool\n) -> Any:\n if not ret:\n return ret\n _payloads = []\n try:\n if isinstance(ret, Payload):\n _payloads.append(ret.validate(path_enum))\n elif isinstance(ret, list):\n for r in ret:\n if isinstance(r, Payload):\n _payloads.append(r.validate(path_enum))\n except Exception as e:\n logger.debug(utils.exception_to_str(e))\n raise lpipe.exceptions.FailButContinue(\n f\"Something went wrong while extracting Payloads from a function return value: {ret}\"\n ) from e\n\n if _payloads:\n logger.debug(f\"{len(_payloads)} dynamic payloads received\")\n for p in _payloads:\n logger.debug(f\"Executing dynamic payload: {p}\")\n try:\n ret = execute_payload(p, path_enum, paths, logger, event, context, debug)\n except Exception as e:\n logger.debug(utils.exception_to_str(e))\n raise lpipe.exceptions.FailButContinue(\n f\"Failed to execute returned Payload: {p}\"\n ) from e\n return ret\n\n\ndef advanced_cleanup(queue_type: QueueType, records: list, logger, **kwargs):\n \"\"\"If exceptions were raised, cleanup all successful records before raising.\n\n Args:\n queue_type (QueueType):\n records (list): records which we succesfully executed\n logger:\n \"\"\"\n if queue_type == QueueType.SQS:\n cleanup_sqs_records(records, logger)\n # If the queue type was not 
handled, no cleanup was necessary by lpipe.\n\n\ndef cleanup_sqs_records(records: list, logger):\n base_err_msg = (\n \"Unable to delete successful records messages from SQS queue. AWS should \"\n \"still handle this automatically when the lambda finishes executing, but \"\n \"this may result in successful messages being sent to the DLQ if any \"\n \"other messages fail.\"\n )\n try:\n Message = namedtuple(\"Message\", [\"message_id\", \"receipt_handle\"])\n messages = defaultdict(list)\n for record in records:\n m = Message(\n message_id=mindictive.get_nested(record, [\"messageId\"]),\n receipt_handle=mindictive.get_nested(record, [\"receiptHandle\"]),\n )\n messages[mindictive.get_nested(record, [\"eventSourceARN\"])].append(m)\n for k in messages.keys():\n queue_url = sqs.get_queue_url(k)\n sqs.batch_delete_messages(\n queue_url,\n [\n {\"Id\": m.message_id, \"ReceiptHandle\": m.receipt_handle}\n for m in messages\n ],\n )\n except KeyError as e:\n logger.warning(\n f\"{base_err_msg} If you're testing, this is not an issue. {utils.exception_to_str(e)}\"\n )\n except Exception as e:\n logger.warning(f\"{base_err_msg} {utils.exception_to_str(e)}\")\n\n\ndef build_action_kwargs(action: Action, kwargs: dict) -> dict:\n \"\"\"Build dictionary of kwargs for a specific action.\n\n Args:\n action (Action)\n kargs (dict): kwargs provided in the event's message\n\n Returns:\n dict: validated kwargs required by action\n \"\"\"\n action_kwargs = build_kwargs(\n functions=action.functions,\n required_params=action.required_params,\n kwargs=kwargs,\n )\n if action.include_all_params:\n action_kwargs.update(kwargs)\n return action_kwargs\n\n\ndef build_kwargs(kwargs: dict, functions: list, required_params: list = None) -> dict:\n \"\"\"Build dictionary of kwargs for the union of function signatures.\n\n Args:\n functions (list): functions which a particular action should call\n required_params (list): manually defined parameters\n kargs (dict): kwargs provided in the event's message\n\n Returns:\n dict: validated kwargs required by action\n \"\"\"\n kwargs_union = {}\n if not required_params and functions:\n kwargs_union = signature.validate(functions, kwargs)\n elif required_params and isinstance(required_params, list):\n for param in required_params:\n param_name = param[0] if isinstance(param, tuple) else param\n try:\n # Assert required field was provided.\n assert param_name in kwargs\n except AssertionError as e:\n raise lpipe.exceptions.InvalidPayloadError(\n f\"Missing param '{param_name}'\"\n ) from e\n\n # Set param in kwargs. 
If the param is a tuple, use the [1] as the new key.\n if isinstance(param, tuple) and len(param) == 2:\n kwargs_union[param[1]] = kwargs[param[0]]\n else:\n kwargs_union[param] = kwargs[param]\n elif not required_params:\n return {}\n else:\n raise lpipe.exceptions.InvalidPayloadError(\n \"You either didn't provide functions or required_params was not an instance of list or NoneType.\"\n )\n return kwargs_union\n\n\ndef get_raw_payload(record) -> dict:\n \"\"\"Decode and validate a json record.\"\"\"\n assert record is not None\n return record if isinstance(record, dict) else json.loads(record)\n\n\ndef get_kinesis_payload(record) -> dict:\n \"\"\"Decode and validate a kinesis record.\"\"\"\n assert record[\"kinesis\"][\"data\"] is not None\n return json.loads(base64.b64decode(bytearray(record[\"kinesis\"][\"data\"], \"utf-8\")))\n\n\ndef get_sqs_payload(record) -> dict:\n \"\"\"Decode and validate an sqs record.\"\"\"\n assert record[\"body\"] is not None\n return json.loads(record[\"body\"])\n\n\ndef get_records_from_event(queue_type: QueueType, event):\n if queue_type == QueueType.RAW:\n return event\n if queue_type == QueueType.KINESIS:\n return event[\"Records\"]\n if queue_type == QueueType.SQS:\n return event[\"Records\"]\n\n\ndef get_event_source(queue_type: QueueType, record):\n if queue_type in (QueueType.RAW, QueueType.KINESIS, QueueType.SQS):\n return mindictive.get_nested(record, [\"event_source_arn\"], None)\n warnings.warn(f\"Unable to fetch event_source for {queue_type} record.\")\n return None\n\n\ndef get_payload_from_record(queue_type: QueueType, record, validate=True) -> dict:\n try:\n if queue_type == QueueType.RAW:\n payload = get_raw_payload(record)\n if queue_type == QueueType.KINESIS:\n payload = get_kinesis_payload(record)\n if queue_type == QueueType.SQS:\n payload = get_sqs_payload(record)\n except json.JSONDecodeError as e:\n raise lpipe.exceptions.InvalidPayloadError(\n f\"Payload contained invalid json. {utils.exception_to_str(e)}\"\n ) from e\n if validate:\n for field in [\"path\", \"kwargs\"]:\n assert field in payload\n return payload\n\n\ndef put_record(queue: Queue, record: dict):\n if queue.type == QueueType.KINESIS:\n return kinesis.put_record(stream_name=queue.name, data=record)\n if queue.type == QueueType.SQS:\n if not queue.url:\n queue.url = sqs.get_queue_url(queue.name)\n try:\n return sqs.put_message(queue_url=queue.url, data=record)\n except Exception as e:\n raise lpipe.exceptions.FailCatastrophically(\n f\"Failed to send message to {queue}\"\n ) from e\n" } ]
1
emenutask/emenu
https://github.com/emenutask/emenu
13af910b90cea9caa15a34402a927d8935b4621c
ea345ff7e687bc220a32cc9ee4864c64d5b54568
8e48ad80623328d1c63c1892dc683b5aa8a69772
refs/heads/master
2020-03-27T02:57:14.281708
2018-08-25T13:04:17
2018-08-25T13:04:17
145,828,517
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5109826326370239, "alphanum_fraction": 0.5271676182746887, "avg_line_length": 20.625, "blob_id": "74a5a1ef4aa4e51f50fb9e999fd7b96c39c49d3f", "content_id": "f5c5cf4859e9c3d60a605f6ac14233abd378f488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 865, "license_type": "no_license", "max_line_length": 55, "num_lines": 40, "path": "/frontend/src/components/menu/Details.js", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport { Link } from 'react-router-dom';\n\nimport './Details.css';\n\n\nclass Details extends Component {\n state = {\n name: null,\n dishes: [],\n };\n\n async componentDidMount() {\n const id = this.props.match.params.id;\n fetch(`http://127.0.0.1:8000/menu/${id}/`)\n .then(results => { return results.json();})\n .then(data => this.setState({\n name: data.name,\n dishes: data.dish_set\n }))\n }\n\n render() {\n return (\n <div>\n <h1>{this.state.name}</h1>\n {this.state.dishes.map(item => (\n <div key={item.id} className='dish'>\n <h3>{item.name}</h3>\n <span className='price'>{item.price}</span>\n <div>{item.description}</div>\n </div>\n ))}\n <Link to=''>Back</Link>\n </div>\n );\n }\n}\n\nexport default Details;\n" }, { "alpha_fraction": 0.7744916677474976, "alphanum_fraction": 0.7781885266304016, "avg_line_length": 30.823530197143555, "blob_id": "81e4dcb0e6a04c6585c2cd0d9dca28a1d08c116f", "content_id": "61a8bb16ade2371f7f1b337ff6b57af350798654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 61, "num_lines": 17, "path": "/menu/views.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "from rest_framework import generics\nfrom rest_framework.filters import OrderingFilter\n\nfrom .models import Menu\nfrom .serializers import MenuDetailSerializer, MenuSerializer\n\n\nclass MenuList(generics.ListAPIView):\n queryset = Menu.objects.filter(dishes_count__gte=1)\n serializer_class = MenuSerializer\n filter_backends = (OrderingFilter,)\n ordering_fields = ('name', 'dishes_count')\n\n\nclass MenuDetail(generics.RetrieveAPIView):\n serializer_class = MenuDetailSerializer\n queryset = Menu.objects.filter(dishes_count__gte=1)\n" }, { "alpha_fraction": 0.7798742055892944, "alphanum_fraction": 0.7987421154975891, "avg_line_length": 25.5, "blob_id": "f3420e9249e8ff9606d6a08f82ff4664259b43b5", "content_id": "ce08a693a0a11539297fb5c953b6d66f512c7024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 159, "license_type": "no_license", "max_line_length": 41, "num_lines": 6, "path": "/frontend/Dockerfile", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "FROM node:8.2.1-alpine\nRUN mkdir /frontend-code\nWORKDIR /frontend-code\nCOPY frontend/package.json /frontend-code\nRUN npm install\nADD frontend/ /frontend-code/\n" }, { "alpha_fraction": 0.4569651782512665, "alphanum_fraction": 0.4686567187309265, "avg_line_length": 29.687023162841797, "blob_id": "8892e023bc8663b990b8c6e9dd48920a2bf2b760", "content_id": "d213988a26f526275300886a046469e9aa05d4f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4020, "license_type": "no_license", "max_line_length": 69, "num_lines": 131, "path": "/menu/tests/test_views.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "import 
datetime\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom ..models import Dish, Menu\n\n\nclass MenuListTests(APITestCase):\n def test_list_menus(self):\n \"\"\"\n Ensure we list only non-empty menus\n \"\"\"\n menu_with_two_dishes = Menu.objects.create(\n name='Small menu',\n description=''\n )\n Dish.objects.create(\n menu=menu_with_two_dishes,\n name='First dish',\n description='nothing special',\n is_vegetarian=True,\n price=12,\n preparation_time='2:00'\n )\n Dish.objects.create(\n menu=menu_with_two_dishes,\n name='Second dish',\n description='nothing special',\n is_vegetarian=True,\n price=12,\n preparation_time='2:00'\n )\n menu_with_one_dish = Menu.objects.create(\n name='Even smaller menu',\n description=''\n )\n Dish.objects.create(\n menu=menu_with_one_dish,\n name='The only dish in this menu',\n description='nothing special',\n is_vegetarian=True,\n price=12,\n preparation_time='2:00'\n )\n menu_without_dishes = Menu.objects.create(\n name='Nonsense menu',\n description=''\n )\n\n url = reverse('menu_api')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(\n {\n 'id': menu_with_one_dish.id,\n 'name': menu_with_one_dish.name,\n 'description': menu_with_one_dish.description\n },\n response.data['results']\n )\n self.assertIn(\n {\n 'id': menu_with_two_dishes.id,\n 'name': menu_with_two_dishes.name,\n 'description': menu_with_two_dishes.description\n },\n response.data['results']\n )\n self.assertNotIn(\n {\n 'id': menu_without_dishes.id,\n 'name': menu_without_dishes.name,\n 'description': menu_without_dishes.description\n },\n response.data['results']\n )\n\n\nclass MenuDetailTests(APITestCase):\n def test_retrieve_menu(self):\n \"\"\"\n Ensure we retrieve menu\n \"\"\"\n menu = Menu.objects.create(\n name='Small menu',\n description=''\n )\n dish1 = Dish.objects.create(\n menu=menu,\n name='Dish with meat',\n description='nothing special',\n is_vegetarian=False,\n price=28,\n preparation_time='28:00'\n )\n dish2 = Dish.objects.create(\n menu=menu,\n name='Vegetarian dish',\n description='nothing special',\n is_vegetarian=True,\n price=24,\n preparation_time='24:00'\n )\n\n url = reverse('menu_api_detail', kwargs={'pk': menu.id})\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n {\n 'id': menu.id,\n 'name': 'Small menu',\n 'description': '',\n 'created_at': menu.created_at.isoformat()[:26] + 'Z',\n 'dish_set': [\n {\n 'id': dish2.id,\n 'name': 'Vegetarian dish',\n 'description': 'nothing special',\n 'price': '24.00'\n },\n {\n 'id': dish1.id,\n 'name': 'Dish with meat',\n 'description': 'nothing special',\n 'price': '28.00'\n },\n ]\n },\n response.json()\n )\n" }, { "alpha_fraction": 0.5791062712669373, "alphanum_fraction": 0.5929951667785645, "avg_line_length": 32.79591751098633, "blob_id": "eae3dc4e63ae5d7160185210bacfef4cda16673d", "content_id": "474c111fe6b949fe1d1de7848189cd7ef461dbdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1656, "license_type": "no_license", "max_line_length": 77, "num_lines": 49, "path": "/menu/tests/test_signals.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom django.test import TestCase\n\nfrom ..models import Dish, Menu\n\n\nclass IncrementDishesCountTest(TestCase):\n def 
test_if_incremented_on_create(self):\n menu = Menu.objects.create(name='Summer specials', description='')\n self.assertEqual(menu.dishes_count, 0)\n Dish.objects.create(\n menu=menu,\n name='Lemonade',\n description='pieces of lemon, cucumber and mint served with ice',\n is_vegetarian=True,\n price=12,\n preparation_time='2:00'\n )\n self.assertEqual(menu.dishes_count, 1)\n\n def test_if_not_incremented_on_update(self):\n menu = Menu.objects.create(name='Summer specials', description='')\n dish = Dish.objects.create(\n menu=menu,\n name='Lemonade',\n description='pieces of lemon, cucumber and mint served with ice',\n is_vegetarian=True,\n price=12,\n preparation_time='2:00'\n )\n self.assertEqual(menu.dishes_count, 1)\n dish.price = 14\n dish.save()\n self.assertEqual(menu.dishes_count, 1)\n\n def test_if_decremented_on_delete(self):\n menu = Menu.objects.create(name='Summer specials', description='')\n dish = Dish.objects.create(\n menu=menu,\n name='Lemonade',\n description='pieces of lemon, cucumber and mint served with ice',\n is_vegetarian=True,\n price=12,\n preparation_time='2:00'\n )\n self.assertEqual(menu.dishes_count, 1)\n dish.delete()\n self.assertEqual(menu.dishes_count, 0)\n" }, { "alpha_fraction": 0.6584070920944214, "alphanum_fraction": 0.6584070920944214, "avg_line_length": 23.565217971801758, "blob_id": "218f0e17f5b511a8167d12c0999006335d13596c", "content_id": "8cbeec32956ce110de725841cebb51656db9821e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/menu/serializers.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import Dish, Menu\n\n\nclass DishSerializer(serializers.ModelSerializer):\n class Meta:\n model = Dish\n fields = ('id', 'name', 'description', 'price')\n\n\nclass MenuDetailSerializer(serializers.ModelSerializer):\n dish_set = DishSerializer(many=True)\n\n class Meta:\n model = Menu\n fields = ('id', 'name', 'description', 'created_at', 'dish_set')\n\n\nclass MenuSerializer(serializers.ModelSerializer):\n class Meta:\n model = Menu\n fields = ('id', 'name', 'description')\n" }, { "alpha_fraction": 0.7542600631713867, "alphanum_fraction": 0.7721973061561584, "avg_line_length": 32.787879943847656, "blob_id": "7562481ff5f73ce0cc9739fbac205ebcdd8e4188", "content_id": "e7ecf31772d56749b98cc5efbcbf507cc38eb9e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 190, "num_lines": 33, "path": "/README.md", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "# E-menu\n\nThis is a simple app for managing menus in admin panel and displaying them to users\n\n### Setup instructions\n\nTo set up the application run the following commands from the root directory\n1. `docker-compose build`\n2. `docker-compose up db`\n3. `docker-compose up`\n4. 
`docker-compose exec web ./manage.py migrate`\n\nTo manage menus via admin panel create admin account - it can be done by running\n\n`docker-compose exec web ./manage.py createsuperuser`\n\nthen visit [localhost:8000/admin/](http://localhost:8000/admin/)\n\nTo load fixture with default menus run\n\n`docker-compose exec web ./manage.py loaddata menu`\n\nThe application will be available at [localhost:3000/](http://localhost:3000)\n\nTo run the tests use the following command\n\n`docker-compose exec web ./manage.py test`\n\n### To consider later:\n\n* change docker settings to skip starting database separately from other containers, see e.g. [https://docs.docker.com/compose/startup-order/](https://docs.docker.com/compose/startup-order/)\n* create separate settings for different environments\n* add more features like uploading and displaying dish images\n" }, { "alpha_fraction": 0.5956873297691345, "alphanum_fraction": 0.5956873297691345, "avg_line_length": 19.61111068725586, "blob_id": "f1b087b7e441c9558a4872bbbf74f75259a8ca08", "content_id": "abfaca320f20380ede84691511be1b0617f74902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 371, "license_type": "no_license", "max_line_length": 57, "num_lines": 18, "path": "/frontend/src/components/App.js", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport { Route } from 'react-router-dom';\n\nimport Details from './menu/Details';\nimport List from './menu/List';\n\nclass App extends React.Component {\n render() {\n return (\n <div>\n <Route exact path=\"/\" component={List}/>\n <Route exact path=\"/:id\" component={Details}/>\n </div>\n )\n }\n}\n\nexport default App;\n" }, { "alpha_fraction": 0.7172523736953735, "alphanum_fraction": 0.720447301864624, "avg_line_length": 31.947368621826172, "blob_id": "b4bbedf4fdd62ce01957606bd87916798656552e", "content_id": "50f01e90470a355e01b1cad2759875f32d2c9033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/menu/signals.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "from django.db.models.signals import post_delete, post_save\nfrom django.dispatch import receiver\n\nfrom .models import Dish\n\n\n@receiver(post_save, sender=Dish, dispatch_uid='incrementing_dishes_count')\ndef increment_dishes_count(sender, instance, created, **kwargs):\n if created:\n menu = instance.menu\n menu.dishes_count += 1\n menu.save(update_fields=['dishes_count'])\n\n\n@receiver(post_delete, sender=Dish, dispatch_uid='decrementing_dishes_count')\ndef decrement_dishes_count(sender, instance, **kwargs):\n menu = instance.menu\n menu.dishes_count -= 1\n menu.save(update_fields=['dishes_count'])\n" }, { "alpha_fraction": 0.6738754510879517, "alphanum_fraction": 0.6825259327888489, "avg_line_length": 30.243244171142578, "blob_id": "91b934def92fae64c913e80a59e18e316b72a421", "content_id": "f23a7b141ceca0a0a4c07795b9f5097f2f18c0f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1156, "license_type": "no_license", "max_line_length": 79, "num_lines": 37, "path": "/menu/models.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.core import validators\n\n\nclass Menu(models.Model):\n name = models.CharField(max_length=200, unique=True)\n description = 
models.TextField()\n created_at = models.DateTimeField('creation date', auto_now_add=True)\n modified_at = models.DateTimeField('last modification date', auto_now=True)\n dishes_count = models.IntegerField(default=0, editable=False)\n\n class Meta:\n ordering = ('id',)\n\n def __str__(self):\n return self.name\n\n\nclass Dish(models.Model):\n name = models.CharField(max_length=200)\n description = models.TextField()\n price = models.DecimalField(\n decimal_places=2,\n max_digits=9,\n validators=[validators.MinValueValidator(0)]\n )\n preparation_time = models.DurationField()\n is_vegetarian = models.BooleanField()\n created_at = models.DateTimeField('creation date', auto_now_add=True)\n modified_at = models.DateTimeField('last modification date', auto_now=True)\n menu = models.ForeignKey(Menu, on_delete=models.CASCADE)\n\n class Meta:\n verbose_name_plural = 'dishes'\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.5374677181243896, "alphanum_fraction": 0.5865632891654968, "avg_line_length": 20.5, "blob_id": "764bc0b5e514ba248408451d6723245f1b1069c1", "content_id": "d85932f6fadd39c325afa6bf5a2bbb74694dd712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 65, "num_lines": 18, "path": "/menu/migrations/0002_menu_dishes_count.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2018-08-21 20:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('menu', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='menu',\n name='dishes_count',\n field=models.IntegerField(default=0, editable=False),\n ),\n ]\n" }, { "alpha_fraction": 0.8148148059844971, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 12.5, "blob_id": "aabbb87de9cf72a3eca4be8bb2f71fa0bb0d5371", "content_id": "8700b929875836bab09dd01daf332604bb708bc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 81, "license_type": "no_license", "max_line_length": 19, "num_lines": 6, "path": "/requirements.txt", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "Django==2.1\npsycopg2\npyaml\ndjangorestframework\ndjango-filter\ndjango-cors-headers\n" }, { "alpha_fraction": 0.5502778887748718, "alphanum_fraction": 0.5618999600410461, "avg_line_length": 38.58000183105469, "blob_id": "25feba2ba2ffaf296da35632bbbce5fb27cd6e76", "content_id": "7fff89d3080ba09419259df69c56fa1593de6e2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1979, "license_type": "no_license", "max_line_length": 137, "num_lines": 50, "path": "/menu/migrations/0001_initial.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2018-08-21 17:24\n\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Dish',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('description', models.TextField()),\n ('price', models.DecimalField(decimal_places=2, max_digits=9, validators=[django.core.validators.MinValueValidator(0)])),\n ('preparation_time', 
models.DurationField()),\n ('is_vegetarian', models.BooleanField()),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='creation date')),\n ('modified_at', models.DateTimeField(auto_now=True, verbose_name='last modification date')),\n ],\n options={\n 'verbose_name_plural': 'dishes',\n },\n ),\n migrations.CreateModel(\n name='Menu',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200, unique=True)),\n ('description', models.TextField()),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='creation date')),\n ('modified_at', models.DateTimeField(auto_now=True, verbose_name='last modification date')),\n ],\n options={\n 'ordering': ('id',),\n },\n ),\n migrations.AddField(\n model_name='dish',\n name='menu',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.Menu'),\n ),\n ]\n" }, { "alpha_fraction": 0.47999998927116394, "alphanum_fraction": 0.4872727394104004, "avg_line_length": 16.1875, "blob_id": "ce2dcd3aaee90bf419204a19329fad111cd8a726", "content_id": "52ab06ee1df8980048f5e4f4536590992ae35c23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 35, "num_lines": 16, "path": "/menu/urls.py", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(\n r'^$',\n views.MenuList.as_view(),\n name='menu_api'\n ),\n url(\n r'^(?P<pk>[0-9]+)/$',\n views.MenuDetail.as_view(),\n name='menu_api_detail'\n ),\n]\n" }, { "alpha_fraction": 0.5691763162612915, "alphanum_fraction": 0.5733590722084045, "avg_line_length": 26.75, "blob_id": "164a3d76a4420409ade895992158bf5c46cacc6b", "content_id": "14afb946b67d5e56827705298a9c462b687257b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3108, "license_type": "no_license", "max_line_length": 136, "num_lines": 112, "path": "/frontend/src/components/menu/List.js", "repo_name": "emenutask/emenu", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport { Link } from 'react-router-dom';\n\nimport './List.css'\n\n\nclass List extends Component {\n state = {\n menus: [],\n sortingHumanName: 'id',\n next: null,\n previous: null\n };\n\n constructor(props) {\n super(props);\n this.handleClickOutside = this.handleClickOutside.bind(this);\n };\n\n async componentDidMount() {\n this.getMenus();\n document.addEventListener('click', this.handleClickOutside);\n };\n\n componentWillUnmount() {\n document.removeEventListener('click', this.handleClickOutside);\n };\n\n getMenusUrl(sorting) {\n let url = 'http://127.0.0.1:8000/menu/'\n if (sorting){\n url = url.concat(`?ordering=${sorting}`);\n }\n return url;\n };\n\n getMenus(url) {\n if (!url) {\n url = this.getMenusUrl();\n }\n fetch(url)\n .then(results => { return results.json();})\n .then(data => this.setState({\n menus: data.results,\n next: data.next,\n previous: data.previous\n }))\n };\n\n openDropdown() {\n document.getElementById('myDropdown').classList.toggle('show');\n };\n\n handleClickOutside(event) {\n if (!event.target.matches('.dropbtn')) {\n var dropdowns = document.getElementsByClassName('dropdown-content');\n var i;\n for (i = 0; i < dropdowns.length; i++) {\n var openDropdown = dropdowns[i];\n if (openDropdown.classList.contains('show')) {\n 
openDropdown.classList.remove('show');\n }\n }\n }\n };\n\n updateSorting(name, humanName) {\n this.setState({sortingHumanName: humanName});\n const url = this.getMenusUrl(name);\n this.getMenus(url);\n };\n\n updatePage(url) {\n this.getMenus(url);\n };\n\n render() {\n return (\n <div>\n <div className='sorting'>\n sort by\n <div className='dropdown'>\n <button onClick={this.openDropdown} className='dropbtn btn'>{this.state.sortingHumanName}</button>\n <div id='myDropdown' className='dropdown-content'>\n <a onClick={() => this.updateSorting('name', 'names asc')}>name asc</a>\n <a onClick={() => this.updateSorting('-name', 'name desc')}>name desc</a>\n <a onClick={() => this.updateSorting('dishes_count', 'number of dishes asc')}>number of dishes asc</a>\n <a onClick={() => this.updateSorting('-dishes_count', 'number of dishes desc')}>number of dishes desc</a>\n </div>\n </div>\n </div>\n <div className='clearfix'/>\n\n {this.state.menus.map(item => (\n <div key={item.id} className='menu'>\n <Link to={`/${item.id}`}>\n <h1>{item.name}</h1>\n <span>{item.description}</span>\n </Link>\n </div>\n ))}\n\n <div className='pagination'>\n {this.state.previous && <button className='btn' onClick={() => this.updatePage(this.state.previous)}>Previous page</button>}\n {this.state.next && <button className='btn' onClick={() => this.updatePage(this.state.next)}>Next page</button>}\n </div>\n </div>\n );\n }\n}\n\nexport default List;\n" } ]
15
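A note on the `menu/signals.py` file in the record above: the in-memory `menu.dishes_count += 1` followed by `save()` is a read-modify-write that can race when two dishes are created concurrently. Below is a minimal sketch of an atomic variant, assuming Django's `F()` expressions and the same `Dish`/`Menu` models; this is an editorial illustration, not code from the repository:

```python
from django.db.models import F
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver

from .models import Dish, Menu


@receiver(post_save, sender=Dish, dispatch_uid='incrementing_dishes_count')
def increment_dishes_count(sender, instance, created, **kwargs):
    if created:
        # Single SQL "UPDATE ... SET dishes_count = dishes_count + 1",
        # so two concurrent creates cannot both read the same stale value.
        Menu.objects.filter(pk=instance.menu_id).update(dishes_count=F('dishes_count') + 1)


@receiver(post_delete, sender=Dish, dispatch_uid='decrementing_dishes_count')
def decrement_dishes_count(sender, instance, **kwargs):
    Menu.objects.filter(pk=instance.menu_id).update(dishes_count=F('dishes_count') - 1)
```

Because the arithmetic runs in the database, the counter stays consistent without locking; the trade-off is that any in-memory `Menu` instance must be refreshed before its `dishes_count` is read again.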
xeenypl/ebookCat
https://github.com/xeenypl/ebookCat
55887472cbf0d5070d4a9fb64385c875e2033f06
df3d50f0f9be131fe38008c38726b7a8e2fe7b51
05e1538e8eb2f3b9cef235d32e297262b55b1a68
refs/heads/master
2020-06-02T15:27:32.191652
2019-06-10T17:20:54
2019-06-10T17:20:54
191208953
1
0
null
2019-06-10T16:53:29
2019-06-10T16:56:17
2019-06-10T17:01:12
Python
[ { "alpha_fraction": 0.5286549925804138, "alphanum_fraction": 0.5426900386810303, "avg_line_length": 22.108108520507812, "blob_id": "8df72d0b9f7c09c09d2bd20a4ecc295750a7dd74", "content_id": "90ad9973408ed0da174bbcc0c023fe99deb07766", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "permissive", "max_line_length": 57, "num_lines": 37, "path": "/epub.py", "repo_name": "xeenypl/ebookCat", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys\nimport os\nimport html2text\nimport pdftotext\n\nfrom ebooklib import epub\n\ndef epubRead(fname):\n book = epub.read_epub(sys.argv[1])\n h = html2text.HTML2Text()\n for item in book.items:\n if isinstance(item, epub.EpubHtml):\n print(\"=\" * 80)\n print(h.handle(item.content.decode(\"utf-8\")))\n\ndef pdfRead(fname):\n with open(fname, \"rb\") as f:\n pdf = pdftotext.PDF(f)\n for page in pdf:\n print(page)\n\ndef helpText():\n print(\"using:\")\n print(\" \", sys.argv[0], \"fileName\")\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n f, ext = os.path.splitext(sys.argv[1])\n if ext == \".pdf\":\n pdfRead(sys.argv[1])\n elif ext == \".epub\":\n epubRead(sys.argv[1])\n else:\n helpText()\n else:\n helpText()\n" }, { "alpha_fraction": 0.7336065769195557, "alphanum_fraction": 0.7336065769195557, "avg_line_length": 19.25, "blob_id": "a3822f1ed8c63038af5be19a420c8b90a6f1baee", "content_id": "cdeefe3fd5a5ba179fb502f637ba6995edd7260e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 244, "license_type": "permissive", "max_line_length": 89, "num_lines": 12, "path": "/README.md", "repo_name": "xeenypl/ebookCat", "src_encoding": "UTF-8", "text": "ebookCat\n========\nebookCat is simplest posible program to read ebooks\n\n# How to use:\nebookCat works like cli program cat takes filename and prints text on stdout for example:\n```bash\nebookCat book.epub\n```\n# suported file format:\n* epub\n* pdf \n" } ]
2
sergant97/SpamSms
https://github.com/sergant97/SpamSms
09b42ad99d658797ec9b53c0bfa2cbaabcad40c5
c8fa06ebed8f6733080ef61e46a8e60a9b99880c
d0f20c395f41ebf41763ecc42b45586a9cf0e653
refs/heads/master
2020-06-16T10:15:49.776770
2019-06-25T09:46:12
2019-06-25T09:46:12
195536343
0
0
null
2019-07-06T12:20:22
2019-06-25T09:51:20
2019-06-25T09:46:13
null
[ { "alpha_fraction": 0.5457198619842529, "alphanum_fraction": 0.6274319291114807, "avg_line_length": 23.4761905670166, "blob_id": "4dced2bfc76718c0e542b0a566d089b42fd4e6cc", "content_id": "f9891698a9c458d4f1e3e7b12921e9353b073f30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1082, "license_type": "permissive", "max_line_length": 83, "num_lines": 42, "path": "/SpamSms/src/sms.py", "repo_name": "sergant97/SpamSms", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Coded by mrb12in\n\"\"\"Khromav lox\n\"\"\"\nfrom multiprocessing.pool import ThreadPool\ntry:\n\timport os, time, requests, sys\nexcept ModuleNotFoundError:\n\tprint(\"\\nНадо доустановить кое-что\")\n\tprint(\"$ pip install requests\\n\")\n\texit()\n\nbanner=(\"\"\"\\033[1;36m\n _ _\n _| || |_ \\033[1;32mSMS SPAMER (UPDATE)\\033[1;36m\n |_ .. _|\n |_ _| \\033[1;31mContact=>https://vk.com/mrb12in\\033[1;36m\n |_||_| \\033[1;31mGithub=>https://github.com/mrb12in\n\"\"\")\n\nos.system('clear')\nprint(banner)\nno = input(\"\\033[1;37m Target Number =>\\033[1;32m\")\ntot = int(input(\"\\033[1;37mMessage Spam =>\\033[1;32m\"))\nspam = {'msisdn':no}\nidk = '200'\ndef main(arg):\n\ttry:\n\t\tr = requests.post('https://registrasi.tri.co.id/daftar/generateOTP?',data = spam)\n#\t\tprint(r.text)\n\t\tif str(idk) in str(r.text):\n\t\t\tprint(\"\\033[1;32m[+] SUCCESS(РАботает ебать)\")\n\t\telse:\n\t\t\tprint(\"\\033[1;31m[-] У сука сервис пизды дал\")\n\texcept: pass\n\njobs = []\nfor x in range(tot):\n jobs.append(x)\np=ThreadPool(10)\np.map(main,jobs)\n" }, { "alpha_fraction": 0.7228464484214783, "alphanum_fraction": 0.7303370833396912, "avg_line_length": 13.052631378173828, "blob_id": "487ac2f4a9462756905e3032fe33ed648afd1849", "content_id": "978b4bb8baf5b50bda1601bfa975fa125f4bc645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 296, "license_type": "no_license", "max_line_length": 45, "num_lines": 19, "path": "/README.md", "repo_name": "sergant97/SpamSms", "src_encoding": "UTF-8", "text": "# SpamSms\nSpecial for Khromov\n# How to Use\n$ pkg update && pkg upgrade\n\n$ pkg install python\n\n$ pkg install git\n\n$ git clone https://github.com/mrb-in/SpamSms\n\n$ pip install requests mechanize bs4\n\n$ cd SpamSms\n\n$ python main.py\n\n# Примечание\n6ой сервис в разработке\n" } ]
2
Sabrinalulu/eggs-and-floor-problem-with-cost-factor
https://github.com/Sabrinalulu/eggs-and-floor-problem-with-cost-factor
3c4d8e2250bb6bf71b83a5bb77ea358af089bb9a
17a85f6ed4d0e9e0bee0dbae934c3d2023d7b7bc
e4b805796143b07184ca60baabf5116907f0d18a
refs/heads/master
2020-12-05T14:55:56.338312
2020-01-08T21:09:15
2020-01-08T21:09:15
232146042
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.39319249987602234, "alphanum_fraction": 0.43192487955093384, "avg_line_length": 27.399999618530273, "blob_id": "2ff88ec8ffd800a07ed265c9d924163f51be2b50", "content_id": "904996129d778104d6061f9d3af8f150a3538495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": "no_license", "max_line_length": 87, "num_lines": 30, "path": "/throwing.py", "repo_name": "Sabrinalulu/eggs-and-floor-problem-with-cost-factor", "src_encoding": "UTF-8", "text": "import numpy as np\n# m: number of floors, n: number of eggs \ndef MaxCost(m,n):\n# generating a two-dimensional array\n C = np.zeros((m+1, n+1), dtype=np.int) \n \n for i in range(1,n+1):\n C[1][i] = 15\n \n for j in range(0, m+1 ):\n if (j==0):\n C[j][1]=0\n else:\n C[j][1] = j * 10 + 5\n \n for x in range(2, n +1):\n for y in range(2, m +1):\n for z in range(1, y +1):\n if (z == 1):\n # initial value\n C[y][x] = 10 + max(C[z - 1][x - 1] + 5, C[y - z][x]) \n else:\n C[y][x] = min(C[y][x], (10 + max(C[z - 1][x - 1] + 5, C[y - z][x])))\n print(C)\n print(\"The mininum cost is :\",C[y][x])\n\nif __name__=='__main__':\n\tM=int(input(\"please enter eggNum\\n\"))\n\tN=int(input(\"please enter floorNum\\n\"))\n\tMaxCost(M,N)\n" }, { "alpha_fraction": 0.7326419949531555, "alphanum_fraction": 0.7434625625610352, "avg_line_length": 99.7727279663086, "blob_id": "928606a09ede8983fc08cdc459c5e9453a413a1c", "content_id": "17ab6a7745f05c6121df9f0ec90630ce4e68f49f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2218, "license_type": "no_license", "max_line_length": 605, "num_lines": 22, "path": "/README.md", "repo_name": "Sabrinalulu/eggs-and-floor-problem-with-cost-factor", "src_encoding": "UTF-8", "text": "# eggs-and-floor-problem-with-cost-factor\nThe solution of eggs and floors problem may be a way to test the quality of a mobile phone (e.g., bump-resistance). After adding the cost factor, we can also compute the minimum cost of the testing. If the final cost is affordable, this throwing method is feasible. If not, we had better change another way to test. \n\n## Problem Outline\nYou are given n eggs and a m-floor building. You need to figure out the highest floor an egg can be dropped without breaking, assuming that (i) all eggs are identical, (ii) if an egg breaks after being dropped from one floor, then the egg will also break if dropped from all higher floors, (iii) if an egg does not break after being thrown from a certain floor, it retains all of its strength and you can continue to use that egg, and (iiii) if an egg break, it costs 5 extra dollars, otherwise it cost 10 dollars. Your goal is to minimize the cost of throws. From which floor do you drop the first egg? \n\n## Solution\nUse dynamic programming method to solve this problem. With the base case (minimum cost of given one egg or one floor) and recurrence relation, we derive the final outcome to avoid the overlapping.\n\n### Notation\nC[y][x] means the minimum cost when y floors and x eggs are given. If y=1, C[1][x]=15 which means only one test is needed,and the worst situation: the egg is broken). If x=1, C[y][1]=10y+5 which means when we throw the egg from the bottom floor, the egg will break on the top floor (the wrost situation).\n\n### Optimality\nThe operation starts from bottom to top of the building to find the optimal of each floor. 
So, adding an argument to get the minimum cost from the top level shuold be optiaml.\n\n### Recurrence relation\nWe can suppose z to represent the floor we judge, and the minimum cost of worst situation will be:\n- C[y][x]= min( C[y][x], 10 + max(C[z-1][x-1]+5,C[y-z][x]) ).\n(if(z=1): C[y][x]= 10+ max( C[z-1][x-1]+5, C[y-z][x]) ).\n\n> Left element: the senario of broken egg. The (z-1) denotes we don't need to test above floors because the egg is broken. \n> Right element: the senario of unbroken egg. The (y-z) denotes there are still y-z floors we need to test and the number of egg is unchanged.\n\n" } ]
2
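A note on the README in the record above: the bottom-up table in `throwing.py` can be cross-checked with a top-down memoized version of the same recurrence. Below is a minimal sketch, assuming the 10-dollar throw cost and 5-dollar breakage cost described there; `min_cost` is a hypothetical helper, not part of the repository:

```python
from functools import lru_cache

THROW_COST = 10  # every throw costs 10 dollars
BREAK_COST = 5   # a broken egg costs 5 dollars extra

@lru_cache(maxsize=None)
def min_cost(floors, eggs):
    if floors == 0:
        return 0  # nothing left to test
    if eggs == 1:
        # Worst case with one egg: throw from floor 1 upward,
        # and the final throw breaks the egg (C[y][1] = 10y + 5).
        return floors * THROW_COST + BREAK_COST
    best = float('inf')
    for z in range(1, floors + 1):
        broken = min_cost(z - 1, eggs - 1) + BREAK_COST  # egg breaks at floor z
        intact = min_cost(floors - z, eggs)              # egg survives floor z
        best = min(best, THROW_COST + max(broken, intact))
    return best

print(min_cost(10, 2))  # e.g. a hypothetical 10-floor building and 2 eggs
```

For any `(floors, eggs)` pair this returns the same value as `C[floors][eggs]` in the bottom-up table.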
vijaymarupudi/Categorization_Models
https://github.com/vijaymarupudi/Categorization_Models
df56ddce810a26660e5b34000482a4f09450dfd6
cac6fa72ca4dffc63a15b52a2dcc6a15f18e47cf
c48b1c646b0f8e9790425b0801ad2201d78649d9
refs/heads/master
2022-03-28T22:23:20.246927
2020-01-20T01:42:49
2020-01-20T01:42:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6845637559890747, "alphanum_fraction": 0.7154362201690674, "avg_line_length": 33.35384750366211, "blob_id": "0fee5484b44a7e727e993bcd28f53e97bca3080e", "content_id": "1f80030b0b49d8932361488daac236579c606603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2235, "license_type": "no_license", "max_line_length": 113, "num_lines": 65, "path": "/PrototypeModel.py", "repo_name": "vijaymarupudi/Categorization_Models", "src_encoding": "UTF-8", "text": "import numpy as np #this is needed for any arrays and other stuff\nimport matplotlib.pyplot as plt\n\n\n#The values below should change for different catgory sets. \n# e.g. NumDim is 6 for 6dNLSStim.txt\n\nNumDim = 4 #the number of dimenions or features\nNumAStim = 5 #the number of category A exemplars\nNumBStim = 4 #the number of category B exemplars\nNumTrainStim = 9 #The total number of training exemaplrs\nNumTransferStim = 7 #the number of transfer stimuli (if used)\nNumTotalStim = 16 #the total number of stimuli in the problem set\n\n#To make the easy, create one array for all the stimuli to be categorized\nStim = np.zeros((NumTotalStim, NumDim))\nStim = np.loadtxt(\"54taskStm.txt\", delimiter=\" \")\n\n#other examples\n#Stim = np.loadtxt(\"6dNLSStim.txt\", delimiter=\" \")\n#Stim = np.loadtxt(\"The75Task.txt\", delimiter=\" \")\n\n#declaring the prototypes here\nA_Prot=np.array([0,0,0,0])\nB_Prot=np.array([1,1,1,1])\n#A_Prot=np.array([0,0,0,0,0,0])\n#B_Prot=np.array([1,1,1,1,1,1])\n\n\n#an array of the weights in the task, corresponds to numDim\n#There are 6, so 0-5. #weights sum to 1.0 and have a range of 0-1\nweights = np.array ([.25, .25, .25, .25]) #for the four dim set\n#weights = np.array ([.17, .17, .17, .16, .16, .17]) #for the 6 dim set\n\nsens = 2 #can vary for simulations, usually between 0.1 and 10.0\nDistA = np.zeros(NumDim)\nDistB = np.zeros(NumDim)\nSimA = np.zeros(NumTotalStim)\nSimB = np.zeros(NumTotalStim)\nprobs = np.zeros(NumTotalStim)\n\n\n\nfor ThisStim in range(NumTotalStim):\n\tDistA = np.zeros(NumDim)\n\tDistB = np.zeros(NumDim)\n\n\tfor ThisDim in range(NumDim):\n\t\t#The comparison process is across all the dimenions in the task\n\t\t#each dimension has an attentional weight (which is given or estimated) that is a multiplier for each dimension\n\n\t\tDistA[ThisDim] = DistA[ThisDim] + (weights[ThisDim]) * abs((Stim[ThisStim,ThisDim] - A_Prot[ThisDim]))\n\t\tDistB[ThisDim] = DistB[ThisDim] + (weights[ThisDim]) * abs((Stim[ThisStim,ThisDim] - B_Prot[ThisDim]))\n\tSimA[ThisStim] = np.exp(-sens * sum(DistA))\n\tSimB[ThisStim] = np.exp(-sens * sum(DistB))\n\n\n\nfor ThisStim in range(NumTotalStim):\n\tprobs[ThisStim] = SimA[ThisStim] / (SimA[ThisStim] + SimB[ThisStim])\n\nprobs = np.vstack(probs)\nprint(np.round(probs,3))\n\nplt.show(plt.plot(probs))\n\n\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 46.5, "blob_id": "32003a2f848ff6ba3908499b9f7801673b539ed3", "content_id": "7b337d81ca1f967da494bf4aaf1de5ed742bfbb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 95, "license_type": "no_license", "max_line_length": 70, "num_lines": 2, "path": "/README.md", "repo_name": "vijaymarupudi/Categorization_Models", "src_encoding": "UTF-8", "text": "# Categorization_Models\nPython code for Minda &amp; Smith's Prototype model and Nosofsky's GCM\n" }, { "alpha_fraction": 0.7089397311210632, "alphanum_fraction": 0.7266111969947815, 
"avg_line_length": 30, "blob_id": "0dd9ecf792535c32e357fe580afa03892532fad5", "content_id": "9e846c52cd1ff8922acfe7616f81766feec17ecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1924, "license_type": "no_license", "max_line_length": 73, "num_lines": 62, "path": "/ExemplarModel.py", "repo_name": "vijaymarupudi/Categorization_Models", "src_encoding": "UTF-8", "text": "import numpy as np #this is needed for any arrays and other stuff\nimport matplotlib.pyplot as plt\n\n#The values below should change for different catgory sets. \n# e.g. NumDim is 6 for 6dNLSStim.txt\n\nNumDim = 4 #the number of dimenions or features\nNumAStim = 5 #the number of category A exemplars\nNumBStim = 4 #the number of category B exemplars\nNumTrainStim = 9 #The total number of training exemaplrs\nNumTransferStim = 7 #the number of transfer stimuli (if used)\nNumTotalStim = 16 #the total number of stimuli in the problem set\n\n#To make the easy, create one array for all the stimuli to be categorized\nStim = np.zeros((NumTotalStim, NumDim))\nStim = np.loadtxt(\"54taskStm.txt\", delimiter=\" \")\n\n#other examples\n#Stim = np.loadtxt(\"6dNLSStim.txt\", delimiter=\" \")\n#Stim = np.loadtxt(\"The75Task.txt\", delimiter=\" \")\n\n\n#an array of the weights in the task, corresponds to numDim\nweights = np.array ([.25, .25, .25, .25])\nsens = 6\nSharedTraits = np.zeros(NumDim)\nSimA = np.zeros(NumTotalStim)\nSimB = np.zeros(NumTotalStim)\nprobs = np.zeros(NumTotalStim)\n\nthismult = 0\ncummulta = 0\ncummultb = 0\n\nfor ThisStim in range(NumTotalStim):\n\tfor ThisTrainStim in range(NumTrainStim):\n\t\tfor ThisDim in range(NumDim):\n\t\t\tif Stim[ThisStim, ThisDim] == Stim[ThisTrainStim, ThisDim]: \n\t\t\t\tSharedTraits[ThisDim] = 0\n\t\t\telse:\n\t\t\t\tSharedTraits[ThisDim] = weights[ThisDim]\n\t\tfor ThisDim in range(NumDim):\n\t\t\tthismult = thismult + SharedTraits[ThisDim]\n\t\tif ThisTrainStim <= NumAStim:\n\t\t\tcummulta = cummulta + np.exp(-sens * thismult)\n\t\telse:\n\t\t\tcummultb = cummultb + np.exp(-sens * thismult)\n\t\tthismult = 0.0\n\t\tfor ThisDim in range(NumDim):\n\t\t\tSharedTraits[ThisDim] = 0.0\n\tSimA[ThisStim] = cummulta\n\tSimB[ThisStim] = cummultb\n\tcummulta = 0\n\tcummultb = 0\n\nfor ThisStim in range(NumTotalStim):\n\tprobs[ThisStim] = SimA[ThisStim] / (SimA[ThisStim] + SimB[ThisStim])\n\nprobs = np.vstack(probs)\nprint(np.round(probs,3))\n\nplt.show(plt.plot(probs))\n\n\n" } ]
3
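A note on the two scripts in the record above: both compute similarity as exp(-sens × weighted city-block distance), which vectorizes cleanly in NumPy. Below is a minimal sketch of the prototype rule; `prototype_similarity` is a hypothetical helper written for illustration, not repository code:

```python
import numpy as np

def prototype_similarity(stim, prototype, weights, sens):
    # exp(-sens * sum_k w_k * |x_k - P_k|), evaluated for every row of stim at once
    return np.exp(-sens * np.sum(weights * np.abs(stim - prototype), axis=-1))

stim = np.array([[0, 0, 1, 0],
                 [1, 1, 0, 1]], dtype=float)  # two made-up 4-dimensional stimuli
weights = np.full(4, 0.25)                    # equal attention weights summing to 1.0
sim_a = prototype_similarity(stim, np.zeros(4), weights, sens=2.0)
sim_b = prototype_similarity(stim, np.ones(4), weights, sens=2.0)
print(sim_a / (sim_a + sim_b))                # probability of a category A response
```

The per-dimension loops in the scripts compute exactly this quantity one stimulus at a time.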
adtamayop/image_caption
https://github.com/adtamayop/image_caption
2716f8b566db6a9c5a750d8bd8597f997461ff1c
adbc010f8a61d5e39a986cacd5d8b54b79790799
9606d9089b78b0fd2918e73f9a998c6f739091d7
refs/heads/master
2022-04-07T15:43:06.434689
2020-03-13T21:09:31
2020-03-13T21:09:31
245726922
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7653260827064514, "alphanum_fraction": 0.780978262424469, "avg_line_length": 78.96521759033203, "blob_id": "40445a728baced22ac496ded8168c878dcf9731e", "content_id": "6b3c3e27091a40b8bfc6a1bc1e762561463f35f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9302, "license_type": "no_license", "max_line_length": 591, "num_lines": 115, "path": "/README.md", "repo_name": "adtamayop/image_caption", "src_encoding": "UTF-8", "text": "# Photo caption generator\n\nEste problema consiste en la generación de subtítulos de imágenes, es decir una descripción legible y concisa de los contenidos de una fotografía.\n\nLa generación de descripciones para una imagen requiere tanto los métodos de la visión por computadora para comprender el contenido de la imagen como un modelo de lenguaje del campo del procesamiento del lenguaje natural para convertir la comprensión de la imagen en palabras en el orden correcto.\n\nEl Dataset a usar conseta de 8000 imágenes, y cada imágen tiene 5 descripciones diferentes\n\nLos pasos necesario para entrentar este problema es:\n\n1. Preprocesar las imágenes\n2. Preprocesar el texto\n3. Desarrollo del modelo:\n 1. Cargar datos\n 2. Definir el modelo\n 3. Entrenar el modelo\n 4. Evaluar el modelo\n4. Generación de descripciones\n\n\n## 1. Preprocesamiento de las imágenes:\n\nSe va a utilizar transfer learning para interpretar el contenido de las fotos, en este caso la arquitectura VGG16\n\n<p align=\"center\">\n <img src=\"https://www.researchgate.net/profile/Max_Ferguson/publication/322512435/figure/fig3/AS:697390994567179@1543282378794/Fig-A1-The-standard-VGG-16-network-architecture-as-proposed-in-32-Note-that-only.png\" width=\"400\">\n</p>\n\nEn el entrenamiento del modelo podríamos pasar cada imagen que vamos a procesar por el modelo preentrenado y unirla a la arquitectura de image caption que se va a proponer más adelante, pero para ahorrar tiempo y recursos podemos pre calcular las \"photo features\" usando el modelo preentrenado y guardar estas interpretaciones por el modelo en un archivo y luego cargarlas, para meter esa interpretacion a nuestro modelo, que sería lo mismo que por cada imagen pasarla por el modelo VGG solo que lo haremos de forma anticipada, esto hará el modelo más rápido y consumirá mucha menos memoria.\n\nPara extraer correctamente las características de la foto se remueve la última capa de la red preentrenada, que sería la parte encargada de la red que hace la clasificación, pero en este problema no estamos interesados en clasificar las imágenes, sino en la interpretación interna de la foto, que es lo que se hace justo antes de clasificarla, allí están las \"características\" que el modelo ha extraído de la foto.\n\n## 2. Preprocesamiento del texto\n\nEl dataset contiene multiples desciptiones por foto y la descripciones requieren algo de limpieza,así que se pasa todo minusculas, quitar puntuaciones, palabras de una sola letras, palabras con numeros dentro de ellas, entonces una vez ya hemos limpiado el vocabulario, podemos resumir el tamaño del vocabulario, lo ideal es que el vocabulario que tenemos sea tan expresivo y pequeño como sea posible, un vocabulario pequeño resultará en un modelo más pequeño que entrenaremos más rápido\n\nGuardamos un diccionario de las descripciones de cada imagen por identificador de cada imagen en un archivo llamado description.txt, con un identificador y descripción por línea\n\n## 3. 
Construcción del modelo\n\nEl modelo que se va a desarrollar generará una descripción dada una foto, pero esta descripción se va a construir una palabra al tiempo, entonces cada palabra que se vaya generando de la descripción se le vuelve a ingresar a la entrada del modelo de forma recurrente, entonces se va a utilizar una palabra que va \"igniciar\" el proceso de generación y una última palabra para darle la señal de que termine la descripción, en este casó será las palabras 'startseq' y 'endseq'\n\n X1,\tX2 (Secuencia de palabras) y (palabra)\n photo\tstartseq, little\n photo\tstartseq, little, girl\n photo\tstartseq, little, girl, \t running\n photo\tstartseq, little, girl, running, in\n photo\tstartseq, little, girl, running, in, field\n photo\tstartseq, little, girl, running, in, field, endseq\n\n<p align=\"center\">\n <img src=\"https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2017/10/Recursive-Framing-of-the-Caption-Generation-Model.png\" width=\"400\">\n</p>\n\nEntonces cuando el modelo sea usado para generar descripciones, las palabras generadas serán concatenadas, y recursivamente serán utilizadas como entrada para generar una descripción para cada imagen.\n\nSe transforman los datos a supervisados, entonces se tiene un array de features de las fotos, y otro con el texto codificado \n\nEl texto de entrada es codificado como entero, el cual será alimentado a una capa \"word embedding\", entonces las características de la foto será alimentadas directamente a otra parte del modelo, el modelo sacará una predicción la cual es una distribución de probabilidad sobre todas las palabras del vocabulario, los datos de salida será por lo tanto un one-hot encoded del vocabulario, de cada palabra, representando una probabilidad de distribution idealizada con valores de 0 para todas las posiciones de palabras.\n\nlas arquitecturas encoder-decoder son referentes en la solución de problema de este tipo (traducción, problemas de índole secuencial), en donde una red codifica datos, y otra los interpreta.\n\n<p align=\"center\">\n <img src=\"https://cdn-images-1.medium.com/max/1000/1*1JcHGUU7rFgtXC_mydUA_Q.jpeg\" width=\"400\">\n</p>\n\nEn nuestro caso se tienen 2 elementos básicos:\n\n* Encoder: Una red que lee la fotografía y codifica el contenido en un fector de tamaño fijo, usando una representación interna\n\n* Decoder: Una red que lee la fotografía codificada y genera la descripción\n\nNormalmente lo que se hace es coger una red convolucional para codificar la imagen y una red recurrente como una LSTM por ejm, para bien sea codificar la secuencia de entrada y/o generar la siguiente palabra en la secuencia \n\n**Marc Tanti** Propuso una arquitectura, muy efectiva para generar image caption llamada **Merge-model**\n\nEl modelo de merge combina la forma codificada de la imagen de entrada con la forma codificada de la descripción de texto generada hasta ahora,la combinación de estas dos entradas codificadas es utilizada por un modelo de decodificador muy simple para generar la siguiente palabra en la secuencia,el enfoque utiliza la red neuronal recurrente solo para codificar el texto generado hasta ahora.\n\n<p align=\"center\">\n <img src=\"https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2017/10/Merge-Architecture-for-the-Encoder-Decoder-Model.png\" width=\"400\">\n</p>\n\nEsto separa los features de la entrada de imagen, la entrada de texto y la combinación e interpretación de las entradas codificadas.\n\nComo se mencionó, es común 
usar un modelo previamente entrenado para codificar la imagen, pero de manera similar, esta arquitectura también permite usar un modelo de lenguaje previamente entrenado para codificar la entrada de texto de subtítulos.\n\n\nEl modelo se puede describir en 3 partes:\n\n* El extractor de características de la foto: en este caso la VGG16 sin la capa de salida\n* El procesador de secuencias: Esta es una capa de word embedding (n dónde las palabras o frases del vocabulario son vinculadas a vectores de números reales) para manipular el texto de entrada seguida por una lstm \n* Decoder, el extractor y el procesador sacan un vector de tamaño fijo, estos son combinados y procesador por una red densa para hacer una predicción final\n\nEl modelo Photo Feature Extractor espera que las características de entrada de fotos sean un vector de 4.096 elementos. Estos son procesados por una capa densa para producir una representación de 256 elementos de la foto.\n\nEl modelo del procesador de secuencia espera secuencias de entrada con una longitud predefinida (34 palabras) que se introducen en una capa de embedding que utiliza una máscara para ignorar los valores rellenados. Esto es seguido por una capa LSTM con 256 neuronas.\n\nAmbos modelos de entrada producen un vector de 256 elementos. Además, ambos modelos de entrada utilizan la regularización Dropout 50%. Esto es para reducir el sobreajuste del conjunto de datos de entrenamiento, ya que esta configuración de modelo aprende muy rápido.\n\nEl modelo Decoder combina los vectores de ambos modelos de entrada utilizando una operación de suma. Esto luego se alimenta a una capa de neuronas 256 densas y luego a una capa densa de salida final que hace una predicción softmax sobre todo el vocabulario de salida para la siguiente palabra en la secuencia.\n\n\n\n**Evaluamos el modelo:**\n\nUna vez entrenamos el modelo, podemos meterle nuestro dataset de prueba\n\nEntonces lo primero es evaluar el modelo generando las descripciones para todas las fotos en el dataset de puebas y evaluando esas predicciones con una función de costo estandar\n\nLas descripciones reales y pronosticadas se recopilan y evalúan colectivamente utilizando la puntuación BLEU del corpus que resume lo cerca que está el texto generado del texto esperado.\n\nLa métrica BLEU es usada para evaluar traducción de texto, evaluando el texto que debió ser, contra el predicho.\n\n\nEn este caso se calculo el BLEU score para 1, 2, 3 y 4 n-gramas acumulativos, un puntaje más cerca de 1 es mejor y 0 es peor\n\n\n\n\n" }, { "alpha_fraction": 0.6836606860160828, "alphanum_fraction": 0.6861680150032043, "avg_line_length": 27.5, "blob_id": "91ef999f8cc6360f297c4f7e4c0e2e56f8397a8e", "content_id": "9376b96dc86fcc69403908adac0280e36ab3f0f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2396, "license_type": "no_license", "max_line_length": 64, "num_lines": 84, "path": "/src/preprocessing/preprocess_text.py", "repo_name": "adtamayop/image_caption", "src_encoding": "UTF-8", "text": "import string\n\ndef load_doc(filename):\n\t\"\"\"\n\tSe carga el archivo que contiene todas las \n\tdescripciones de las fotos, y quedan en un \n\tsolo string grande\n\t\"\"\"\n\tfile = open(filename, 'r')\n\ttext = file.read()\n\tfile.close()\n\treturn text\n\ndef load_descriptions(doc):\n\t\"\"\"\n\tCrea un diccionario donde la clave es el \n\tidentificador único de la foto y el valor \n\tson una lista con las diferentes \n\tdescripciones por imagen\n\t\"\"\"\n\tmapping = 
dict()\n\tfor line in doc.split('\\n'):\n\t\ttokens = line.split()\n\t\tif len(line) < 2:\n\t\t\tcontinue\n\t\timage_id, image_desc = tokens[0], tokens[1:]\n\t\timage_id = image_id.split('.')[0]\n\t\timage_desc = ' '.join(image_desc)\n\t\tif image_id not in mapping:\n\t\t\tmapping[image_id] = list()\n\t\tmapping[image_id].append(image_desc)\n\treturn mapping\n\ndef clean_descriptions(descriptions):\n\t\"\"\"\n\tDado un diccionario con descripciones se limpian las \n\tdescripciones convirtiendo todo a minusculas, se \n\tremueven los puntos, se remueven todas las palabras \n\tde un solo caracter, y se remueven todas las palabras \n\tcon números\n\t\"\"\"\n\ttable = str.maketrans('', '', string.punctuation)\n\tfor key, desc_list in descriptions.items():\n\t\tfor i in range(len(desc_list)):\n\t\t\tdesc = desc_list[i]\t\t\t\n\t\t\tdesc = desc.split()\t\t\t\n\t\t\tdesc = [word.lower() for word in desc]\t\t\t\n\t\t\tdesc = [w.translate(table) for w in desc]\t\t\t\n\t\t\tdesc = [word for word in desc if len(word)>1]\t\t\t\n\t\t\tdesc = [word for word in desc if word.isalpha()]\t\t\t\n\t\t\tdesc_list[i] = ' '.join(desc)\n\ndef to_vocabulary(descriptions):\n\t\"\"\"\n\tConvierte descripciones en un vocabulario de palabras\n\t\"\"\"\n\tall_desc = set()\n\tfor key in descriptions.keys():\n\t\t[all_desc.update(d.split()) for d in descriptions[key]]\n\treturn all_desc\n\ndef save_descriptions(descriptions, filename):\n\t\"\"\"\n\tGuardamos identifificador de imagen y su \n\tdescripción en un archivo, una por linea\n\t\"\"\"\n\tlines = list()\n\tfor key, desc_list in descriptions.items():\n\t\tfor desc in desc_list:\n\t\t\tlines.append(key + ' ' + desc)\n\tdata = '\\n'.join(lines)\n\tfile = open(filename, 'w')\n\tfile.write(data)\n\tfile.close()\n\nif __name__ == \"__main__\":\n\tfilename = './data/Flickr8k.token.txt'\n\tdoc = load_doc(filename)\n\tdescriptions = load_descriptions(doc)\n\t# print('Loaded: %d ' % len(descriptions))\n\tclean_descriptions(descriptions)\n\tvocabulary = to_vocabulary(descriptions)\n\t# print('Vocabulary Size: %d' % len(vocabulary))\n\tsave_descriptions(descriptions, './src/files/descriptions.txt')" }, { "alpha_fraction": 0.6732673048973083, "alphanum_fraction": 0.6936517357826233, "avg_line_length": 27.633333206176758, "blob_id": "8df2c8841f207e6680315a780bf4018150adb8cd", "content_id": "8aadad9dcbaf51372e11017cf2976dd640df61f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1722, "license_type": "no_license", "max_line_length": 55, "num_lines": 60, "path": "/src/preprocessing/preprocess_images.py", "repo_name": "adtamayop/image_caption", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom pickle import dump\nfrom keras.models import Model\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.applications.vgg16 import preprocess_input\n\ndef extract_features(directory):\n\t\"\"\"Retorna las características extraídas por el modelo\n\t de cada foto en el directorio\n\t\n\tArguments:\n\t\tdirectory {str}\n\t\n\tReturns:\n\t\t[dict] {img_id: features_img_id}\n\t\"\"\"\n\tmodel = VGG16()\n\tmodel.layers.pop()\n\tmodel = Model(inputs=model.inputs, \n\t\t\t\t outputs=model.layers[-1].output)\n\t# print(model.summary())\n\tfeatures = dict()\n\tfor name in listdir(directory):\n\t\tfilename = directory + '/' + name\n\t\timage = load_img(filename, target_size=(224, 224))\n\t\timage = img_to_array(image)\n\t\timage = 
image.reshape((1, \n\t\t\t\t\t\t\t image.shape[0], \n\t\t\t\t\t\t\t image.shape[1], \n\t\t\t\t\t\t\t image.shape[2]))\n\t\t# se prepara las imagenes para entrar al modelo VGG\n\t\timage = preprocess_input(image)\n\t\t# se hace un .predict para obtener\n\t\t# las características de cada imagen\n\t\tfeature = model.predict(image, verbose=0)\n\t\timage_id = name.split('.')[0]\n\t\tfeatures[image_id] = feature\n\t\tprint('>%s' % name)\n\treturn features\n\ndef extract_feature(filename):\n\t\"\"\"\n\tRetorna las características extraídas por el modelo\n\tpara una sola foto\n\t\"\"\"\n\tmodel = VGG16()\n\tmodel.layers.pop()\n\tmodel = Model(inputs=model.inputs, \n\t\t\t\t outputs=model.layers[-1].output)\n\timage = load_img(filename, target_size=(224, 224))\n\timage = img_to_array(image)\n\timage = image.reshape((1, \n\t\t\t\t\t\t image.shape[0], \n\t\t\t\t\t\t image.shape[1], \n\t\t\t\t\t\t image.shape[2]))\n\timage = preprocess_input(image)\n\tfeature = model.predict(image, verbose=0)\n\treturn feature" }, { "alpha_fraction": 0.6883295178413391, "alphanum_fraction": 0.7027459740638733, "avg_line_length": 30.670289993286133, "blob_id": "dab4f2567ef4d9653fe12f9694359de0f018def9", "content_id": "6f694adecf84fdde63468fe9f1f86e478e129477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8749, "license_type": "no_license", "max_line_length": 129, "num_lines": 276, "path": "/src/model/model.py", "repo_name": "adtamayop/image_caption", "src_encoding": "UTF-8", "text": "import keras\nfrom pickle import load\nfrom pickle import dump\nfrom numpy import array\nfrom numpy import argmax\nfrom keras.utils import plot_model\nfrom keras.models import load_model\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Embedding\nfrom keras.layers import Dropout\nfrom keras.layers.merge import add\nfrom keras.callbacks import ModelCheckpoint\nfrom nltk.translate.bleu_score import corpus_bleu\nfrom keras.preprocessing.sequence import pad_sequences\n\n\ndef load_doc(filename):\n\t\"\"\"\n\tCarga y retorna archivo de texto\n\t\"\"\"\n\tfile = open(filename, 'r')\t\n\ttext = file.read()\n\tfile.close()\n\treturn text\n\n\ndef load_set(filename):\n\t\"\"\"\n\tCarga una lista predefinidas de datos\n\t(Entrenamiento, validacion, prueba)\n\t\"\"\"\n\tdoc = load_doc(filename)\n\tdataset = list()\n\tfor line in doc.split('\\n'):\n\t\tif len(line) < 1:\n\t\t\tcontinue\n\t\tidentifier = line.split('.')[0]\n\t\tdataset.append(identifier)\n\treturn set(dataset)\n\n\ndef load_clean_descriptions(filename, dataset):\n\t\"\"\"\n\tCarga las descripciones limpias del archivo de descripciones,\n\ty añade 'startseq' y 'endseq' al inicio y fin de la descripcion\n\t\"\"\"\n\tdoc = load_doc(filename)\n\tdescriptions = dict()\n\tfor line in doc.split('\\n'):\t\t\n\t\ttokens = line.split()\t\t\n\t\timage_id, image_desc = tokens[0], tokens[1:]\t\t\n\t\tif image_id in dataset:\t\t\n\t\t\tif image_id not in descriptions:\n\t\t\t\tdescriptions[image_id] = list()\t\t\n\t\t\tdesc = 'startseq ' + ' '.join(image_desc) + ' endseq'\t\t\n\t\t\tdescriptions[image_id].append(desc)\n\treturn descriptions\n\n\ndef load_photo_features(filename, dataset):\n\t\"\"\"\n\tCarga todo el archivo de features y selecciona \n\tel conjunto correspondiente 
(train,val,test)\n\t\"\"\"\n\tall_features = load(open(filename, 'rb'))\n\tfeatures = {k: all_features[k] for k in dataset}\n\treturn features\n\n\ndef to_lines(descriptions):\n\t\"\"\"\n\tConvierte un diccionario limpio de \n\tdescripciones a una lista de descripciones\n\t\"\"\"\n\tall_desc = list()\n\tfor key in descriptions.keys():\n\t\t[all_desc.append(d) for d in descriptions[key]]\n\treturn all_desc\n\n\ndef create_tokenizer(descriptions):\n\t\"\"\"\n\tCodifica lista de descripciones para crear un \n\ttext corpus, y retorna el tokenizer ajustado\n\t\"\"\"\n\tlines = to_lines(descriptions)\n\ttokenizer = Tokenizer()\n\ttokenizer.fit_on_texts(lines)\n\treturn tokenizer\n\n\ndef max_length(descriptions):\n\t\"\"\"\n\tSe calcula el tamaño máximo de \n\tlas descripción con más palabras\n\t\"\"\"\n\tlines = to_lines(descriptions)\n\treturn max(len(d.split()) for d in lines)\n\n\ndef create_sequences(tokenizer, max_length, desc_list, \n\t\t\t\t\t photo, vocab_size):\n\t\"\"\"\n\tcrea secuencias de imagenes, secuencias de entrada \n\ty palabras de salida para una imagen dada\t\n\t\"\"\"\n\tX1, X2, y = list(), list(), list()\t\n\tfor desc in desc_list:\n\t\t# codifica la secuencia de texto\n\t\tseq = tokenizer.texts_to_sequences([desc])[0]\n\t\t# parte la secuencia en multiples pares X,y\n\t\tfor i in range(1, len(seq)):\n\t\t\t# parte en un par de entrada y salida\n\t\t\tin_seq, out_seq = seq[:i], seq[i]\n\t\t\t# aplica el pad_sequences a la secuencia de entrada\n\t\t\tin_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n\t\t\t# codifica la secuencia de salida\n\t\t\tout_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n\t\t\tX1.append(photo)\n\t\t\tX2.append(in_seq)\n\t\t\ty.append(out_seq)\n\treturn array(X1), array(X2), array(y)\n\n\ndef create_all_sequences(tokenizer, max_length, descriptions, photos, vocab_size):\n\tX1, X2, y = list(), list(), list()\n\t# walk through each image identifier\n\tfor key, desc_list in descriptions.items():\n\t\t# walk through each description for the image\n\t\tfor desc in desc_list:\n\t\t\t# encode the sequence\n\t\t\tseq = tokenizer.texts_to_sequences([desc])[0]\n\t\t\t# split one sequence into multiple X,y pairs\n\t\t\tfor i in range(1, len(seq)):\n\t\t\t\t# split into input and output pair\n\t\t\t\tin_seq, out_seq = seq[:i], seq[i]\n\t\t\t\t# pad input sequence\n\t\t\t\tin_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n\t\t\t\t# encode output sequence\n\t\t\t\tout_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n\t\t\t\t# store\n\t\t\t\tX1.append(photos[key][0])\n\t\t\t\tX2.append(in_seq)\n\t\t\t\ty.append(out_seq)\n\treturn array(X1), array(X2), array(y)\n\n\ndef define_model(vocab_size, max_length):\n\t# feature extractor model\n\tinputs1 = Input(shape=(4096,))\n\tfe1 = Dropout(0.5)(inputs1)\n\tfe2 = Dense(256, activation='relu')(fe1)\n\t# sequence model\n\tinputs2 = Input(shape=(max_length,))\n\tse1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)\n\tse2 = Dropout(0.5)(se1)\n\tse3 = LSTM(256)(se2)\n\t# decoder model\n\tdecoder1 = add([fe2, se3])\n\tdecoder2 = Dense(256, activation='relu')(decoder1)\n\toutputs = Dense(vocab_size, activation='softmax')(decoder2)\n\t# tie it together [image, seq] [word]\n\tmodel = Model(inputs=[inputs1, inputs2], outputs=outputs)\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\t# summarize model\n\tprint(model.summary())\n\tplot_model(model, to_file='model.png', show_shapes=True)\n\treturn model\n\ndef word_for_id(integer, tokenizer):\n\t\"\"\"\n\tMapea la 
predicción que es un entero \n\ta una palabra del tokenizer\n\t\"\"\"\n\tfor word, index in tokenizer.word_index.items():\n\t\tif index == integer:\n\t\t\treturn word\n\treturn None\n\ndef generate_desc(model, tokenizer, photo, max_length):\n\t\"\"\"\n\tGenera una descripcion textual dado un modelo entrenado\n\ty una foto preparada como input\n\t\"\"\"\t\n\tin_text = 'startseq'\t\n\tfor i in range(max_length):\n\t\tsequence = tokenizer.texts_to_sequences([in_text])[0]\n\t\tsequence = pad_sequences([sequence], maxlen=max_length)\n\t\tyhat = model.predict([photo,sequence], verbose=0)\n\t\tyhat = argmax(yhat)\n\t\tword = word_for_id(yhat, tokenizer)\n\t\tif word is None:\n\t\t\tbreak\n\t\tin_text += ' ' + word\n\t\tif word == 'endseq':\n\t\t\tbreak\n\treturn in_text\n\ndef evaluate_model(model, descriptions, photos, tokenizer, max_length):\n\t\"\"\"\n\tMétricas del modelo\n\t\"\"\"\n\tactual, predicted = list(), list()\n\t# se va a iterar sobre todas las descripciones de prueba\n\tfor key, desc_list in descriptions.items():\n\t\t# se genera la descipción\n\t\tyhat = generate_desc(model, tokenizer, photos[key], max_length)\n\t\t# se almacena la descripción real y la de predicha por el modelo\n\t\treferences = [d.split() for d in desc_list]\n\t\tactual.append(references)\n\t\tpredicted.append(yhat.split())\n\t# Se calcula el BLUE score\n\tprint('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))\n\tprint('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))\n\tprint('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))\n\tprint('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))\n\n\ndef data_generator(descriptions, photos, tokenizer, max_length, vocab_size):\n\t\"\"\"\n\n\t\"\"\"\n\twhile 1:\n\t\tfor key, desc_list in descriptions.items():\n\t\t\tphoto = photos[key][0]\n\t\t\tin_img, in_seq, out_word = create_sequences(tokenizer, max_length, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tdesc_list, photo, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tvocab_size)\n\t\t\tyield [[in_img, in_seq], out_word]\n\n\ndef loading_train_model(model, epochs, train_descriptions, train_features, tokenizer,\n\t\t\t\tmax_length, vocab_size, val_descriptions, val_features):\n\t\"\"\"\n\tSe entrena el modelo con el datagenerator\n\t\"\"\"\n\tsteps = len(train_descriptions)\n\tval_steps = len(val_descriptions)\n\n\tes = keras.callbacks.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, \n\t\t\t\t\t\t\t\t\t\t\t\tpatience=2, verbose=0, mode='auto', \n\t\t\t\t\t\t\t\t\t\t\t\tbaseline=None, restore_best_weights=False)\n\n\tfor i in range(epochs):\n\t\tgenerator = data_generator(train_descriptions, train_features, \n\t\t\t\t\t\t\t\ttokenizer, max_length, vocab_size)\n\t\t\n\t\tval_generator = data_generator(val_descriptions, val_features,\n\t\t\t\t\t\t\t\t\ttokenizer, max_length, vocab_size)\n\t\tmodel.fit_generator(generator,\n\t\t\t\t\t\t\tsteps_per_epoch = steps,\n\t\t\t\t\t\t\tvalidation_data = val_generator,\n\t\t\t\t\t\t\tvalidation_steps = val_steps,\n\t\t\t\t\t\t\tepochs = 1, verbose = 1, \n\t\t\t\t\t\t\tcallbacks = [es], shuffle = True)\n\t\tmodel.save('model_' + str(i) + '.h5')\n\n\n\ndef train_model(tokenizer, max_length, train_descriptions, train_features, vocab_size, test_descriptions, test_features):\n\t\n\tX1train, X2train, ytrain = create_all_sequences(tokenizer, max_length, train_descriptions, train_features, vocab_size)\n\tX1test, X2test, ytest = create_all_sequences(tokenizer, max_length, test_descriptions, test_features, 
vocab_size)\n\n\tmodel = define_model(vocab_size, max_length)\n\t# define checkpoint callback\n\tfilepath = 'model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'\n\tcheckpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n\t# fit model\n\tmodel.fit([X1train, X2train], ytrain, epochs=20, verbose=1, callbacks=[checkpoint], validation_data=([X1test, X2test], ytest))
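# --- Usage sketch (added): caption one photo with the pipeline above. The
# artifact names ('tokenizer.pkl', 'features.pkl', 'model_19.h5') and
# max_length=34 are assumptions; substitute whatever your training run produced.
from pickle import load
from keras.models import load_model

tokenizer = load(open('tokenizer.pkl', 'rb'))
model = load_model('model_19.h5')
photo = load(open('features.pkl', 'rb'))['example_image_id']  # (1, 4096) vector
print(generate_desc(model, tokenizer, photo, max_length=34))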
4
frandorr/sat-extractor
https://github.com/frandorr/sat-extractor
695d6b9a70f7cee1d6013bcaec4d84810e3cd633
8ebe0209c8fb7b7ec7b5d59da8f00c178a10532e
940f4f005f91022fe8923227995446abb43dc1eb
refs/heads/main
2023-08-27T09:18:07.129374
2021-10-29T13:08:44
2021-10-29T13:08:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8313252925872803, "alphanum_fraction": 0.8313252925872803, "avg_line_length": 40.5, "blob_id": "3a9a34ffad41019ceecb7453acba0190e872fa16", "content_id": "04539e879ee08c78fd0f4208ea4855647c309b2d", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "permissive", "max_line_length": 45, "num_lines": 2, "path": "/src/satextractor/scheduler/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .scheduler import create_tasks_by_splits\nfrom .scheduler import get_scheduler\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 35, "blob_id": "d39942b329be56ba51a3c91cd352f8263463b8b6", "content_id": "f152fc888de6ea0cc62853a81c1c431b8d17a5f2", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "permissive", "max_line_length": 35, "num_lines": 1, "path": "/src/satextractor/monitor/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .gcp_monitor import GCPMonitor\n" }, { "alpha_fraction": 0.6090520024299622, "alphanum_fraction": 0.616536021232605, "avg_line_length": 31.627906799316406, "blob_id": "84a61f7efc16c04dd70ead17ef231ab3e93ae2d0", "content_id": "78704e19d9ff77dab966fa49b2f98a15c353ad25", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2806, "license_type": "permissive", "max_line_length": 87, "num_lines": 86, "path": "/ui/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import json\nimport os\n\nimport dash\nimport dash_auth\nfrom flask import Flask\nfrom flask.helpers import get_root_path\n\nfrom .routes import routes\n\n\ndef create_app():\n \"\"\"Construct core Flask application with embedded Dash app.\"\"\"\n app = Flask(__name__)\n\n assert \"USERNAME\" in os.environ.keys(), \"Specify USERNAME in environment variables\"\n assert (\n \"USERPASSWORD\" in os.environ.keys()\n ), \"Specify USERPASSWORD in environment variables\"\n\n if \"GOOGLE_APPLICATION_CREDENTIALS\" not in os.environ.keys():\n\n # write google credentials to file... 
annoying\n credentials_json = {\n \"type\": os.environ[\"type\"],\n \"project_id\": os.environ[\"project_id\"],\n \"private_key_id\": os.environ[\"private_key_id\"],\n \"private_key\": os.environ[\"private_key\"].replace(\"\\\\n\", \"\\n\"),\n \"client_email\": os.environ[\"client_email\"],\n \"client_id\": os.environ[\"client_id\"],\n \"auth_uri\": os.environ[\"auth_uri\"],\n \"token_uri\": os.environ[\"token_uri\"],\n \"auth_provider_x509_cert_url\": os.environ[\"auth_provider_x509_cert_url\"],\n \"client_x509_cert_url\": os.environ[\"client_x509_cert_url\"],\n }\n print(credentials_json)\n\n json.dump(\n credentials_json,\n open(os.path.join(os.getcwd(), \"gcp-credentials.json\"), \"w\"),\n )\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = os.path.join(\n os.getcwd(),\n \"gcp-credentials.json\",\n )\n\n from ui.dashboard.layout import layout\n from ui.dashboard.callbacks import register_callbacks\n\n register_dashapp(app, \"ML4CC\", \"dashboard\", layout, register_callbacks)\n\n app.register_blueprint(routes, url_prefix=\"/\")\n\n return app\n\n\ndef register_dashapp(app, title, base_pathname, layout, register_callbacks_fun):\n \"\"\"Register the Dash callbacks with the app.\"\"\"\n meta_viewport = {\n \"name\": \"viewport\",\n \"content\": \"width=device-width, initial-scale=1, shrink-to-fit=no\",\n }\n\n external_stylesheets = [\n \"https://stackpath.bootstrapcdn.com/bootswatch/4.5.2/yeti/bootstrap.min.css\",\n \"https://use.fontawesome.com/releases/v5.10.2/css/all.css\",\n ]\n\n my_dashapp = dash.Dash(\n __name__,\n server=app,\n url_base_pathname=f\"/{base_pathname}/\",\n assets_folder=get_root_path(__name__) + f\"/{base_pathname}/assets/\",\n external_stylesheets=external_stylesheets,\n meta_tags=[meta_viewport],\n )\n\n VALID_USERS = [(os.environ[\"USERNAME\"], os.environ[\"USERPASSWORD\"])]\n\n dash_auth.BasicAuth(my_dashapp, VALID_USERS)\n\n with app.app_context():\n my_dashapp.title = title\n my_dashapp.layout = layout\n if register_callbacks_fun:\n register_callbacks_fun(my_dashapp)\n" }, { "alpha_fraction": 0.5448799133300781, "alphanum_fraction": 0.5455120205879211, "avg_line_length": 27.763635635375977, "blob_id": "00d44686712579ada83f82a6fb5a384651951df3", "content_id": "aba86fdb700a7cb40b3f987b80801e1da93f0c54", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1582, "license_type": "permissive", "max_line_length": 83, "num_lines": 55, "path": "/src/satextractor/monitor/gcp_monitor.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\nfrom google.cloud import bigquery\nfrom satextractor.monitor.base import BaseMonitor\n\n\nclass GCPMonitor(BaseMonitor):\n def __init__(\n self,\n table_name: str,\n storage_path: str,\n job_id: str,\n task_id: str,\n constellation: str,\n ):\n\n self.client = bigquery.Client()\n self.table_name = table_name\n self.storage_path = storage_path\n self.job_id = job_id\n self.task_id = task_id\n self.constellation = constellation\n self.dataset_name = storage_path.split(\"/\")[-1]\n\n def post_status(\n self,\n msg_type: str,\n msg_payload: str,\n ) -> bool:\n\n # CLOUD FUNCTION CANNOT INSERT ROWS, ONLY UPDATE STATUS\n msg_types = [\"STARTED\", \"FINISHED\", \"FAILED\"]\n\n assert (\n msg_type in msg_types\n ), f\"msg_type '{msg_type}' not allowed. 
msg_type must be in '{msg_types}' \"\n\n        vals = {\n            \"job_id\": self.job_id,\n            \"task_id\": self.task_id,\n            \"storage_gs_path\": self.storage_path,\n            \"msg_type\": msg_type,\n            \"msg_payload\": msg_payload,\n            \"timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\"),\n            \"dataset_name\": self.dataset_name,\n            \"constellation\": self.constellation,\n        }\n\n        errors = self.client.insert_rows_json(self.table_name, [vals])\n        if errors != []:\n            raise ValueError(\n                f\"there were {len(errors)} errors when inserting. \" + str(errors),\n            )\n\n        return True\n" }, { "alpha_fraction": 0.5924713611602783, "alphanum_fraction": 0.6147299408912659, "avg_line_length": 28.375, "blob_id": "0c8d668c6fafc742022010859d390c950370a894", "content_id": "1bb3a8dbe8e920702e64a11279a1db2cb9c80cd5", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3055, "license_type": "permissive", "max_line_length": 92, "num_lines": 104, "path": "/src/satextractor/utils.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import contextlib\nimport datetime\nimport functools\nfrom typing import List\nfrom typing import Tuple\n\nimport joblib\nimport pyproj\n\n\n@contextlib.contextmanager\ndef tqdm_joblib(tqdm_object):\n    \"\"\"Context manager to patch joblib to report into tqdm progress bar given as argument\"\"\"\n\n    class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):\n        def __init__(self, *args, **kwargs):\n            super().__init__(*args, **kwargs)\n\n        def __call__(self, *args, **kwargs):\n            tqdm_object.update(n=self.batch_size)\n            return super().__call__(*args, **kwargs)\n\n    old_batch_callback = joblib.parallel.BatchCompletionCallBack\n    joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback\n    try:\n        yield tqdm_object\n    finally:\n        joblib.parallel.BatchCompletionCallBack = old_batch_callback\n        tqdm_object.close()\n\n\ndef get_dates_in_range(\n    start: datetime.datetime,\n    end: datetime.datetime,\n    interval: int,\n) -> List[Tuple[datetime.datetime, datetime.datetime]]:\n    \"\"\"Get all the possible date pairs between start and end for a given interval.\n\n    Args:\n        start (datetime): start date\n        end (datetime): end date (included)\n        interval (int): interval in days\n\n    Returns:\n        Tuple[datetime.datetime, datetime.datetime]: A list of all possible (start,end) dates\n    \"\"\"\n    # Get all the date ranges for the given interval\n    delta = datetime.timedelta(days=interval)\n    dates = []\n    while start <= end:\n        to_date = start + delta\n        dates.append((start, to_date))\n        start = to_date\n    return dates\n\n\ndef get_utm_zone(lat, lon):\n    \"\"\"A function to grab the UTM zone number for any lat/lon location\"\"\"\n    zone_str = str(int((lon + 180) / 6) + 1)\n\n    if (lat >= 56.0) & (lat < 64.0) & (lon >= 3.0) & (lon < 12.0):\n        zone_str = \"32\"\n    elif (lat >= 72.0) & (lat < 84.0):\n        if (lon >= 0.0) & (lon < 9.0):\n            zone_str = \"31\"\n        elif (lon >= 9.0) & (lon < 21.0):\n            zone_str = \"33\"\n        elif (lon >= 21.0) & (lon < 33.0):\n            zone_str = \"35\"\n        elif (lon >= 33.0) & (lon < 42.0):\n            zone_str = \"37\"\n\n    return zone_str\n\n\ndef get_utm_epsg(lat, lon, utm_zone=None):\n    \"\"\"A function to combine the UTM zone number and the hemisphere into an EPSG code\"\"\"\n\n    if utm_zone is None:\n        utm_zone = get_utm_zone(lat, lon)\n\n    if lat > 0:\n        return int(f\"{str(326)+str(utm_zone)}\")\n    else:\n        return int(f\"{str(327)+str(utm_zone)}\")\n
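# --- Illustrative check (added; not from the repo's tests). Values worked out
# by hand from the code above: zone = int((lon + 180) / 6) + 1, and the EPSG
# prefix is 326 for the northern hemisphere, 327 for the southern.
assert get_utm_zone(-34.6, -58.4) == "21"   # Buenos Aires
assert get_utm_epsg(-34.6, -58.4) == 32721  # southern hemisphere -> 327xx
assert get_utm_epsg(51.5, -0.1) == 32630    # London: zone 30, north -> 326xx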
\n\n# SentinelHub proj functions:\n@functools.lru_cache(maxsize=5)\ndef get_transform_function(crs_from: str, crs_to: str, always_xy=True):\n    return pyproj.Transformer.from_proj(\n        projection(crs_from),\n        projection(crs_to),\n        always_xy=always_xy,\n    ).transform\n\n\n@functools.lru_cache(maxsize=5)\ndef projection(crs):\n    if crs == \"WGS84\":\n        proj_str = \"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\"\n    else:\n        proj_str = f\"EPSG:{crs}\"\n    return pyproj.Proj(proj_str, preserve_units=True)\n" }, { "alpha_fraction": 0.7069474458694458, "alphanum_fraction": 0.7174385786056519, "avg_line_length": 39.97770690917969, "blob_id": "a86ed83c8123cfc294528a381eec807030ec54d1", "content_id": "761436e6f26e1e16013259944388d3d135633b31", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12868, "license_type": "permissive", "max_line_length": 544, "num_lines": 314, "path": "/README.md", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "<div id=\"top\"></div>\n<!--\n*** Thanks for checking out the Best-README-Template. If you have a suggestion\n*** that would make this better, please fork the repo and create a pull request\n*** or simply open an issue with the tag \"enhancement\".\n*** Don't forget to give the project a star!\n*** Thanks again! Now go create something AMAZING! :D\n-->\n\n\n\n<!-- PROJECT SHIELDS -->\n<!--\n*** I'm using markdown \"reference style\" links for readability.\n*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).\n*** See the bottom of this document for the declaration of the reference variables\n*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.\n*** https://www.markdownguide.org/basic-syntax/#reference-style-links\n-->\n<!-- [![Contributors][contributors-shield]][contributors-url]\n[![Forks][forks-shield]][forks-url]\n[![Stargazers][stars-shield]][stars-url]\n[![Issues][issues-shield]][issues-url]\n[![MIT License][license-shield]][license-url]\n[![LinkedIn][linkedin-shield]][linkedin-url] -->\n\n\n\n<!-- PROJECT LOGO -->\n<br />\n<div align=\"center\">\n  <a href=\"https://github.com/othneildrew/Best-README-Template\">\n    <img src=\"images/satextractor.png\" alt=\"Logo\">\n  </a>\n\n  <h3 align=\"center\">SatExtractor</h3>\n\n  <p align=\"center\">\n    Build, deploy and extract satellite public constellations with one command line.\n    <br />\n    <a href=\"https://github.com/othneildrew/Best-README-Template\">\n    <img src=\"images/stac.gif\" alt=\"Logo\">\n    </a>\n</div>\n\n\n\n<!-- TABLE OF CONTENTS -->\n<details>\n  <summary>Table of Contents</summary>\n  <ol>\n    <li>\n      <a href=\"#about-the-project\">About The Project</a>\n    </li>\n    <li>\n      <a href=\"#getting-started\">Getting Started</a>\n      <ul>\n        <li><a href=\"#structure\">Structure</a></li>\n        <li><a href=\"#prerequisites\">Prerequisites</a></li>\n        <li><a href=\"#installation\">Installation</a></li>\n      </ul>\n    </li>\n    <li><a href=\"#usage\">Usage</a></li>\n    <li><a href=\"#contributing\">Contributing</a></li>\n    <li><a href=\"#license\">License</a></li>\n    <li><a href=\"#citation\">Citation</a></li>\n    <li><a href=\"#acknowledgments\">Acknowledgments</a></li>\n  </ol>\n</details>\n\n\n\n<!-- ABOUT THE PROJECT -->\n## About The Project\n\n- *tldr*: **SatExtractor** gets **all revisits in a date range** from a given **geojson region** from any public satellite constellation and stores it in a **cloud-friendly format**.\n\n\nThe large amount of image data 
makes it difficult to create datasets to train models quickly and reliably. Existing methods for extracting satellite images take a long time to process and have user quotas that restrict access.\n\nTherefore, we created an open source extraction tool **SatExtractor** to perform worldwide dataset extractions using serverless providers such as **Google Cloud Platform** or **AWS**, based on a common existing standard: **STAC**.\n\nThe tool scales horizontally as needed, extracting revisits and storing them in **zarr** format to be easily used by deep learning models.\n\nIt is fully configurable using [Hydra](https://hydra.cc/).\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n<!-- GETTING STARTED -->\n## Getting Started\n\n**SatExtractor** needs a cloud provider to work. Before you start using it, you'll need to create and configure a cloud provider account.\n\nWe provide the implementation to work with [Google Cloud](https://cloud.google.com/), but **SatExtractor** is implemented to be easily extensible to other providers.\n\n### Structure\n\nThe package has a modular, configurable structure: it is essentially a pipeline of six steps, separated into modules.\n\n- **Builder**: contains the logic to build the container that will run the extraction. <details>\n  <summary>more info</summary>\n  SatExtractor is based on a docker container. The Dockerfile in the root dir is used to build the core package, and a reference to the specific provider extraction logic should be explicitly added in it (see the gcp example in directory providers/gcp).\n\n  This is done by setting the <code> ENV PROVIDER </code> var to point to the provider directory. In the default Dockerfile it is set to gcp: <code> ENV PROVIDER providers/gcp </code>.\n</details>\n\n- **Stac**: converts a public constellation to the **STAC standard**. <details>\n  <summary>more info</summary>\n  If the original constellation is not already in the STAC standard it should be converted. To do so, you have to implement the constellation-specific STAC converter. Sentinel 2 and Landsat 7/8 examples can be found in <code> src/satextractor/stac </code>. The function that is actually called to perform the conversion to the STAC standard is set in the stac hydra config file (<code> conf/stac/gcp.yaml </code>).\n</details>\n\n- **Tiler**: creates tiles of the given region to perform the extraction. <details>\n  <summary>more info</summary>\n  The Tiler splits the region into UTM tiles using the <a href=https://sentinelhub-py.readthedocs.io/en/latest/examples/large_area_utilities.html> SentinelHub splitter </a>. There will be one Extraction Task per Tile. The config for the tiler can be found in <code> conf/tiler/utm.yaml </code>. There, the size of the tiles can be specified. Take into account that these tiles are not the actual patches that are later stored in your cloud provider; they are just the unit from which the (smaller) patches will be extracted.\n</details>\n\n- **Scheduler**: decides how those tiles are going to be scheduled, creating extraction tasks. <details>\n  <summary>more info</summary>\n  The Scheduler takes the resulting tiles from the Tiler and creates the actual patches (also called tiles) to be extracted.\n\n  For example, if the Tiler split the region into 10000x10000 tiles, the scheduler can now be set to extract from each of those tiles smaller patches of, say, 1000x1000. The scheduler also calculates the intersection between the patches and the constellation STAC assets. At the end, you'll have an object called <code> ExtractionTask </code> with the information to extract one revisit, one band and one tile split into multiple patches (a sketch of one such task follows this list). This <code> ExtractionTask </code> will be sent to the cloud provider to perform the actual extraction.\n\n  The config for the scheduler can be found in <code> conf/scheduler/utm.yaml </code>.\n</details>\n\n- **Preparer**: prepares the files in the cloud storage. <details>\n  <summary>more info</summary>\n  The Preparer creates the cloud file structure. It creates the needed zarr groups and arrays in order to later store the extracted patches.\n\n  The gcp preparer config can be found in <code> conf/preparer/gcp.yaml </code>.\n</details>\n\n- **Deployer**: deploys the extraction tasks created by the scheduler to perform the extraction. <details>\n  <summary>more info</summary>\n  The Deployer sends one message per ExtractionTask to the cloud provider to perform the actual extraction. It works by publishing messages to a PubSub queue to which the extraction service is subscribed. When a new message (ExtractionTask) arrives it will be processed automatically on the cloud, autoscaling as needed.\n  The gcp deployer config can be found in <code> conf/deployer/gcp.yaml </code>.\n</details>\n
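For orientation, one scheduler task looks roughly like this (the constructor fields come from `src/satextractor/models`; the concrete values here are invented for illustration):

```python
ExtractionTask(
    task_id="0",
    tiles=region_tiles,              # Tile objects covered by one cluster
    item_collection=revisit_items,   # pystac.ItemCollection for one revisit window
    band="B04",
    constellation="sentinel-2",
    sensing_time=sensing_time,       # datetime marking the revisit window start
)
```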
\n\nAll the steps are **optional**, and the user decides which to run in the **main config file**.\n\n\n### Prerequisites\n\nTo run **SatExtractor** we recommend using a virtual env, and a cloud provider account should already have been created.\n\n### Installation\n\n\n1. Clone the repo\n   ```sh\n   git clone https://github.com/FrontierDevelopmentLab/sat-extractor\n   ```\n2. Install python packages\n   ```sh\n   pip install .\n   ```\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n\n<!-- USAGE EXAMPLES -->\n## Usage\n&#x1F534;&#x1F534;&#x1F534;\n```diff\n- WARNING!!!!:\nRunning SatExtractor will use your billable cloud provider services.\nWe strongly recommend testing it with a small region to get acquainted\nwith the process and have a first sense of your cloud provider costs\nfor the datasets you want to generate. Be sure you are running all your\ncloud provider services in the same region to avoid extra costs.\n```\n&#x1F534;&#x1F534;&#x1F534;\n\nOnce a cloud provider account is set up and the package is installed, you'll need to grab the GeoJSON region you want (you can get it from the super-cool tool [geojson.io](http://geojson.io/)) and change the config files.\n\n\n1. Choose a region name (e.g. `cordoba` below) and create an output directory for it:\n```\nmkdir output/cordoba\n```\n2. Save the region GeoJSON as `aoi.geojson` and store it in the folder you just created.\n
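A minimal `aoi.geojson` has this shape (any polygon exported from geojson.io works; the coordinates below are an illustrative box near Córdoba):
```json
{
  "type": "FeatureCollection",
  "features": [
    {
      "type": "Feature",
      "properties": {},
      "geometry": {
        "type": "Polygon",
        "coordinates": [[[-64.3, -31.5], [-64.1, -31.5], [-64.1, -31.3],
                         [-64.3, -31.3], [-64.3, -31.5]]]
      }
    }
  ]
}
```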
3. Open the `config.yaml` and you'll see something like this:\n\n```yaml\ndataset_name: cordoba\noutput: ./output/${dataset_name}\n\nlog_path: ${output}/main.log\ncredentials: ${output}/token.json\ngpd_input: ${output}/aoi.geojson\nitem_collection: ${output}/item_collection.geojson\ntiles: ${output}/tiles.pkl\nextraction_tasks: ${output}/extraction_tasks.pkl\n\nstart_date: 2020-01-01\nend_date: 2020-02-01\n\nconstellations:\n  - sentinel-2\n  - landsat-5\n  - landsat-7\n  - landsat-8\n\ndefaults:\n  - stac: gcp\n  - tiler: utm\n  - scheduler: utm\n  - deployer: gcp\n  - builder: gcp\n  - cloud: gcp\n  - preparer: gcp\n  - _self_\ntasks:\n  - build\n  - stac\n  - tile\n  - schedule\n  - prepare\n  - deploy\n\nhydra:\n  run:\n    dir: .\n```\n\nThe important thing here is to set `dataset_name` to `<your_region_name>`, define the `start_date` and `end_date` for your revisits, set your `constellations`, and choose the tasks to be run (you will typically want to run `build` only once and then comment it out).\n\n**Important**: the `token.json` contains the credentials needed to access your cloud provider. In this example case it contains the gcp credentials. You can see instructions for getting it below in the [Authentication](#authentication) instructions.\n\n4. Open the `cloud/<provider>.yaml` and add your account info there, as in the default provided file.\n   (optional): you can choose different configurations by changing module configs: `builder`, `stac`, `tiler`, `scheduler`, `preparer`, etc. There you can change things like patch_size and chunk_size.\n\n5. Run `python src/satextractor/cli.py` and enjoy!\n\nSee the [open issues](https://github.com/FrontierDevelopmentLab/sat-extractor/issues) for a full list of proposed features (and known issues).\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n## Authentication\n### Google Cloud\nTo get the `token.json` for Google Cloud, the recommended approach is to create a service account:\n1. Go to [Credentials](https://console.cloud.google.com/apis/credentials)\n2. Click `Create Credentials` and choose `Service account`\n3. Enter a name (e.g. `sat-extractor`) and click `Done` (you may also want to modify permissions and users)\n4. Choose the account from the list and then go to the `Keys` tab\n5. Click `Add key` -> `Create new key` -> `JSON` and save the file that gets downloaded\n6. Rename to `token.json` and you're done!\n\nYou may also need to run `gcloud config set project your-proj-name` for `sat-extractor` to work properly.\n\n<!-- CONTRIBUTING -->\n## Contributing\n\nContributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.\n\nIf you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag \"enhancement\".\nDon't forget to give the project a star! Thanks again!\n\n1. Fork the Project\n2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)\n3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)\n4. Push to the Branch (`git push origin feature/AmazingFeature`)\n5. Open a Pull Request\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n\n<!-- LICENSE -->\n## License\n\nDistributed under the BSD 2 License. 
See `LICENSE.txt` for more information.\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n\n## Citation \n\nIf you want to use this repo please cite:\n\n```\n@software{dorr_francisco_2021_5609657,\n author = {Dorr, Francisco and\n Kruitwagen, Lucas and\n Ramos, Raúl and\n García, Dolores and\n Gottfriedsen, Julia and\n Kalaitzis, Freddie},\n title = {SatExtractor},\n month = oct,\n year = 2021,\n publisher = {Zenodo},\n version = {v0.1.0},\n doi = {10.5281/zenodo.5609657},\n url = {https://doi.org/10.5281/zenodo.5609657}\n}\n```\n\n<p align=\"right\">(<a href=\"#top\">back to top</a>)</p>\n\n## Acknowledgments\n\n<div align=\"center\">\n <a href=\"https://fdleurope.org/\">\n <img src=\"images/fdleuropeESA.png\" alt=\"fdleurope\">\n </a>\n</div>\n\n\nThis work is the result of the 2021 ESA Frontier Development Lab World Food Embeddings team. We are grateful to all organisers, mentors and sponsors for providing us this opportunity. We thank Google Cloud for providing computing and storage resources to complete this work.\n\n" }, { "alpha_fraction": 0.8205128312110901, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 38, "blob_id": "007530b56d9b3a5aef3fca4dda37a1d9bdf833f1", "content_id": "ee16df8ae60b7d9624e7b1a30c82c39fd27cae00", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "permissive", "max_line_length": 38, "num_lines": 1, "path": "/src/satextractor/deployer/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .gcp_deployer import deploy_tasks\n" }, { "alpha_fraction": 0.8372092843055725, "alphanum_fraction": 0.8372092843055725, "avg_line_length": 42, "blob_id": "c47d1e720b3cf5e6c3cf5a0ae312aba5cd14096e", "content_id": "b6c96925505f88c0ba08072b0f76e996b6537792", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "permissive", "max_line_length": 42, "num_lines": 1, "path": "/src/satextractor/extractor/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .extractor import task_mosaic_patches\n" }, { "alpha_fraction": 0.5867133736610413, "alphanum_fraction": 0.5906212329864502, "avg_line_length": 27.763565063476562, "blob_id": "4a2075b5461a8094097194f0f3863fef6bcd4b71", "content_id": "6f536bc6ec68d1d2e6fcbd7824f312d4b213a7e2", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7421, "license_type": "permissive", "max_line_length": 96, "num_lines": 258, "path": "/src/satextractor/extractor/extractor.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import os\nfrom typing import Any\nfrom typing import Callable\nfrom typing import List\nfrom typing import Tuple\n\nimport numpy as np\nimport rasterio\nfrom affine import Affine\nfrom loguru import logger\nfrom osgeo import gdal\nfrom osgeo import osr\nfrom rasterio import warp\nfrom rasterio.crs import CRS\nfrom rasterio.merge import merge as riomerge\nfrom satextractor.models import ExtractionTask\nfrom satextractor.models import Tile\n\n\ndef get_window_union(\n tiles: List[Tile],\n ds: rasterio.io.DatasetReader,\n) -> rasterio.windows.Window:\n\n 
\"\"\"Get the window union to read all tiles from the geotiff.\n\n Args:\n tiles (List[Tile]): the tiles\n ds (rasterio.io.DatasetReader): the rasterio dataset to read (for the transform)\n\n Returns:\n rasterio.windows.Window: The union of all tile windows.\n \"\"\"\n\n windows = []\n\n for tile in tiles:\n\n bounds_arr_tile_crs = tile.bbox\n bounds_arr_rast_crs = warp.transform_bounds(\n ds.crs,\n CRS.from_epsg(tile.epsg),\n *bounds_arr_tile_crs,\n )\n\n window = rasterio.windows.from_bounds(*bounds_arr_rast_crs, ds.transform)\n\n windows.append(window)\n\n return rasterio.windows.union(windows)\n\n\ndef get_proj_win(tiles: List[Tile]) -> Tuple[int, int, int, int]:\n \"\"\"Get the projection bounds window of the tiles.\n\n Args:\n tiles (List[Tile]): the tiles\n\n Returns:\n [Tuple[int, int, int, int]]: upperleft_x,upperleft_y,lowerright_x,lowerright_y\n \"\"\"\n ulx = min([t.bbox[0] for t in tiles])\n uly = max([t.bbox[3] for t in tiles])\n lrx = max([t.bbox[2] for t in tiles])\n lry = min([t.bbox[1] for t in tiles])\n return int(ulx), int(uly), int(lrx), int(lry)\n\n\ndef get_tile_pixel_coords(tiles: List[Tile], raster_file: str) -> List[Tuple[int, int]]:\n \"\"\"Get the tile coord in pixels for the given file. Returns\n\n Args:\n tiles (List[Tile]): tile list\n file (str): the raster_file to get the coords from\n\n Returns:\n List[Tuple[int, int]]: The coords in pixels\n \"\"\"\n xs, ys = zip(*[(tile.bbox[0], tile.bbox[3]) for tile in tiles])\n\n with rasterio.open(raster_file) as ds:\n rows, cols = rasterio.transform.rowcol(ds.transform, xs, ys)\n\n return list(zip(rows, cols))\n\n\ndef download_and_extract_tiles_window_COG(\n fs: Any,\n task: ExtractionTask,\n resolution: int,\n) -> List[str]:\n \"\"\"Download and extract from the task assets the data for the window from each asset.\n\n Args:\n task (ExtractionTask): The extraction task\n resolution (int): The target resolution\n\n Returns:\n List[str]: A list of files that store the crops of the original assets\n \"\"\"\n\n # task tiles all have same CRS, so get their max extents and crs\n left, top, right, bottom = get_proj_win(task.tiles)\n epsg = task.tiles[0].epsg\n\n # set the transforms for the output file\n dst_transform = Affine(resolution, 0.0, left, 0.0, -resolution, top)\n out_shp = (int((right - left) / resolution), int((top - bottom) / resolution))\n\n outfiles = []\n\n band = task.band\n urls = [item.assets[band].href for item in task.item_collection.items]\n\n for ii, url in enumerate(urls):\n with fs.open(url) as f:\n with rasterio.open(f) as ds:\n window = get_window_union(task.tiles, ds)\n\n rst_arr = ds.read(\n 1,\n window=window,\n out_shape=out_shp,\n fill_value=0,\n ) # boundless=True?\n\n out_f = f\"{task.task_id}_{ii}.tif\"\n\n with rasterio.open(\n out_f,\n \"w\",\n driver=\"GTiff\",\n count=1,\n width=out_shp[0],\n height=out_shp[1],\n transform=dst_transform,\n crs=CRS.from_epsg(epsg),\n dtype=rst_arr.dtype,\n ) as dst:\n\n dst.write(rst_arr, indexes=1)\n\n outfiles.append(out_f)\n\n return outfiles\n\n\ndef download_and_extract_tiles_window(\n download_f: Callable,\n task: ExtractionTask,\n resolution: int,\n) -> List[str]:\n \"\"\"Download and extract from the task assets the window bounding the tiles.\n i.e a crop of the original assets will\n\n Args:\n download_f (Callable): The download function to use. 
It should return a BytesIO\n to read the content.\n task (ExtractionTask): The extraction task\n resolution (int): The target resolution\n\n Returns:\n List[str]: A list of files that store the crops of the original assets\n \"\"\"\n band = task.band\n urls = [item.assets[band].href for item in task.item_collection.items]\n\n epsg = task.tiles[0].epsg\n out_files = []\n for i, url in enumerate(urls):\n content = download_f(url)\n\n gdal.FileFromMemBuffer(\"/vsimem/content\", content.read())\n d = gdal.Open(\"/vsimem/content\", gdal.GA_Update)\n\n proj = osr.SpatialReference(wkt=d.GetProjection())\n proj = proj.GetAttrValue(\"AUTHORITY\", 1)\n d = None\n\n proj_win = get_proj_win(task.tiles)\n\n if int(proj) != epsg:\n file = gdal.Warp(\n f\"{task.task_id}_warp.vrt\",\n \"/vsimem/content\",\n dstSRS=f\"EPSG:{epsg}\",\n )\n else:\n file = \"/vsimem/content\"\n\n out_f = f\"{task.task_id}_{i}.jp2\"\n gdal.Translate(\n out_f,\n file,\n projWin=proj_win,\n projWinSRS=f\"EPSG:{epsg}\",\n xRes=resolution,\n yRes=resolution,\n )\n file = None\n out_files.append(out_f)\n return out_files\n\n\ndef task_mosaic_patches(\n cloud_fs: Any,\n download_f: Callable,\n task: ExtractionTask,\n method: str = \"first\",\n resolution: int = 10,\n dst_path=\"merged.jp2\",\n) -> List[np.ndarray]:\n \"\"\"Get tile patches from the mosaic of a given task\n\n Args:\n download_f (Callable): The function to download the task assets\n task (ExtractionTask): The task\n method (str, optional): The method to use while merging the assets. Defaults to \"first\".\n resolution (int, optional): The target resolution. Defaults to 10.\n dst_path (str): path to store the merged files\n\n Returns:\n List[np.ndarray]: The tile patches as numpy arrays\n \"\"\"\n\n if task.constellation == \"sentinel-2\":\n out_files = download_and_extract_tiles_window(download_f, task, resolution)\n else:\n out_files = download_and_extract_tiles_window_COG(cloud_fs, task, resolution)\n\n out_f = f\"{task.task_id}_{dst_path}\"\n datasets = [rasterio.open(f) for f in out_files]\n riomerge(datasets, method=method, dst_path=out_f)\n\n coords = get_tile_pixel_coords(task.tiles, out_f)\n patches = []\n bboxes = [t.bbox_size for t in task.tiles]\n\n with rasterio.open(out_f) as ds:\n # Loop through your list of coords\n for i, (py, px) in enumerate(coords):\n # Build a window\n w = ds.read(\n 1,\n window=rasterio.windows.Window(\n px,\n py,\n int(bboxes[i][0]) // resolution,\n int(bboxes[i][1]) // resolution,\n ),\n )\n patches.append(w)\n\n logger.info(\"Cleaning files.\")\n for f in out_files:\n os.remove(f)\n os.remove(out_f)\n return patches\n" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 49, "blob_id": "4f37bb23b96b6c38cf523bd1cbeda01f4a62da4c", "content_id": "f345c46cfa9124a22353970aa11a17066bcbdaaa", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "permissive", "max_line_length": 49, "num_lines": 1, "path": "/src/satextractor/preparer/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .preparer import create_zarr_patch_structure\n" }, { "alpha_fraction": 0.5828579664230347, "alphanum_fraction": 0.5842840671539307, "avg_line_length": 34.41414260864258, "blob_id": "7d83f0eb67399efadbb69f01dec96a838cb71480", "content_id": "f1415e7ca4187a19d0e575eb0a1cf1a8e3596180", "detected_licenses": [ 
"BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7012, "license_type": "permissive", "max_line_length": 94, "num_lines": 198, "path": "/src/satextractor/scheduler/scheduler.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from itertools import compress\nfrom typing import List\n\nimport geopandas as gpd\nimport pandas as pd\nimport pystac\nimport shapely\nfrom joblib import delayed\nfrom joblib import Parallel\nfrom loguru import logger\nfrom satextractor.models import ExtractionTask\nfrom satextractor.models import Tile\nfrom satextractor.models.constellation_info import BAND_INFO\nfrom satextractor.tiler import split_region_in_utm_tiles\nfrom satextractor.utils import get_dates_in_range\nfrom satextractor.utils import tqdm_joblib\nfrom sentinelhub import CRS\nfrom tqdm import tqdm\n\n\ndef get_scheduler(name, **kwargs):\n return eval(name)\n\n\ndef create_tasks_by_splits(\n tiles: List[Tile],\n split_m: int,\n item_collection: str,\n constellations: List[str],\n bands: List[str] = None,\n interval: int = 1,\n n_jobs: int = -1,\n verbose: int = 0,\n **kwargs,\n) -> List[ExtractionTask]:\n \"\"\"Group tiles in splits of given split_m size. It creates a task per split\n with the tiles contained by that split and the intersection with the\n stac items.\n An extraction task is created for each band listed as param (eo:bands extension otherwise)\n and for each revisit in the given date range\n\n\n Args:\n tiles (List[Tile]): The tiles to separate in zones\n split_m (int): the split square size in m,\n item_collection (str): The items geojson path containing the assets\n bands (List[str]): the bands to extract\n interval (int): the day intervale between revisits\n n_jobs (int): n_jobs used by joblib\n verbos (int): verbose for joblib\n\n\n Returns:\n List[ExtractionTask]: List of extraction tasks ready to deploy\n \"\"\"\n stac_items = pystac.ItemCollection.from_file(item_collection)\n\n logger.info(\"Loading items geojson...\")\n\n gdf = gpd.GeoDataFrame.from_file(item_collection)\n gdf.datetime = pd.to_datetime(gdf.datetime).dt.tz_localize(None)\n\n tiles_gdf = cluster_tiles_in_utm(tiles, split_m)\n\n logger.info(\"Creating extraction tasks for each constellations, date, and band ...\")\n tasks: List[ExtractionTask] = []\n\n task_tracker = 0\n\n for constellation in constellations:\n\n # Get all the date ranges for the given interval\n dates = get_dates_in_range(\n gdf.loc[gdf.constellation == constellation, \"datetime\"]\n .min()\n .to_pydatetime(),\n gdf.loc[gdf.constellation == constellation, \"datetime\"]\n .max()\n .to_pydatetime(),\n interval,\n )\n\n if bands is not None:\n run_bands = [\n b[\"band\"].name\n for kk, b in BAND_INFO[constellation].items()\n if b[\"band\"].name in bands\n ]\n else:\n run_bands = [b[\"band\"].name for kk, b in BAND_INFO[constellation].items()]\n\n logger.info(f\"Getting cluster item indexes for {constellation} in parallel...\")\n with tqdm_joblib(tqdm(desc=\"Extraction Tasks creation.\", total=len(dates))):\n cluster_items = Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(get_cluster_items_indexes)(\n gdf[\n (gdf.datetime >= start)\n & (gdf.datetime <= end)\n & (gdf.constellation == constellation)\n ],\n tiles_gdf,\n )\n for start, end in dates\n )\n\n for i, date_cluster_item in enumerate(cluster_items):\n for k, v in date_cluster_item.items():\n if v:\n c_tiles = tiles_gdf[tiles_gdf[\"cluster_id\"] == 
k]\n                    c_items_geom = gdf.iloc[v].unary_union\n                    t_indexes = c_tiles[\n                        c_tiles.geometry.apply(c_items_geom.contains)\n                    ].index\n                    if not t_indexes.empty:\n                        c_items = pystac.ItemCollection(\n                            [stac_items.items[item_index] for item_index in v],\n                        )\n                        region_tiles = [tiles[t_index] for t_index in t_indexes]\n                        sensing_time = dates[i][0]\n\n                        for b in run_bands:\n                            tasks.append(\n                                ExtractionTask(\n                                    task_id=str(task_tracker),\n                                    tiles=region_tiles,\n                                    item_collection=c_items,\n                                    band=b,\n                                    constellation=constellation,\n                                    sensing_time=sensing_time,\n                                ),\n                            )\n                            task_tracker += 1\n\n    logger.info(f\"There are a total of {len(tasks)} tasks\")\n    return tasks\n\n\ndef cluster_tiles_in_utm(tiles: List[Tile], split_m: int) -> gpd.GeoDataFrame:\n    \"\"\"Group tiles in splits of given split_m size.\n\n\n    Args:\n        tiles (List[Tile]): The tiles to separate in zones\n        split_m (int): the split square size in m,\n\n    Returns:\n        gpd.GeoDataFrame: The resulting geopandas df of the tiles and their clusters\n    \"\"\"\n\n    tiles_geom = gpd.GeoSeries([shapely.geometry.box(*t.bbox_wgs84) for t in tiles])\n\n    # Split the tile regions in UTM squares of size split_m\n    logger.info(\"Creating multipolygon of the tiles geometries...\")\n    tiles_geom_multi = shapely.geometry.MultiPolygon(tiles_geom.values)\n    splits = split_region_in_utm_tiles(\n        tiles_geom_multi,\n        crs=CRS.WGS84,\n        bbox_size=(split_m, split_m),\n    )\n\n    tiles_gdf = gpd.GeoDataFrame({\"geometry\": tiles_geom})\n    for cluster_i, s in enumerate(splits):\n        contained_tile_indexes = [i for i in range(len(tiles)) if s.contains(tiles[i])]\n        tiles_gdf.loc[contained_tile_indexes, \"cluster_id\"] = cluster_i\n\n    return tiles_gdf\n\n\ndef get_cluster_items_indexes(\n    items_gdf: gpd.GeoDataFrame,\n    tile_clusters: gpd.GeoDataFrame,\n) -> dict:\n    \"\"\"Given an items geodataframe and a tile clusters geodataframe,\n    return the item indexes that belong to each cluster.\n\n    Args:\n        items_gdf (gpd.GeoDataFrame): the items gdf\n        tile_clusters (gpd.GeoDataFrame): the tile cluster gdf that should contain\n            a cluster_id col\n\n    Returns:\n        dict: a dictionary where keys are clusters and values are item indexes\n    \"\"\"\n    cluster_item_indexes = {}\n    clusters = tile_clusters.cluster_id.unique()\n    for c in clusters:\n        s_geom_bounds = shapely.geometry.MultiPolygon(\n            tile_clusters[tile_clusters.cluster_id == c].geometry.values,\n        )\n        prep_geom = shapely.ops.prep(s_geom_bounds)\n        s = items_gdf.geometry.apply(prep_geom.intersects)\n\n        # Select the tiles for that cluster\n        region_items = list(compress(items_gdf.index.values.tolist(), s))\n\n        cluster_item_indexes[c] = region_items\n\n    return cluster_item_indexes\n" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 16, "blob_id": "e83ac452b2d7c66f47dd44d5cba6c3a4b8b7ff64", "content_id": "6939e320bf66b13dd6272566d478614024ecbc6f", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "permissive", "max_line_length": 31, "num_lines": 15, "path": "/src/satextractor/monitor/base.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from abc import ABC\nfrom abc import abstractmethod\n\n\nclass BaseMonitor(ABC):\n    def __init__(self, **kwargs):\n        pass\n\n    @abstractmethod\n    def post_status(\n        self,\n        msg_type: str,\n        msg_payload: str,\n    ) -> bool:\n        pass\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 
0.8235294222831726, "avg_line_length": 33, "blob_id": "88dac1a43ae0fd55aba707a2e829b93152e8db76", "content_id": "4d54e45fec3bed331a5ed6b36619afdea7c6e925", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "permissive", "max_line_length": 33, "num_lines": 1, "path": "/src/satextractor/storer/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .storer import store_patches\n" }, { "alpha_fraction": 0.6274731755256653, "alphanum_fraction": 0.6280384659767151, "avg_line_length": 25.14285659790039, "blob_id": "039c1577e751443aa4bf5d3383f21531435bbd53", "content_id": "34e12141f45fedd81d52dee4e09b232b8cb403b6", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5307, "license_type": "permissive", "max_line_length": 99, "num_lines": 203, "path": "/src/satextractor/cli.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "\"\"\"\nModule that contains the command line app.\n\nWhy does this file exist, and why not put this in __main__?\n\n    You might be tempted to import things from __main__ later, but that will cause\n    problems: the code will get executed twice:\n\n    - When you run `python -msatextractor` python will execute\n      ``__main__.py`` as a script. That means there won't be any\n      ``satextractor.__main__`` in ``sys.modules``.\n    - When you import __main__ it will get executed again (as a module) because\n      there's no ``satextractor.__main__`` in ``sys.modules``.\n\n    Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration\n\"\"\"\nimport os\nimport pickle\n\nimport geopandas as gpd\nimport hydra\nfrom loguru import logger\nfrom omegaconf import DictConfig\n\n\ndef build(cfg):\n    logger.info(f\"using {cfg.builder._target_} builder\")\n\n    hydra.utils.call(cfg.builder, cfg)\n\n\ndef stac(cfg):\n    logger.info(f\"using {cfg.stac._target_} stac creator.\")\n\n    if os.path.exists(cfg.item_collection):\n        logger.info(\n            f\"stac item_collection already exists in {cfg.item_collection}. Skipping.\",\n        )\n        return\n\n    gdf = gpd.read_file(cfg.gpd_input)\n    shp = gdf.unary_union\n\n    item_collection = hydra.utils.call(\n        cfg.stac,\n        credentials=cfg.credentials,\n        region=shp,\n        start_date=cfg.start_date,\n        end_date=cfg.end_date,\n        constellations=cfg.constellations,\n    )\n    item_collection.save_object(cfg.item_collection)\n\n\ndef tiler(cfg):\n\n    logger.info(f\"using {cfg.tiler._target_} tiler\")\n\n    if os.path.exists(cfg.tiles):\n        logger.info(f\"Tiles already exist in {cfg.tiles}. Skipping.\")\n        return\n\n    logger.info(f\"loading vector file {cfg.gpd_input} and reducing geometries\")\n    gdf = gpd.read_file(cfg.gpd_input)\n    shp = gdf.unary_union\n\n    logger.info(cfg.tiler)\n\n    tiles = hydra.utils.instantiate(cfg.tiler, shp)\n\n    logger.info(f\"Generated tile patches: {len(tiles)}\")\n\n    with open(cfg.tiles, \"wb\") as f:\n        pickle.dump(tiles, f)\n\n\ndef scheduler(cfg):\n\n    logger.info(f\"using {cfg.scheduler._target_} scheduler\")\n\n    if os.path.exists(cfg.extraction_tasks):\n        logger.info(\n            f\"Extraction tasks already exist in {cfg.extraction_tasks}. 
Skipping.\",\n        )\n        return\n\n    logger.info(\"Loading tiles and generating tasks\")\n    with open(cfg.tiles, \"rb\") as f:\n        tiles = pickle.load(f)\n\n    # Get the schedule function from a function dict\n    # We have to do it this way, because Hydra converts dataclasses and attr classes to configs\n    # and they cannot be passed as arguments as-is\n    schedule_f = hydra.utils.call(cfg.scheduler)\n    extraction_tasks = schedule_f(\n        tiles=tiles,\n        item_collection=cfg.item_collection,\n        constellations=cfg.constellations,\n        **cfg.scheduler,\n    )\n\n    logger.info(f\"Generated Extraction Tasks: {len(extraction_tasks)}\")\n\n    with open(cfg.extraction_tasks, \"wb\") as f:\n        pickle.dump(extraction_tasks, f)\n\n\ndef preparer(cfg):\n\n    logger.info(f\"using {cfg.preparer._target_} to prepare zarr archives\")\n\n    extraction_tasks = pickle.load(open(cfg.extraction_tasks, \"rb\"))\n    tiles = pickle.load(open(cfg.tiles, \"rb\"))\n\n    hydra.utils.call(\n        cfg.preparer,\n        cfg.credentials,\n        extraction_tasks,\n        tiles,\n        cfg.constellations,\n        f\"{cfg.cloud.storage_prefix}/{cfg.cloud.storage_root}/{cfg.dataset_name}\",\n        cfg.tiler.bbox_size,\n    )\n\n\ndef deployer(cfg):\n    logger.info(f\"using {cfg.deployer._target_} deployer\")\n\n    extraction_tasks = pickle.load(open(cfg.extraction_tasks, \"rb\"))\n\n    topic = f\"projects/{cfg.cloud.project}/topics/{'-'.join([cfg.cloud.user_id, 'stacextractor'])}\"\n\n    hydra.utils.call(\n        cfg.deployer,\n        cfg.credentials,\n        extraction_tasks,\n        f\"{cfg.cloud.storage_prefix}/{cfg.cloud.storage_root}/{cfg.dataset_name}\",\n        cfg.preparer.chunk_size,\n        topic,\n    )\n\n    # extraction_tasks_path = os.path.join(\n    #     \".\", cfg.dataset_name + \"_extraction_tasks.pkl\",\n    # )\n\n    # logger.info(f\"deploying on {cfg.deployer._target_} to {cfg.cloud.storage_root}\",)\n\n    # extraction_tasks = pickle.load(open(extraction_tasks_path, \"rb\"))\n\n    # # check tiles meet spec\n    # for t in extraction_tasks:\n    #     assert isinstance(t, ExtractionTask), \"Task does not match ExtractionTask spec\"\n\n    # hydra.utils.instantiate(cfg.deployer, extraction_tasks)\n\n\n@hydra.main(config_path=\"./../../conf\", config_name=\"config\")\ndef main(cfg: DictConfig):\n    \"\"\"\n    Args:\n        cfg: a dict config object\n\n    Returns:\n        int: A return code\n\n    Runs the configured pipeline tasks in order.\n    \"\"\"\n\n    for t in cfg.tasks:\n        assert t in [\n            \"build\",\n            \"stac\",\n            \"tile\",\n            \"schedule\",\n            \"prepare\",\n            \"deploy\",\n        ], \"valid tasks are [build, stac, tile, schedule, prepare, deploy]\"\n\n    logger.info(f\"Running tasks {cfg.tasks}\")\n\n    if \"build\" in cfg.tasks:\n        build(cfg)\n\n    if \"stac\" in cfg.tasks:\n        stac(cfg)\n\n    if \"tile\" in cfg.tasks:\n        tiler(cfg)\n\n    if \"schedule\" in cfg.tasks:\n        scheduler(cfg)\n\n    if \"prepare\" in cfg.tasks:\n        preparer(cfg)\n\n    if \"deploy\" in cfg.tasks:\n        deployer(cfg)\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    main()\n" }, 
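Since the CLI above is Hydra-driven, individual stages and config values can also be overridden at invocation time. A sketch (the override keys assume the `config.yaml` shown in the README; adjust to your setup):

```sh
python src/satextractor/cli.py dataset_name=cordoba \
    start_date=2020-01-01 end_date=2020-02-01 'tasks=[stac,tile,schedule]'
```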
{ "alpha_fraction": 0.6340718269348145, "alphanum_fraction": 0.6432391405105591, "avg_line_length": 30.16666603088379, "blob_id": "c94ce8677ab0ef6875cbe4e6efcbb61ed3938ac0", "content_id": "5304cea00c93541a02f040acba1f84f34fc20c3b", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1309, "license_type": "permissive", "max_line_length": 109, "num_lines": 42, "path": "/src/satextractor/tiler/tiler.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from typing import List\nfrom typing import Tuple\nfrom typing import Union\n\nimport shapely\nfrom satextractor.models import Tile\nfrom sentinelhub import CRS\nfrom sentinelhub import UtmZoneSplitter\n\n\ndef split_region_in_utm_tiles(\n    region: Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon],\n    crs: CRS = CRS.WGS84,\n    bbox_size: Tuple[int, int] = (10000, 10000),\n    **kwargs,\n) -> List[Tile]:\n    \"\"\"Split a given geometry into squares measured in meters.\n    It splits the region on a UTM grid and then converts back to the given crs.\n\n    Args:\n        region (Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]): The region to split\n        bbox_size (Tuple[int, int]): square bbox in meters\n\n    Returns:\n        [List[Tile]]: The Tiles representing each of the boxes\n    \"\"\"\n    utm_zone_splitter = UtmZoneSplitter([region], crs, bbox_size)\n    crs_bboxes = utm_zone_splitter.get_bbox_list()\n    info_bboxes = utm_zone_splitter.get_info_list()\n\n    tiles = [\n        Tile(\n            id=\"_\".join(\n                [str(box.crs.epsg), str(info[\"index_x\"]), str(info[\"index_y\"])],\n            ),\n            epsg=box.crs.epsg,\n            bbox=(box.min_x, box.min_y, box.max_x, box.max_y),\n        )\n        for info, box in zip(info_bboxes, crs_bboxes)\n    ]\n\n    return tiles\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 44, "blob_id": "242adb72cc5c6aabb68a070807f8b48f042967de", "content_id": "291d7795a276f54915ea30b9e2a876b3400978a4", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "permissive", "max_line_length": 44, "num_lines": 1, "path": "/src/satextractor/tiler/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .tiler import split_region_in_utm_tiles\n" }, { "alpha_fraction": 0.5119248628616333, "alphanum_fraction": 0.5150566101074219, "avg_line_length": 28.649999618530273, "blob_id": "b7b088efe73f2d86faf3f3b655cade1ced320a9b", "content_id": "ddbe20d58809ad3d7abd3f89207860227850ed3b", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8302, "license_type": "permissive", "max_line_length": 88, "num_lines": 280, "path": "/ui/dashboard/callbacks.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import json\nimport math\n\nimport dash\nimport dash_bootstrap_components as dbc\nimport yaml\nfrom dash import html\nfrom dash.dependencies import ALL\nfrom dash.dependencies import Input\nfrom dash.dependencies import MATCH\nfrom dash.dependencies import Output\nfrom dash.dependencies import State\nfrom dash.exceptions import PreventUpdate\nfrom satextractor.monitors import monitors\n\ncfg = yaml.load(open(\"./conf/config.yaml\", \"r\"), Loader=yaml.SafeLoader)\ncfg_cloud = yaml.load(open(\"./conf/cloud/gcp.yaml\", \"r\"), Loader=yaml.SafeLoader)\ncfg_monitor = yaml.load(open(\"./conf/monitor/gcp.yaml\", \"r\"), Loader=yaml.SafeLoader)\n\nmonitor_api = monitors[cfg_monitor[\"_target_\"]](**cfg_cloud, **cfg_monitor)\n\n\ndef tail(f, lines=20):\n    total_lines_wanted = lines\n\n    BLOCK_SIZE = 1024\n    f.seek(0, 2)\n    block_end_byte = f.tell()\n    lines_to_go = total_lines_wanted\n    block_number = -1\n    blocks = []\n    while lines_to_go > 0 and block_end_byte > 0:\n        if block_end_byte - BLOCK_SIZE > 0:\n            f.seek(block_number * BLOCK_SIZE, 2)\n            blocks.append(f.read(BLOCK_SIZE))\n        else:\n            f.seek(0, 0)\n            blocks.append(f.read(block_end_byte))\n        
lines_found = blocks[-1].count(b\"\\n\")\n lines_to_go -= lines_found\n block_end_byte -= BLOCK_SIZE\n block_number -= 1\n all_read_text = b\"\".join(reversed(blocks))\n return b\"\\n\".join(all_read_text.splitlines()[-total_lines_wanted:])\n\n\ndef build_row(job_df, job_id, row, total_tasks):\n def try_get(row, key):\n try:\n return row[key]\n except Exception:\n return \"\"\n\n def format_int(val):\n if math.isnan(val):\n return \"nan\"\n elif isinstance(val, int):\n return f\"{val:d}\"\n elif isinstance(val, float):\n return f\"{int(val):d}\"\n else:\n return str(val)\n\n elements = [html.Td(job_id, className=\"job-table-item\")]\n\n for key in [\"dataset_name\", \"task_type\", \"constellation\"]:\n elements.append(html.Td(job_df.loc[job_id, key], className=\"job-table-item\"))\n\n for key in [\"STARTED\", \"FINISHED\", \"FAILED\"]:\n elements.append(\n html.Td(\n format_int(row[key]) + \" / \" + format_int(total_tasks),\n className=\"job-table-item\",\n ),\n )\n\n button_div = html.Td(\n dbc.Row(\n [\n html.Div(\n dbc.Button(\n children=[html.I(className=\"fas fa-redo-alt\")],\n id={\"type\": \"rerun-btn\", \"index\": job_id},\n ),\n id=f\"tooltip-wrapper-rerun-{job_id}\",\n ),\n html.Div(\n dbc.Button(\n children=[html.I(className=\"fas fa-exclamation-triangle\")],\n id={\"type\": \"stacktrace-btn\", \"index\": job_id},\n ),\n id=f\"tooltip-wrapper-stacktrace-{job_id}\",\n ),\n ],\n ),\n className=\"job-table-item\",\n )\n\n elements.append(button_div)\n\n return html.Tr(elements)\n\n\ndef build_table(job_df, count_df, total_tasks_df):\n\n rows = [\n build_row(job_df, idx, row, total_tasks_df.loc[idx, \"count\"])\n for idx, row in count_df.iterrows()\n ]\n\n header = html.Tr(\n [\n html.Th(h, className=\"job-table-item\")\n for h in [\n \"job_id\",\n \"dataset_name\",\n \"task_type\",\n \"constellation\",\n \"STATUS:STARTED\",\n \"STATUS:FINISHED\",\n \"STATUS:FAILED\",\n \"action\",\n ]\n ],\n )\n\n tooltips = []\n for idx, row in count_df.iterrows():\n tooltips.append(\n dbc.Tooltip(\n \"rerun failed tasks\",\n target=f\"tooltip-wrapper-rerun-{idx}\",\n placement=\"top\",\n ),\n )\n tooltips.append(\n dbc.Tooltip(\n \"query stacktraces\",\n target=f\"tooltip-wrapper-stacktrace-{idx}\",\n placement=\"top\",\n ),\n )\n\n return html.Div(\n [html.Table([header] + rows, id=\"job-table\", className=\"job-table\")] + tooltips,\n )\n\n\ndef build_collapse_stacktrace(task_id, st):\n\n row = dbc.Row(\n [\n dbc.Col(\n [\n html.A(task_id, id={\"type\": \"stacktrace-button\", \"index\": task_id}),\n dbc.Collapse(\n dbc.Card(dbc.CardBody(st)),\n id={\"type\": \"stacktrace-collapse\", \"index\": task_id},\n is_open=False,\n ),\n ],\n ),\n ],\n )\n\n return row\n\n\ndef register_callbacks(dashapp):\n \"\"\"Register callbacks to the dashapp:\n populate logs, populate jobs-table, populate stacktrace, rerun jobs\"\"\"\n\n @dashapp.callback(\n Output(\"logs-div\", \"children\"),\n Input(\"logging-interval\", \"n_intervals\"),\n )\n def populate_logs(n):\n\n lines = tail(open(cfg[\"log_path\"], \"rb\"), lines=5)\n\n return [dbc.Row(html.Span(el)) for el in lines.decode(\"utf-8\").split(\"\\n\")]\n\n @dashapp.callback(\n Output(\"jobs-div\", \"children\"),\n Input(\"table-interval\", \"n_intervals\"),\n )\n def populate_jobs_table(n):\n\n job_df = monitor_api.get_job_parameters().set_index(\"job_id\")\n\n count_df = monitor_api.get_current_tasks_status()\n\n count_df = count_df.set_index([\"job_id\", \"msg_type\"]).unstack()\n count_df.columns = count_df.columns.droplevel(0)\n\n for key in [\"PARAMS\", 
\"STARTED\", \"FINISHED\", \"FAILED\"]:\n if key not in count_df:\n count_df[key] = math.nan\n\n total_tasks_df = monitor_api.get_total_tasks_by_job().set_index(\"job_id\")\n\n return build_table(job_df, count_df, total_tasks_df)\n\n @dashapp.callback(\n Output({\"type\": \"stacktrace-collapse\", \"index\": MATCH}, \"is_open\"),\n Input({\"type\": \"stacktrace-button\", \"index\": MATCH}, \"n_clicks\"),\n State({\"type\": \"stacktrace-collapse\", \"index\": MATCH}, \"is_open\"),\n )\n def open_st_collapse(n, is_open):\n if n:\n return not is_open\n else:\n return is_open\n\n @dashapp.callback(\n Output(\"stacktrace-div\", \"children\"),\n Input({\"type\": \"stacktrace-btn\", \"index\": ALL}, \"n_clicks\"),\n State(\"stacktrace-div\", \"children\"),\n )\n def query_stacktraces(ns, curren_div):\n\n ctx = dash.callback_context\n\n if not ctx.triggered:\n print(\"stacktrace - ctx not triggered\")\n raise PreventUpdate\n\n else:\n\n if ns is None:\n print(\"stacktrace - ns is None\")\n raise PreventUpdate\n elif sum([(n is not None) for n in ns]) == 0:\n print(\"stacktrace - all ns are None\")\n raise PreventUpdate\n else:\n job_id = json.loads(ctx.triggered[0][\"prop_id\"].split(\".\")[0])[\"index\"]\n\n result = monitor_api.get_stacktraces(job_id)\n\n rows = [\n build_collapse_stacktrace(task_id, st)\n for task_id, st in result[\n [\"task_id\", \"msg_payload\"]\n ].values.tolist()\n ]\n\n print(\"stacktrace - built rows\")\n\n return rows\n\n @dashapp.callback(\n [Output(\"rerun-alert\", \"is_open\"), Output(\"rerun-alert\", \"children\")],\n Input({\"type\": \"rerun-btn\", \"index\": ALL}, \"n_clicks\"),\n )\n def cb_rerun_failed_tasks(ns):\n\n ctx = dash.callback_context\n\n if not ctx.triggered:\n print(\"rerun - not triggered\")\n return [False, \"\"]\n\n elif ns is None:\n print(\"rerun - ns is None\")\n return [False, \"\"]\n\n elif sum([(n is not None) for n in ns]) == 0:\n print(\"rerun - all ns are None\")\n return [False, \"\"]\n\n else:\n\n print(\"rerun - rerunning\")\n\n job_id = json.loads(ctx.triggered[0][\"prop_id\"].split(\".\")[0])[\"index\"]\n\n n_reruns = monitor_api.rerun_failed_tasks(job_id)\n\n return [True, f\"Relaunched {n_reruns} failed tasks for job_id {job_id}\"]\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 33, "blob_id": "d9975afa1853c0fe0caef378525b29b7ab97690c", "content_id": "81db16fe7ad9dadf969f1d74d5cc6ad14ee5eb2e", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "permissive", "max_line_length": 41, "num_lines": 3, "path": "/src/satextractor/models/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .constellation_info import BAND_INFO\nfrom .models import ExtractionTask\nfrom .models import Tile\n" }, { "alpha_fraction": 0.5022599101066589, "alphanum_fraction": 0.5152542591094971, "avg_line_length": 27.54838752746582, "blob_id": "761ceb8525ff4703991a48048e41719a0e32658f", "content_id": "1a34f78adad8e55b408d179413d1897d179cb31c", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "permissive", "max_line_length": 81, "num_lines": 62, "path": "/src/satextractor/preparer/preparer.py", "repo_name": "frandorr/sat-extractor", 
"src_encoding": "UTF-8", "text": "import numpy as np\nimport zarr\n\n\ndef create_zarr_patch_structure(\n fs_mapper,\n storage_path,\n tile_id,\n patch_size,\n chunk_size,\n sensing_times,\n constellation,\n bands,\n):\n if not sensing_times.size == 0:\n patch_size_pixels = patch_size // min(b[\"gsd\"] for _, b in bands.items())\n\n patch_constellation_path = f\"{storage_path}/{tile_id}/{constellation}\"\n zarr.open(fs_mapper(patch_constellation_path), mode=\"a\")\n\n patch_path = f\"{patch_constellation_path}/data\"\n zarr.open_array(\n fs_mapper(patch_path),\n \"w\",\n shape=(\n len(sensing_times),\n len(bands),\n int(patch_size_pixels),\n int(patch_size_pixels),\n ),\n chunks=(1, 1, int(chunk_size), int(chunk_size)),\n dtype=np.uint16,\n )\n\n mask_path = f\"{patch_constellation_path}/mask\"\n zarr.open_array(\n fs_mapper(mask_path),\n \"w\",\n shape=(len(sensing_times), len(bands)),\n chunks=(1, 1),\n dtype=np.uint8,\n )\n\n percentiles_path = f\"{patch_constellation_path}/percentiles_0to100_5incr\"\n zarr.open_array(\n fs_mapper(percentiles_path),\n \"w\",\n shape=(len(sensing_times), len(bands), 21),\n chunks=(1, 1, 21),\n dtype=np.float32,\n )\n\n # Create timestamps array\n timestamps_path = f\"{patch_constellation_path}/timestamps\"\n z_dates = zarr.open_array(\n fs_mapper(f\"{timestamps_path}\"),\n mode=\"w\",\n shape=(len(sensing_times)),\n chunks=(len(sensing_times)),\n dtype=\"<U27\",\n )\n z_dates[:] = sensing_times\n" }, { "alpha_fraction": 0.5333263874053955, "alphanum_fraction": 0.5359340906143188, "avg_line_length": 27.7897891998291, "blob_id": "8742f29ef3c6ea191d706871cb3abbb53d821448", "content_id": "0f5550323db053eff72399ed728fae09aa7486b0", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9587, "license_type": "permissive", "max_line_length": 104, "num_lines": 333, "path": "/src/satextractor/builder/gcp_builder.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "\"\"\"\nBuild the cloud functions.\n1. Package the functions in zip and write them to the bucket\n2. 
Deploy using function with the bucket as source\nhelpful: https://stackoverflow.com/questions/47376380/create-google-cloud-function-using-api-in-python\n\"\"\"\nimport json\nimport subprocess\nfrom subprocess import run\n\nfrom google.api_core.exceptions import NotFound\nfrom google.cloud import bigquery\nfrom google.cloud import pubsub_v1 as pubsub\nfrom loguru import logger\n\n\ndef build_gcp(cfg):\n\n    builder = BuildGCP(\n        credentials=cfg.credentials,\n        **cfg.cloud,\n        **cfg.builder,\n    )\n\n    builder.build()\n\n    return 1\n\n\nclass BuildGCP:\n    def __init__(\n        self,\n        project,\n        region,\n        storage_root,\n        credentials,\n        user_id,\n        **kwargs,\n    ):\n\n        self.project = project\n        self.region = region\n        self.dest_bucket = storage_root.split(\"/\")[0]\n        self.gcp_credentials = credentials\n        self.user_id = user_id\n\n        if \"europe\" in self.region:\n            self.image_region_code = \"eu.gcr.io\"\n        elif \"america\" in self.region or self.region.split(\"-\")[0] == \"us\":\n            self.image_region_code = \"us.gcr.io\"\n        elif \"asia\" in self.region or \"australia\" in self.region:\n            self.image_region_code = \"asia.gcr.io\"\n        else:\n            self.image_region_code = \"gcr.io\"\n\n    def build(self):\n\n        logger.info(\"building docker image\")\n        self.build_docker_image()\n        logger.info(\"building pubsub topic\")\n        self.build_pubsub_topic()\n        logger.info(\"building tracking tables\")\n        self.build_tracking_tables()\n        logger.info(\"building cloud run service\")\n        self.build_cloudrun_service()\n\n        return 1\n\n    def bq_dataset_exists(self, client, name):\n        try:\n            client.get_dataset(name)\n            return True\n        except NotFound:\n            return False\n\n    def bq_table_exists(self, client, name):\n        try:\n            client.get_table(name)\n            return True\n        except NotFound:\n            return False\n\n    def build_tracking_tables(self):\n\n        bq_client = bigquery.Client.from_service_account_json(self.gcp_credentials)\n\n        logger.info(\"Creating bigquery table to monitor tasks\")\n\n        # check if dataset exists\n        task_tracking_dataset = \".\".join(\n            [\n                self.project,\n                \"satextractor\",\n            ],\n        )\n\n        task_tracking_table = \".\".join(\n            [\n                self.project,\n                \"satextractor\",\n                self.user_id,\n            ],\n        )\n        self.task_tracking_table = task_tracking_table\n\n        if not self.bq_dataset_exists(bq_client, task_tracking_dataset):\n            logger.info(f\"Creating Dataset {task_tracking_dataset}\")\n            dataset = bigquery.Dataset(task_tracking_dataset)\n            bq_client.create_dataset(dataset)\n            logger.info(\"Created Dataset\")\n\n        if not self.bq_table_exists(bq_client, task_tracking_table):\n            logger.info(f\"Creating Table {task_tracking_table}\")\n\n            schema = [\n                bigquery.SchemaField(\"timestamp\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"job_id\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"task_id\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"storage_gs_path\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"msg_type\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"msg_payload\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"dataset_name\", \"STRING\", mode=\"REQUIRED\"),\n                bigquery.SchemaField(\"constellation\", \"STRING\", mode=\"REQUIRED\"),\n            ]\n\n            table = bigquery.Table(task_tracking_table, schema=schema)\n            table = bq_client.create_table(table)  # Make an API request.\n            logger.info(\n                f\"created table {table.project}.{table.dataset_id}.{table.table_id}\",\n            )\n\n        return 1\n\n    def build_docker_image(self):\n        logger.info(\"Building docker image...\")\n\n        cmd = [\n            \"gcloud\",\n            \"builds\",\n            \"submit\",\n            \"--tag\",\n            
f\"{self.image_region_code}/{self.project}/{self.user_id}-stacextractor\",\n ]\n\n p = run(cmd, text=True, stdout=subprocess.DEVNULL)\n p.check_returncode()\n\n return 1\n\n def build_pubsub_topic(self):\n\n # build pubsub\n publisher = pubsub.PublisherClient.from_service_account_file(\n self.gcp_credentials,\n )\n\n topic_name = \"projects/{project_id}/topics/{topic}\".format(\n project_id=self.project,\n topic=\"-\".join([self.user_id, \"stacextractor\"]),\n )\n\n dlq_topic_name = \"projects/{project_id}/topics/{topic}\".format(\n project_id=self.project,\n topic=\"-\".join([self.user_id, \"stacextractor-dql\"]),\n )\n\n self.topic_name = topic_name\n self.dlq_topic_name = dlq_topic_name\n\n # check if topic exists\n existing_topics = publisher.list_topics(project=f\"projects/{self.project}\")\n existing_topic_names = [t.name for t in existing_topics]\n\n if topic_name not in existing_topic_names:\n publisher.create_topic(name=topic_name)\n\n if dlq_topic_name not in existing_topic_names:\n publisher.create_topic(name=dlq_topic_name)\n\n return 1\n\n def build_cloudrun_service(self):\n\n # https://cloud.google.com/run/docs/tutorials/pubsub\n\n # deploy the image\n # gcloud run deploy pubsub-tutorial --image gcr.io/PROJECT_ID/pubsub --no-allow-unauthenticated\n\n logger.info(\"deploying image\")\n\n cmd = [\n \"gcloud\",\n \"run\",\n \"deploy\",\n f\"{self.user_id}-stacextractor\",\n \"--region\",\n f\"{self.region}\",\n \"--image\",\n f\"{self.image_region_code}/{self.project}/{self.user_id}-stacextractor\",\n \"--update-env-vars\",\n f\"MONITOR_TABLE={self.task_tracking_table}\",\n \"--no-allow-unauthenticated\",\n \"--memory\",\n \"4G\",\n \"--timeout\",\n \"15m\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n # subscribe the credentialed service account to the pubsub topic\n\n # gcloud run services add-iam-policy-binding pubsub-tutorial \\\n # --member=serviceAccount:cloud-run-pubsub-invoker@PROJECT_ID.iam.gserviceaccount.com \\\n # --role=roles/run.invoker\n\n service_account_email = json.load(open(self.gcp_credentials, \"r\"))[\n \"client_email\"\n ]\n\n logger.info(\"binding service account\")\n\n cmd = [\n \"gcloud\",\n \"run\",\n \"services\",\n \"add-iam-policy-binding\",\n f\"{self.user_id}-stacextractor\",\n f\"--member=serviceAccount:{service_account_email}\",\n \"--role=roles/run.invoker\",\n \"--region\",\n f\"{self.region}\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n # get the service endpoint url\n\n logger.info(\"get service endpoint url\")\n\n cmd = [\n \"gcloud\",\n \"run\",\n \"services\",\n \"describe\",\n f\"{self.user_id}-stacextractor\",\n \"--platform\",\n \"managed\",\n \"--region\",\n f\"{self.region}\",\n \"--format\",\n \"value(status.url)\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n url = p.stdout.strip()\n print(url)\n\n logger.info(\"bind the topic to the endpoint\")\n\n cmd = [\n \"gcloud\",\n \"pubsub\",\n \"subscriptions\",\n \"create\",\n f\"{self.user_id}-stacextractor\",\n \"--topic\",\n f\"{self.topic_name}\",\n f\"--push-endpoint={url}/\",\n f\"--push-auth-service-account={service_account_email}\",\n \"--ack-deadline\",\n \"600\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n logger.info(\"adding deadletter\")\n\n cmd = [\n \"gcloud\",\n \"pubsub\",\n \"subscriptions\",\n \"update\",\n f\"{self.user_id}-stacextractor\",\n \"--dead-letter-topic\",\n 
f\"{self.dlq_topic_name}\",\n \"--max-delivery-attempts\",\n \"5\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n logger.info(\"adding deadletter permissions\")\n cmd = [\n \"gcloud\",\n \"pubsub\",\n \"topics\",\n \"add-iam-policy-binding\",\n f\"{self.dlq_topic_name}\",\n f\"--member=serviceAccount:{service_account_email}\",\n \"--role=roles/pubsub.publisher\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n cmd = [\n \"gcloud\",\n \"pubsub\",\n \"subscriptions\",\n \"add-iam-policy-binding\",\n f\"{self.user_id}-stacextractor\",\n f\"--member=serviceAccount:{service_account_email}\",\n \"--role=roles/pubsub.subscriber\",\n ]\n\n p = run(cmd, capture_output=True, text=True)\n print(p.stdout)\n print(p.stderr)\n\n return 1\n" }, { "alpha_fraction": 0.380206435918808, "alphanum_fraction": 0.4417729079723358, "avg_line_length": 23.879154205322266, "blob_id": "a347068db3e5aaf2eebfca7e4fdc889609bae42c", "content_id": "272c26778f2e03096ae9af4b857f43868d0fd56a", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8235, "license_type": "permissive", "max_line_length": 88, "num_lines": 331, "path": "/src/satextractor/models/constellation_info.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from pystac import MediaType\nfrom pystac.extensions.eo import Band\n\nMEDIA_TYPES = {\n \"sentinel-2\": MediaType.JPEG2000,\n \"landsat-5\": MediaType.GEOTIFF,\n \"landsat-7\": MediaType.GEOTIFF,\n \"landsat-8\": MediaType.GEOTIFF,\n}\n\nLANDSAT_PROPERTIES = {\n \"landsat-5\": {\"DATA_TYPE\": \"L1TP\", \"SENSOR_ID\": \"TM\"},\n \"landsat-7\": {\"DATA_TYPE\": \"L1TP\", \"SENSOR_ID\": \"ETM\"},\n \"landsat-8\": {\"DATA_TYPE\": \"L1TP\", \"SENSOR_ID\": \"OLI_TIRS\"},\n}\n\n\nLANDSAT8_BAND_INFO = {\n \"B1\": {\n \"band\": Band.create(\n name=\"B1\",\n common_name=\"coastal\",\n center_wavelength=0.48,\n full_width_half_max=0.02,\n ),\n \"gsd\": 30.0,\n },\n \"B2\": {\n \"band\": Band.create(\n name=\"B2\",\n common_name=\"blue\",\n center_wavelength=0.44,\n full_width_half_max=0.06,\n ),\n \"gsd\": 30.0,\n },\n \"B3\": {\n \"band\": Band.create(\n name=\"B3\",\n common_name=\"green\",\n center_wavelength=0.56,\n full_width_half_max=0.06,\n ),\n \"gsd\": 30.0,\n },\n \"B4\": {\n \"band\": Band.create(\n name=\"B4\",\n common_name=\"red\",\n center_wavelength=0.65,\n full_width_half_max=0.04,\n ),\n \"gsd\": 30.0,\n },\n \"B5\": {\n \"band\": Band.create(\n name=\"B5\",\n common_name=\"nir\",\n center_wavelength=0.86,\n full_width_half_max=0.03,\n ),\n \"gsd\": 30.0,\n },\n \"B6\": {\n \"band\": Band.create(\n name=\"B6\",\n common_name=\"swir16\",\n center_wavelength=1.6,\n full_width_half_max=0.08,\n ),\n \"gsd\": 30.0,\n },\n \"B7\": {\n \"band\": Band.create(\n name=\"B7\",\n common_name=\"swir22\",\n center_wavelength=2.2,\n full_width_half_max=0.2,\n ),\n \"gsd\": 30.0,\n },\n \"B8\": {\n \"band\": Band.create(\n name=\"B8\",\n common_name=\"pan\",\n center_wavelength=0.59,\n full_width_half_max=0.18,\n ),\n \"gsd\": 15.0,\n },\n \"B9\": {\n \"band\": Band.create(\n name=\"B9\",\n common_name=\"cirrus\",\n center_wavelength=1.37,\n full_width_half_max=0.02,\n ),\n \"gsd\": 30.0,\n },\n \"B10\": {\n \"band\": Band.create(\n name=\"B10\",\n common_name=\"lwir11\",\n center_wavelength=10.9,\n full_width_half_max=0.8,\n ),\n \"gsd\": 100.0,\n },\n 
\"B11\": {\n \"band\": Band.create(\n name=\"B11\",\n common_name=\"lwir12\",\n center_wavelength=12.0,\n full_width_half_max=1.0,\n ),\n \"gsd\": 100.0,\n },\n}\n\nLANDSAT7_BAND_INFO = {\n \"B1\": {\n \"band\": Band.create(\n name=\"B1\",\n common_name=\"blue\",\n center_wavelength=0.485,\n full_width_half_max=0.035,\n ),\n \"gsd\": 30.0,\n },\n \"B2\": {\n \"band\": Band.create(\n name=\"B2\",\n common_name=\"green\",\n center_wavelength=0.56,\n full_width_half_max=0.04,\n ),\n \"gsd\": 30.0,\n },\n \"B3\": {\n \"band\": Band.create(\n name=\"B3\",\n common_name=\"red\",\n center_wavelength=0.66,\n full_width_half_max=0.03,\n ),\n \"gsd\": 30.0,\n },\n \"B4\": {\n \"band\": Band.create(\n name=\"B4\",\n common_name=\"near infrared\",\n center_wavelength=0.835,\n full_width_half_max=0.065,\n ),\n \"gsd\": 30.0,\n },\n \"B5\": {\n \"band\": Band.create(\n name=\"B5\",\n common_name=\"shortwave infrared 1\",\n center_wavelength=1.65,\n full_width_half_max=0.10,\n ),\n \"gsd\": 30.0,\n },\n \"B6_VCID_1\": {\n \"band\": Band.create(\n name=\"B6_VCID_1\",\n common_name=\"low-gain thermal infrared 1\",\n center_wavelength=11.45,\n full_width_half_max=1.05,\n ),\n \"gsd\": 60.0,\n },\n \"B6_VCID_2\": {\n \"band\": Band.create(\n name=\"B6_VCID_2\",\n common_name=\"high-gain thermal infrared 2\",\n center_wavelength=11.45,\n full_width_half_max=1.05,\n ),\n \"gsd\": 60.0,\n },\n \"B7\": {\n \"band\": Band.create(\n name=\"B7\",\n common_name=\"shortwave infrared 2\",\n center_wavelength=2.215,\n full_width_half_max=0.135,\n ),\n \"gsd\": 30.0,\n },\n \"B8\": {\n \"band\": Band.create(\n name=\"B8\",\n common_name=\"panchromatic\",\n center_wavelength=0.71,\n full_width_half_max=0.24,\n ),\n \"gsd\": 15.0,\n },\n}\n\nLANDSAT5_BAND_INFO = {\n \"B1\": {\n \"band\": Band.create(\n name=\"B1\",\n common_name=\"blue\",\n center_wavelength=0.485,\n full_width_half_max=0.035,\n ),\n \"gsd\": 30.0,\n },\n \"B2\": {\n \"band\": Band.create(\n name=\"B2\",\n common_name=\"green\",\n center_wavelength=0.56,\n full_width_half_max=0.04,\n ),\n \"gsd\": 30.0,\n },\n \"B3\": {\n \"band\": Band.create(\n name=\"B3\",\n common_name=\"red\",\n center_wavelength=0.66,\n full_width_half_max=0.03,\n ),\n \"gsd\": 30.0,\n },\n \"B4\": {\n \"band\": Band.create(\n name=\"B4\",\n common_name=\"near infrared\",\n center_wavelength=0.835,\n full_width_half_max=0.065,\n ),\n \"gsd\": 30.0,\n },\n \"B5\": {\n \"band\": Band.create(\n name=\"B5\",\n common_name=\"shortwave infrared 1\",\n center_wavelength=1.65,\n full_width_half_max=0.10,\n ),\n \"gsd\": 30.0,\n },\n \"B6\": {\n \"band\": Band.create(\n name=\"B6\",\n common_name=\"thermal infrared 1\",\n center_wavelength=11.45,\n full_width_half_max=1.05,\n ),\n \"gsd\": 60.0,\n },\n \"B7\": {\n \"band\": Band.create(\n name=\"B7\",\n common_name=\"shortwave infrared 2\",\n center_wavelength=2.215,\n full_width_half_max=0.135,\n ),\n \"gsd\": 30.0,\n },\n}\n\n\nSENTINEL2_BAND_INFO = {\n \"B01\": {\n \"band\": Band.create(name=\"B01\", common_name=\"coastal\", center_wavelength=0.443),\n \"gsd\": 60.0,\n },\n \"B02\": {\n \"band\": Band.create(name=\"B02\", common_name=\"blue\", center_wavelength=0.490),\n \"gsd\": 10.0,\n },\n \"B03\": {\n \"band\": Band.create(name=\"B03\", common_name=\"green\", center_wavelength=0.560),\n \"gsd\": 10.0,\n },\n \"B04\": {\n \"band\": Band.create(name=\"B04\", common_name=\"red\", center_wavelength=0.665),\n \"gsd\": 10.0,\n },\n \"B05\": {\n \"band\": Band.create(name=\"B05\", common_name=\"rededge\", center_wavelength=0.705),\n \"gsd\": 
20.0,\n },\n \"B06\": {\n \"band\": Band.create(name=\"B06\", common_name=\"rededge\", center_wavelength=0.740),\n \"gsd\": 20.0,\n },\n \"B07\": {\n \"band\": Band.create(name=\"B07\", common_name=\"rededge\", center_wavelength=0.783),\n \"gsd\": 20.0,\n },\n \"B08\": {\n \"band\": Band.create(name=\"B08\", common_name=\"nir\", center_wavelength=0.842),\n \"gsd\": 10.0,\n },\n \"B8A\": {\n \"band\": Band.create(name=\"B8A\", common_name=\"nir08\", center_wavelength=0.865),\n \"gsd\": 20.0,\n },\n \"B09\": {\n \"band\": Band.create(name=\"B09\", common_name=\"nir09\", center_wavelength=0.945),\n \"gsd\": 60.0,\n },\n \"B10\": {\n \"band\": Band.create(name=\"B10\", common_name=\"cirrus\", center_wavelength=1.375),\n \"gsd\": 60.0,\n },\n \"B11\": {\n \"band\": Band.create(name=\"B11\", common_name=\"swir16\", center_wavelength=1.610),\n \"gsd\": 20.0,\n },\n \"B12\": {\n \"band\": Band.create(name=\"B12\", common_name=\"swir22\", center_wavelength=2.190),\n \"gsd\": 20.0,\n },\n}\n\nBAND_INFO = {\n \"sentinel-2\": SENTINEL2_BAND_INFO,\n \"landsat-5\": LANDSAT5_BAND_INFO,\n \"landsat-7\": LANDSAT7_BAND_INFO,\n \"landsat-8\": LANDSAT8_BAND_INFO,\n}\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 44, "blob_id": "4101dcc06cf0873cc0bdfee6d2c7d5a7d494424a", "content_id": "a8c60b4504feda9cee664f056ddda5977eca6949", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "permissive", "max_line_length": 44, "num_lines": 1, "path": "/output/README.md", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "Output directories (e.g. 
`cordoba`) go here.\n" }, { "alpha_fraction": 0.6157587766647339, "alphanum_fraction": 0.6211089491844177, "avg_line_length": 32.704917907714844, "blob_id": "e0787c3d6033943ce481aa88c9d60fcaf3382bcc", "content_id": "4ab5857b2c59512422876a596db92bae8300588c", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2056, "license_type": "permissive", "max_line_length": 85, "num_lines": 61, "path": "/src/satextractor/storer/storer.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import datetime\nfrom typing import Any\nfrom typing import List\n\nimport numpy as np\nimport zarr\nfrom satextractor.models import ExtractionTask\nfrom scipy.ndimage import zoom\n\n\ndef store_patches(\n    fs_mapper: Any,\n    storage_path: str,\n    patches: List[np.ndarray],\n    task: ExtractionTask,\n    bands: List[str],\n    patch_resolution: int,\n    archive_resolution: int,\n):\n    \"\"\"Store a list of patches in storage path.\n    Assumes the target structure file is already created.\n\n    Args:\n        fs_mapper (Any): a file system mapper to map the path, e.g. gcsfs.get_mapper\n        storage_path (str): The path where to store the patches\n        patches (List[np.ndarray]): The patches as numpy arrays\n        task (ExtractionTask): The extraction task containing the tiles\n        bands (List[str]): the bands\n        patch_resolution (int): the resolution the patches were extracted at\n        archive_resolution (int): the base resolution of the target archive\n    \"\"\"\n    tiles = task.tiles\n    for i, tile in enumerate(tiles):\n\n        data_path = f\"{storage_path}/{tile.id}/{task.constellation}/data\"\n        timestamps_path = f\"{storage_path}/{tile.id}/{task.constellation}/timestamps\"\n\n        timestamps = zarr.open(fs_mapper(timestamps_path))[:]\n        timestamps = [datetime.datetime.fromisoformat(el) for el in timestamps]\n\n        size = (\n            tile.bbox_size[0] // archive_resolution,\n            tile.bbox_size[1] // archive_resolution,\n        )\n        arr = zarr.open_array(\n            store=fs_mapper(data_path),\n            dtype=np.uint16,\n        )\n        band_idx = bands.index(task.band.upper())\n        timestamp_idx = timestamps.index(task.sensing_time)\n        patch = patches[i]\n\n        # maybe resize -> bicubic upsample\n        if patch_resolution != archive_resolution:\n            patch = zoom(patch, int(patch_resolution / archive_resolution), order=3)\n\n        # pad to the expected tile size if the resampled patch falls short\n        if patch.shape != size:\n            pad_x = int(size[0] - patch.shape[0])\n            pad_y = int(size[1] - patch.shape[1])\n            patch = np.pad(patch, [(0, pad_x), (0, pad_y)])\n        assert patch.shape == size\n        arr[timestamp_idx, band_idx, :, :] = patch\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 47, "blob_id": "33cbd62c56a2a9575bf8474d52ff932295b7b6b5", "content_id": "7d661f1385c7e4d7adf0c4dbd2296d3d6a9634ed", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "permissive", "max_line_length": 47, "num_lines": 1, "path": "/src/satextractor/stac/__init__.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from .stac import gcp_region_to_item_collection\n" }, { "alpha_fraction": 0.58167964220047, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 27.64974594116211, "blob_id": "e7b111b581d466689620bab408de10861515b1aa", "content_id": "e30e0b905db6e1c19cc469c2f843134bdb916348", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python",
"length_bytes": 5644, "license_type": "permissive", "max_line_length": 100, "num_lines": 197, "path": "/providers/gcp/main.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import base64\nimport datetime\nimport io\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\n\nimport cattr\nimport gcsfs\nimport pystac\nfrom flask import Flask\nfrom flask import request\nfrom google.cloud import storage\nfrom loguru import logger\nfrom satextractor.extractor import task_mosaic_patches\nfrom satextractor.models import BAND_INFO\nfrom satextractor.models import ExtractionTask\nfrom satextractor.models import Tile\nfrom satextractor.monitor import GCPMonitor\nfrom satextractor.storer import store_patches\n\napp = Flask(__name__)\n\n\nif __name__ != \"__main__\":\n # Redirect Flask logs to Gunicorn logs\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n app.logger.info(\"Service started...\")\nelse:\n app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n\n\ndef get_bucket_name(url: str) -> str:\n \"\"\"Get the bucket for an url like:\n 'gs://gcp-public-data-sentinel-2/\n Args:\n url (str): The gs url\n Returns:\n str: the bucket name\n \"\"\"\n\n return url.split(\"/\")[2]\n\n\ndef get_blob_name(url: str) -> str:\n \"\"\"Get the blob for an url like:\n 'gs://gcp-public-data-sentinel-2/tiles/17/Q/QV/S2B_MSIL1C.jp2'\n Args:\n url (str): The gs url\n Returns:\n str: the blob name\n \"\"\"\n return \"/\".join(url.split(\"/\")[3:])\n\n\ndef download_blob(url: str) -> io.BytesIO:\n \"\"\"Download a blob as bytes\n Args:\n url (str): the url to download\n Returns:\n io.BytesIO: the content as bytes\n \"\"\"\n storage_client = storage.Client()\n bucket_name = get_bucket_name(url)\n source_blob_name = get_blob_name(url)\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n f = io.BytesIO(blob.download_as_bytes())\n return f\n\n\ndef format_stacktrace():\n parts = [\"Traceback (most recent call last):\\n\"]\n parts.extend(traceback.format_stack(limit=25)[:-2])\n parts.extend(traceback.format_exception(*sys.exc_info())[1:])\n return \"\".join(parts)\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef extract_patches():\n\n try:\n tic = time.time()\n\n envelope = request.get_json()\n if not envelope:\n msg = \"no Pub/Sub message received\"\n print(f\"error: {msg}\")\n return f\"Bad Request: {msg}\", 400\n\n if not isinstance(envelope, dict) or \"message\" not in envelope:\n msg = \"invalid Pub/Sub message format\"\n print(f\"error: {msg}\")\n return f\"Bad Request: {msg}\", 400\n\n request_json = envelope[\"message\"][\"data\"]\n\n if not isinstance(request_json, dict):\n json_data = base64.b64decode(request_json).decode(\"utf-8\")\n request_json = json.loads(json_data)\n # common data\n storage_gs_path = request_json[\"storage_gs_path\"]\n bands = request_json[\"bands\"]\n resolution = request_json[\"resolution\"]\n job_id = request_json[\"job_id\"]\n\n fs = gcsfs.GCSFileSystem()\n\n # ExtractionTask data\n extraction_task = request_json[\"extraction_task\"]\n tiles = [cattr.structure(t, Tile) for t in extraction_task[\"tiles\"]]\n item_collection = pystac.ItemCollection.from_dict(\n extraction_task[\"item_collection\"],\n )\n band = extraction_task[\"band\"]\n task_id = extraction_task[\"task_id\"]\n constellation = extraction_task[\"constellation\"]\n sensing_time = 
datetime.datetime.fromisoformat(extraction_task[\"sensing_time\"])\n        task = ExtractionTask(\n            task_id,\n            tiles,\n            item_collection,\n            band,\n            constellation,\n            sensing_time,\n        )\n\n        logger.info(f\"Ready to extract {len(task.tiles)} tiles.\")\n\n        # do monitor if possible\n        if \"MONITOR_TABLE\" in os.environ:\n            monitor = GCPMonitor(\n                table_name=os.environ[\"MONITOR_TABLE\"],\n                storage_path=storage_gs_path,\n                job_id=job_id,\n                task_id=task_id,\n                constellation=constellation,\n            )\n            monitor.post_status(\n                msg_type=\"STARTED\",\n                msg_payload=f\"Extracting {len(task.tiles)}\",\n            )\n        else:\n            logger.warning(\n                \"Environment variable MONITOR_TABLE not set. Unable to push task status to Monitor\",\n            )\n\n        patches = task_mosaic_patches(\n            cloud_fs=fs,\n            download_f=download_blob,\n            task=task,\n            method=\"first\",\n            resolution=resolution,\n        )\n\n        archive_resolution = int(\n            min([b[\"gsd\"] for kk, b in BAND_INFO[constellation].items()]),\n        )\n\n        logger.info(f\"Ready to store {len(patches)} patches at {storage_gs_path}.\")\n        store_patches(\n            fs.get_mapper,\n            storage_gs_path,\n            patches,\n            task,\n            bands,\n            resolution,\n            archive_resolution,\n        )\n\n        toc = time.time()\n\n        if \"MONITOR_TABLE\" in os.environ:\n            monitor.post_status(\n                msg_type=\"FINISHED\",\n                msg_payload=f\"Elapsed time: {toc-tic}\",\n            )\n\n        logger.info(\n            f\"{len(patches)} patches were successfully stored in {storage_gs_path}.\",\n        )\n\n        return f\"Extracted {len(patches)} patches.\", 200\n\n    except Exception as e:\n\n        trace = format_stacktrace()\n\n        if \"MONITOR_TABLE\" in os.environ:\n            monitor.post_status(msg_type=\"FAILED\", msg_payload=trace)\n\n        raise e\n" }, { "alpha_fraction": 0.7015503644943237, "alphanum_fraction": 0.7015503644943237, "avg_line_length": 22.454545974731445, "blob_id": "9b5c39e1c865d292ead28092672b4455dc6e2b07", "content_id": "4578585071c09bdccbdf3983ad4933e35c781bd9", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "permissive", "max_line_length": 67, "num_lines": 11, "path": "/ui/routes.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom flask import redirect\n\nroutes = Blueprint(\"routes\", __name__, template_folder=\"templates\")\n\n\[email protected](\"/\")\[email protected](\"/index\")\ndef index():\n    \"\"\"Simple redirect to the dashboard\"\"\"\n    return redirect(\"dashboard\")\n" }, { "alpha_fraction": 0.6562922596931458, "alphanum_fraction": 0.6644113659858704, "avg_line_length": 31.844444274902344, "blob_id": "cbaac3a4f6abc7be570ec97ab7122f00ceb8da66", "content_id": "b478b7a79b79736c1aaa7a05d0dd65b2376f7f36", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1478, "license_type": "permissive", "max_line_length": 81, "num_lines": 45, "path": "/src/satextractor/deployer/gcp_deployer.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import concurrent\nimport hashlib\nimport json\nfrom datetime import datetime\n\nfrom google.cloud import pubsub_v1\nfrom loguru import logger\nfrom satextractor.models.constellation_info import BAND_INFO\nfrom tqdm import tqdm\n\n\ndef deploy_tasks(credentials, extraction_tasks, storage_path, chunk_size, topic):\n\n    user_id = topic.split(\"/\")[-1].split(\"-\")[0]\n\n    job_id = hashlib.sha224(\n        
(user_id + str(datetime.now())).encode(),\n ).hexdigest()[:10]\n\n logger.info(f\"Deploying {len(extraction_tasks)} tasks with job_id: {job_id}\")\n\n publisher = pubsub_v1.PublisherClient.from_service_account_json(credentials)\n publish_futures = []\n for i, task in tqdm(enumerate(extraction_tasks)):\n extraction_task_data = task.serialize()\n data = dict(\n storage_gs_path=storage_path,\n job_id=job_id,\n extraction_task=extraction_task_data,\n bands=list(BAND_INFO[task.constellation].keys()),\n chunks=(1, 1, chunk_size, chunk_size),\n resolution=int(BAND_INFO[task.constellation][task.band][\"gsd\"]),\n )\n data = json.dumps(data, default=str)\n\n publish_future = publisher.publish(topic, data.encode(\"utf-8\"))\n publish_futures.append(publish_future)\n\n # Wait for all the publish futures to resolve before exiting.\n concurrent.futures.wait(\n publish_futures,\n return_when=concurrent.futures.ALL_COMPLETED,\n )\n\n logger.info(\"Done publishing tasks!\")\n" }, { "alpha_fraction": 0.6236727237701416, "alphanum_fraction": 0.6266905069351196, "avg_line_length": 29.85172462463379, "blob_id": "21ca8c600dc6504f5003634f9fc6e1fbaf4cd4a5", "content_id": "67773e233e810a87bacbbc8ced57bee0a69f0d17", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8947, "license_type": "permissive", "max_line_length": 107, "num_lines": 290, "path": "/src/satextractor/stac/stac.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from typing import List\nfrom typing import Union\n\nimport pandas as pd\nimport pystac\nimport shapely\nfrom google.cloud import bigquery\nfrom google.oauth2 import service_account\nfrom pystac.extensions.eo import AssetEOExtension\nfrom pystac.extensions.eo import EOExtension\nfrom pystac.extensions.projection import ProjectionExtension\nfrom satextractor.models.constellation_info import BAND_INFO\nfrom satextractor.models.constellation_info import LANDSAT_PROPERTIES\nfrom satextractor.models.constellation_info import MEDIA_TYPES\nfrom satextractor.utils import get_utm_epsg\n\n\ndef gcp_region_to_item_collection(\n credentials: str,\n region: Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon],\n start_date: str,\n end_date: str,\n constellations: List[str],\n) -> pystac.ItemCollection:\n \"\"\"Create stac ItemCollection for a given Sentinel 2\n Google Storage Region between dates.\n\n Args:\n credentials (str): The bigquery client credentials json path\n region (Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]): the region\n start_date (str): sensing start date\n end_date (str): sensing end date\n\n Returns:\n pystac.ItemCollection: a item collection for the given region and dates\n \"\"\"\n credentials = service_account.Credentials.from_service_account_file(credentials)\n\n # Construct a BigQuery client object.\n client = bigquery.Client(credentials=credentials)\n\n dfs = []\n\n for constellation in constellations:\n\n if constellation == \"sentinel-2\":\n df = get_sentinel_2_assets_df(client, region, start_date, end_date)\n else:\n df = get_landsat_assets_df(\n client,\n region,\n start_date,\n end_date,\n constellation,\n )\n\n df[\"constellation\"] = constellation\n dfs.append(df)\n\n df = pd.concat(dfs)\n\n return create_stac_item_collection_from_df(df)\n\n\ndef get_landsat_assets_df(\n client: bigquery.Client,\n shp: Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon],\n 
start_date: str,\n    end_date: str,\n    constellation: str,\n) -> pd.DataFrame:\n    \"\"\"Perform a bigquery to obtain landsat assets as a dataframe.\n\n    Args:\n        client (bigquery.Client): The bigquery client with correct auth\n        shp (Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]): the region\n        start_date (str): sensing start date\n        end_date (str): sensing end date\n        constellation (str): which constellation to retrieve in ['landsat-5','landsat-7','landsat-8']\n\n    Returns:\n        pd.DataFrame: a dataframe with the query results\n    \"\"\"\n\n    if shp.type == \"Polygon\":\n        shp = [shp]\n\n    dfs = []\n\n    for subshp in shp:\n        (\n            region_west_lon,\n            region_south_lat,\n            region_east_lon,\n            region_north_lat,\n        ) = subshp.bounds  # bounds of the current sub-polygon\n\n        query = f\"\"\"\n        SELECT * FROM\n        `bigquery-public-data.cloud_storage_geo_index.landsat_index`\n        WHERE DATE(sensing_time) >= \"{start_date}\" and DATE(sensing_time) <= \"{end_date}\"\n        AND spacecraft_id = \"{constellation.upper().replace('-','_')}\"\n        AND data_type = \"{LANDSAT_PROPERTIES[constellation]['DATA_TYPE']}\"\n        AND sensor_id = \"{LANDSAT_PROPERTIES[constellation]['SENSOR_ID']}\"\n        AND west_lon <= {region_east_lon}\n        AND east_lon >= {region_west_lon}\n        AND north_lat >= {region_south_lat}\n        AND south_lat <= {region_north_lat}\n        \"\"\"\n        query_job = client.query(query)  # Make an API request.\n\n        dfs.append(query_job.to_dataframe())\n\n    df = pd.concat(dfs)\n\n    # de-dup\n    df = df.groupby(\"product_id\").nth(0).reset_index()\n\n    return df\n\n\ndef get_sentinel_2_assets_df(\n    client: bigquery.Client,\n    shp: Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon],\n    start_date: str,\n    end_date: str,\n) -> pd.DataFrame:\n    \"\"\"Perform a bigquery to obtain sentinel 2 assets as a dataframe.\n\n    Be aware that west/east_lon and south/north_lat in bigquery are the\n    available pixels bounds and not the actual Granule bounds.\n\n    Args:\n        client (bigquery.Client): The bigquery client with correct auth\n        shp (Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]): the region\n        start_date (str): sensing start date\n        end_date (str): sensing end date\n\n    Returns:\n        pd.DataFrame: a dataframe with the query results\n    \"\"\"\n\n    if shp.type == \"Polygon\":\n        shp = [shp]\n\n    dfs = []\n\n    for subshp in shp:\n        (\n            region_west_lon,\n            region_south_lat,\n            region_east_lon,\n            region_north_lat,\n        ) = subshp.bounds  # bounds of the current sub-polygon\n\n        query = f\"\"\"\n        SELECT * FROM\n        `bigquery-public-data.cloud_storage_geo_index.sentinel_2_index`\n        WHERE DATE(sensing_time) >= \"{start_date}\" and DATE(sensing_time) <= \"{end_date}\"\n        AND west_lon <= {region_east_lon}\n        AND east_lon >= {region_west_lon}\n        AND north_lat >= {region_south_lat}\n        AND south_lat <= {region_north_lat}\n        AND NOT REGEXP_CONTAINS(granule_id,\"S2A_OPER\")\n        \"\"\"\n        query_job = client.query(query)  # Make an API request.\n\n        dfs.append(query_job.to_dataframe())\n\n    df = pd.concat(dfs)\n\n    # de-dup\n    df = df.groupby(\"product_id\").nth(0).reset_index()\n\n    return df\n\n\ndef create_stac_item_collection_from_df(df: pd.DataFrame) -> pystac.ItemCollection:\n    \"\"\"Given a df containing the results of a bigquery sentinel 2 job\n    creates a stac item collection with all the assets\n\n    Args:\n        df (pd.DataFrame): The dataframe resulting from a bigquery job\n\n    Returns:\n        pystac.ItemCollection: an item collection for the given region and dates\n    \"\"\"\n    items = pystac.ItemCollection([create_stac_item(row) for _, row in df.iterrows()])\n    return items\n\n\ndef get_landsat_asset_images_url(row: pd.Series, band: str) -> str:\n    \"\"\"Given a bigquery df row and a band return the image url\n\n    Args:\n        row (pd.Series): a row from the bigquery job df\n        band (str): the band name\n\n    Returns:\n        str: the url of the band tif file\n    \"\"\"\n    return f\"{row.base_url}/{row.base_url.split('/')[-1]}_{band}.TIF\"\n\n\ndef get_s2_asset_images_url(row: pd.Series, band: str) -> str:\n    \"\"\"Given a bigquery df row and a band return the image url\n\n    Args:\n        row (pd.Series): a row from the bigquery job df\n        band (str): the band name\n\n    Returns:\n        str: the url of the jp2 file\n    \"\"\"\n    datatake_sensing_time = row.product_id.split(\"_\")[2]\n    base_url = f\"{row.base_url}/GRANULE/{row.granule_id}/IMG_DATA/T{row.mgrs_tile}_{datatake_sensing_time}\"\n    return f\"{base_url}_{band}.jp2\"\n\n\ndef create_stac_item(row: pd.Series) -> pystac.Item:\n    \"\"\"Creates a stac Item from a given bigquery job df row\n\n    Args:\n        row (pd.Series): a row from the bigquery job df\n\n    Returns:\n        pystac.Item: The resulting pystac Item\n    \"\"\"\n    coordinates = [\n        [\n            [row.west_lon, row.south_lat],\n            [row.east_lon, row.south_lat],\n            [row.east_lon, row.north_lat],\n            [row.west_lon, row.north_lat],\n            [row.west_lon, row.south_lat],\n        ],\n    ]\n    geometry = {\"type\": \"Polygon\", \"coordinates\": coordinates}\n    bbox = [row.west_lon, row.south_lat, row.east_lon, row.north_lat]\n\n    if row.constellation == \"sentinel-2\":\n        _id = row.granule_id\n    else:\n        _id = row.scene_id\n\n    item = pystac.Item(\n        id=_id,\n        geometry=geometry,\n        bbox=bbox,\n        datetime=row.sensing_time,\n        properties={},\n    )\n\n    # Set common gsd to 10m, bands in different resolution will be explicit\n    item.common_metadata.gsd = 10.0\n    item.common_metadata.constellation = row.constellation\n\n    # Enable eo\n    EOExtension.add_to(item)\n    eo_ext = EOExtension.ext(item)\n    eo_ext.cloud_cover = row.cloud_cover\n\n    # Enable proj\n    ProjectionExtension.add_to(item)\n    proj_ext = ProjectionExtension.ext(item)\n    proj_ext.epsg = get_utm_epsg(\n        row.north_lat,\n        row.west_lon,\n    )  # might need to make sure this comes from somewhere else.\n\n    # Add bands\n    for band_id, band_info in BAND_INFO[row.constellation].items():\n\n        if row.constellation == \"sentinel-2\":\n            band_url = get_s2_asset_images_url(row, band_id)\n        else:\n            band_url = get_landsat_asset_images_url(row, band_id)\n\n        asset = pystac.Asset(\n            href=band_url,\n            
media_type=MEDIA_TYPES[row.constellation],\n roles=[\"data\"],\n extra_fields={\"gsd\": band_info[\"gsd\"]},\n )\n eo_asset = AssetEOExtension.ext(asset)\n eo_asset.bands = [band_info[\"band\"]]\n\n item.add_asset(band_id, asset)\n\n return item\n" }, { "alpha_fraction": 0.5808823704719543, "alphanum_fraction": 0.5913865566253662, "avg_line_length": 27.84848403930664, "blob_id": "6d543a61a99b1a01052c778a922b2b44b05d4c7e", "content_id": "16dc1003fbb044e118e1a4326af88a77764b006d", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1904, "license_type": "permissive", "max_line_length": 84, "num_lines": 66, "path": "/src/satextractor/models/models.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "import datetime\nfrom typing import List\nfrom typing import Tuple\n\nimport attr\nimport pystac\nfrom satextractor.utils import get_transform_function\n\n\[email protected]\nclass Tile:\n id: str = attr.ib()\n epsg: str = attr.ib()\n bbox: Tuple[float, float, float, float] = attr.ib() # (xmin, ymin, xmax, ymax)\n\n def __attrs_post_init__(self):\n self.bbox_size = (\n self.bbox[2] - self.bbox[0],\n self.bbox[3] - self.bbox[1],\n )\n\n def contains(self, other):\n # type: (Tile)->bool\n return (\n self.epsg == other.epsg\n and self.bbox[0] <= other.bbox[0]\n and self.bbox[1] <= other.bbox[1]\n and self.bbox[2] >= other.bbox[2]\n and self.bbox[3] >= other.bbox[3]\n )\n\n @property\n def bbox_wgs84(self):\n reproj_src_wgs = get_transform_function(str(self.epsg), \"WGS84\")\n return (\n *reproj_src_wgs(self.bbox[0], self.bbox[1]),\n *reproj_src_wgs(self.bbox[2], self.bbox[3]),\n )\n\n\[email protected]\nclass ExtractionTask:\n \"\"\"Extraction task class\n\n Args:\n task_id (str): the task id\n tiles (List[Tile]): the tiles to extract\n item_collection (pystac.ItemCollection): the item collection with the assets\n band (str): the band to extract\n constellation (str): the satellite constellation from which to extract\n sensing_time (datetime.datetime): the assets starting sensing_time\n \"\"\"\n\n task_id: str = attr.ib()\n tiles: List[Tile] = attr.ib()\n item_collection: pystac.ItemCollection = attr.ib()\n band: str = attr.ib()\n constellation: str = attr.ib()\n sensing_time: datetime.datetime = attr.ib()\n\n def serialize(self):\n serialized_task = attr.asdict(self)\n serialized_task[\"item_collection\"] = serialized_task[\n \"item_collection\"\n ].to_dict()\n return serialized_task\n" }, { "alpha_fraction": 0.5784148573875427, "alphanum_fraction": 0.5868465304374695, "avg_line_length": 30.486724853515625, "blob_id": "c314454382f3d90bf9973aa80e20772572fa2f9a", "content_id": "d58a8f7613ef82ca6efaf59ba709009f680043ea", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3558, "license_type": "permissive", "max_line_length": 90, "num_lines": 113, "path": "/setup.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport io\nimport shlex\nfrom glob import glob\nfrom os.path import basename\nfrom os.path import dirname\nfrom os.path import join\nfrom os.path import splitext\nfrom subprocess import check_call\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import 
develop\n\n\nclass PostDevelopCommand(develop):\n def run(self):\n try:\n check_call(shlex.split(\"pre-commit install\"))\n except Exception as e:\n print(f\"Unable to run 'pre-commit install' with exception {e}\")\n develop.run(self)\n\n\ndef read(*names, **kwargs):\n with io.open(\n join(dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as fh:\n return fh.read()\n\n\nsetup(\n name=\"satextractor\",\n version=\"0.1.0\",\n license=\"BSD-2-Clause\",\n description=\"SatExtractor. Extract everything from everywhere.\",\n url=\"https://github.com/FrontierDevelopmentLab/sat-extractor\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n py_modules=[splitext(basename(path))[0] for path in glob(\"src/*.py\")],\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Unix\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n # uncomment if you test on these interpreters:\n # 'Programming Language :: Python :: Implementation :: IronPython',\n # 'Programming Language :: Python :: Implementation :: Jython',\n # 'Programming Language :: Python :: Implementation :: Stackless',\n \"Topic :: Utilities\",\n ],\n project_urls={\n \"Documentation\": \"https://sat-extractor.readthedocs.io/\",\n \"Changelog\": \"https://sat-extractor.readthedocs.io/en/latest/changelog.html\",\n \"Issue Tracker\": \"https://github.com/FrontierDevelopmentLab/sat-extractor/issues\",\n },\n keywords=[\n # eg: 'keyword1', 'keyword2', 'keyword3',\n ],\n python_requires=\">=3.8\",\n install_requires=[\n \"loguru\",\n \"joblib\",\n \"numpy\",\n \"pandas\",\n \"shapely\",\n \"gunicorn\",\n \"pyproj\",\n \"geopandas\",\n \"dash\",\n \"dash-auth\",\n \"dash-bootstrap-components\",\n \"rasterio\",\n \"zarr\",\n \"tqdm\",\n \"google-api-python-client\",\n \"google-cloud-storage\",\n \"google-cloud-functions\",\n \"google-cloud-pubsub\",\n \"google-cloud-bigquery\",\n \"hydra-core\",\n \"gcsfs\",\n \"pystac[validation]\",\n \"sentinelhub\",\n # eg: 'aspectlib==1.1.1', 'six>=1.7',\n ],\n extras_require={\n \"pre-commit\": [\"pre-commit\"],\n \"test\": [\"pytest\", \"tox\"],\n # eg:\n # 'rst': ['docutils>=0.11'],\n # ':python_version==\"2.6\"': ['argparse'],\n },\n entry_points={\n \"console_scripts\": [\n \"satextractor = satextractor.cli:main\",\n ],\n },\n cmdclass={\"develop\": PostDevelopCommand},\n)\n" }, { "alpha_fraction": 0.5753471255302429, "alphanum_fraction": 0.5767016410827637, "avg_line_length": 29.132652282714844, "blob_id": "7010f95befd70f8c8a7d643dd613a8ee3967d715", "content_id": "c90a2b635838456839c6d7c648b9f0e3918c031e", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2953, "license_type": "permissive", "max_line_length": 81, "num_lines": 98, "path": "/src/satextractor/preparer/gcp_preparer.py", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "from 
datetime import datetime\nfrom typing import Dict\nfrom typing import List\n\nimport numpy as np\nimport zarr\nfrom gcsfs import GCSFileSystem\nfrom joblib import delayed\nfrom joblib import Parallel\nfrom loguru import logger\nfrom satextractor.models import ExtractionTask\nfrom satextractor.models import Tile\nfrom satextractor.models.constellation_info import BAND_INFO\nfrom satextractor.preparer import create_zarr_patch_structure\nfrom satextractor.utils import tqdm_joblib\nfrom tqdm import tqdm\n\n\ndef gcp_prepare_archive(\n credentials: str,\n tasks: List[ExtractionTask],\n tiles: List[Tile],\n constellations: List[str],\n storage_root: str,\n patch_size: int,\n chunk_size: int,\n n_jobs: int = -1,\n verbose: int = 0,\n **kwargs,\n) -> bool:\n fs = GCSFileSystem(token=credentials)\n # make a dict of tiles and constellations sensing times\n tile_constellation_sensing_times: Dict[str, Dict[str, List[datetime]]] = {\n tt.id: {kk: [] for kk in BAND_INFO.keys()} for tt in tiles\n }\n\n for task in tasks:\n\n # check tiles meet spec\n assert isinstance(\n task,\n ExtractionTask,\n ), \"Task does not match ExtractionTask spec\"\n\n for tile in task.tiles:\n tile_constellation_sensing_times[tile.id][task.constellation].append(\n task.sensing_time,\n )\n\n # get the unique sensing times\n for tt in tiles:\n for kk in constellations:\n tile_constellation_sensing_times[tt.id][kk] = np.array(\n [\n np.datetime64(el)\n for el in sorted(\n list(set(tile_constellation_sensing_times[tt.id][kk])),\n )\n ],\n )\n\n items = tile_constellation_sensing_times.items()\n with tqdm_joblib(\n tqdm(\n desc=f\"parallel building zarr tile roots on {storage_root}\",\n total=len(items),\n ),\n ):\n Parallel(n_jobs=n_jobs, verbose=verbose, prefer=\"threads\")(\n [\n delayed(zarr.open)(fs.get_mapper(f\"{storage_root}/{tile_id}\"))\n for tile_id, _ in items\n ],\n )\n\n logger.info(f\"parallel building zarr archives on {storage_root}\")\n jobs = []\n for tile_id, vv in items:\n for constellation, sensing_times in vv.items():\n jobs.append(\n delayed(create_zarr_patch_structure)(\n fs.get_mapper,\n storage_root,\n tile_id,\n patch_size,\n chunk_size,\n sensing_times,\n constellation,\n BAND_INFO[constellation],\n ),\n )\n\n with tqdm_joblib(\n tqdm(desc=\"Building Archives.\", total=len(tiles) * len(constellations)),\n ):\n Parallel(n_jobs=n_jobs, verbose=verbose, prefer=\"threads\")(jobs)\n\n return True\n" }, { "alpha_fraction": 0.764915406703949, "alphanum_fraction": 0.7702582478523254, "avg_line_length": 35.225807189941406, "blob_id": "050840c912965bf36baf8ddff1707bc0785a1e51", "content_id": "77f7099fd6773efdfc6b4e999cd13c895243249e", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1123, "license_type": "permissive", "max_line_length": 136, "num_lines": 31, "path": "/Dockerfile", "repo_name": "frandorr/sat-extractor", "src_encoding": "UTF-8", "text": "# Use the official osgeo/gdal image.\nFROM osgeo/gdal:ubuntu-small-latest\n\n# Allow statements and log messages to immediately appear in the Knative logs\nENV PYTHONUNBUFFERED True\n\n# Set provider dir path\nENV PROVIDER providers/gcp\n\nENV APP_HOME /app\n\nRUN if [ -z \"$MONITOR_TABLE\" ]; then echo 'WARNING: Environment variable MONITOR_TABLE not specified. Task statuses wont be output.'; fi\n\n\nWORKDIR $APP_HOME\n# Copy local code to the container image.\nCOPY . 
./satextractor\nCOPY $PROVIDER ./\n# Install GDAL dependencies\nRUN apt-get update\nRUN apt-get install -y python3-pip\n# Install production dependencies.\nRUN pip install --no-cache-dir ./satextractor\nRUN pip install --no-cache-dir -r requirements.txt\n\n# Run the web service on container startup. Here we use the gunicorn\n# webserver, with one worker process and 8 threads.\n# For environments with multiple CPU cores, increase the number of workers\n# to be equal to the cores available.\n# Timeout is set to 0 to disable the timeouts of the workers to allow Cloud Run to handle instance scaling.\nCMD exec gunicorn --bind :$PORT --workers 1 --threads 8 --timeout 0 main:app\n" } ]
32
Zhangjifa/Django_mall
https://github.com/Zhangjifa/Django_mall
0bfe71294466efb172bd5fa4b613907c7d3b82f4
df9eecc823609b15e47080ca8a1cfa445995d817
5e5a3e6eab69261661a13bea31a9f19a27cd84e4
refs/heads/master
2020-04-24T23:30:12.288788
2019-02-24T14:34:32
2019-02-24T14:34:32
172,346,577
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.60215824842453, "alphanum_fraction": 0.6050359606742859, "avg_line_length": 26.95833396911621, "blob_id": "5d9cc26e9a793882fe11a5fc5efab33f5afa9383", "content_id": "057ff32dc8ce466b67956c34b7e6924e7295bd90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3048, "license_type": "no_license", "max_line_length": 101, "num_lines": 96, "path": "/myproject/web/views/users.py", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import redirect\r\nfrom django.core.urlresolvers import reverse\r\nfrom common.models import Users, Types, Orders, Detail, Goods\r\nfrom datetime import datetime\r\nfrom django.db.models import Q\r\n\r\n\r\n# 公共信息加载\r\ndef loadinfo(request):\r\n '''公共信息加载'''\r\n context = {}\r\n lists = Types.objects.filter(pid=0)\r\n context['typelist'] = lists\r\n return context\r\n\r\n\r\ndef index(request):\r\n context = loadinfo(request)\r\n ob = Users.objects.get(id=request.session[\"vipuser\"][\"id\"])\r\n context[\"userlist\"] = ob\r\n return render(request, \"web/vipuser.html\", context)\r\n\r\n\r\ndef update(request, uid):\r\n try:\r\n context = loadinfo(request)\r\n ob = Users.objects.get(id=uid)\r\n ob.name = request.POST[\"name\"]\r\n ob.sex = request.POST[\"sex\"]\r\n ob.address = request.POST[\"address\"]\r\n ob.email = request.POST[\"email\"]\r\n ob.code = request.POST['code']\r\n ob.phone = request.POST['phone']\r\n ob.save()\r\n context = {\"info\": \"修改个人信息成功 !\"}\r\n\r\n\r\n except Exception as err:\r\n print(err)\r\n context = {\"info\": \"修改用户信息失败 !\"}\r\n\r\n return render(request, \"web/vipuserinfo.html\", context)\r\n\r\n\r\ndef orders(request):\r\n print(1111)\r\n context = loadinfo(request)\r\n # 获取当前用户的订单列表\r\n odlist = Orders.objects.filter(uid=request.session['vipuser']['id'])\r\n # 遍历当前用户的所有订单,添加他的订单详情\r\n for od in odlist:\r\n delist = Detail.objects.filter(orderid=od.id)\r\n # 遍历每个商品详情,从Goods中获取对应的图片\r\n for og in delist:\r\n og.picname = Goods.objects.only('picname').get(id=og.goodsid).picname\r\n print(og.picname)\r\n od.detaillist = delist\r\n # 将整理好的订单信息放置到模板遍历中\r\n context['orderslist'] = odlist\r\n return render(request,\"web/viporders.html\", context)\r\n\r\n\r\ndef state(request):\r\n ''' 修改订单状态 '''\r\n try:\r\n oid = request.GET.get(\"oid\",'0')\r\n ob = Orders.objects.get(id=oid)\r\n ob.state = request.GET['state']\r\n ob.save()\r\n return redirect(reverse('viporders'))\r\n except Exception as err:\r\n print(err)\r\n return HttpResponse(\"订单处理失败!\")\r\n\r\n\r\ndef reset(request):\r\n return render(request, \"web/reset.html\")\r\n\r\n\r\ndef update_password(request):\r\n try:\r\n ob = Users.objects.get(Q(username=request.POST[\"username\"]) & Q(phone=request.POST[\"phone\"]))\r\n\r\n import hashlib\r\n m = hashlib.md5()\r\n m.update(bytes(request.POST['password'], encoding=\"utf8\"))\r\n ob.password = m.hexdigest()\r\n ob.save()\r\n context = {'info': '重置密码成功, 请登陆!'}\r\n\r\n except Exception as err:\r\n print(err)\r\n context = {\"info\": \"未找到该用户,请核对后重新输入\"}\r\n return render(request, \"web/reset.html\", context)\r\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 37, "blob_id": "4401fefcc71e6121763b5712e69256325ce3ca31", "content_id": "1a01985129f0fa637ddf88a865f02fb1e0f27ba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, 
"license_type": "no_license", "max_line_length": 70, "num_lines": 4, "path": "/README.md", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "# Django_mall\nuse django to realize online mall font-end and back-end management<br>\nsuper user login:<br>\nusername: admin password:admin\n" }, { "alpha_fraction": 0.5362095236778259, "alphanum_fraction": 0.5477657914161682, "avg_line_length": 30.860759735107422, "blob_id": "afc39de88cea8afd923f6256560aadd7500ca045", "content_id": "80bfbdde8861e251c8f3fb6e59777c130799746a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5614, "license_type": "no_license", "max_line_length": 90, "num_lines": 158, "path": "/myproject/myadmin/views/goods.py", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import redirect\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.core.paginator import Paginator\r\nfrom common.models import Goods, Types\r\nfrom PIL import Image\r\nimport time, os\r\n\r\n\r\ndef index(request, pIndex):\r\n goods = Goods.objects.all()\r\n for ob in goods:\r\n ty = Types.objects.get(id=ob.typeid)\r\n ob.typename = ty.name\r\n\r\n # 执行分页处理\r\n pIndex = int(pIndex)\r\n page = Paginator(goods, 5) # 以5条每页创建分页对象\r\n maxpages = page.num_pages # 最大页数\r\n # 判断页数是否越界\r\n if pIndex > maxpages:\r\n pIndex = maxpages\r\n if pIndex < 1 or pIndex is None:\r\n pIndex = 1\r\n list2 = page.page(pIndex) # 当前页数据\r\n plist = page.page_range # 页码数列表\r\n context = {\"goodslist\": list2, 'maxpages': maxpages, 'pIndex': pIndex, \"plist\": plist}\r\n return render(request, \"myadmin/goods/index.html\", context)\r\n\r\n\r\ndef add(request):\r\n tlist = Types.objects.extra(select={'_has': 'concat(path,id)'}).order_by('_has')\r\n for ob in tlist:\r\n ob.pname ='. . 
.'*(ob.path.count(',')-1)\r\n\r\n context = {\"typeslist\": tlist}\r\n return render(request, \"myadmin/goods/add.html\", context)\r\n\r\n\r\ndef insert(request):\r\n try:\r\n upfile = request.FILES.get('picname', None)\r\n if not upfile:\r\n return HttpResponse(\"没有上传的文件\")\r\n # 随机生成一个文件名\r\n filename = str(time.time()) + \".\" + upfile.name.split('.').pop()\r\n ob = open(\"./static/goods/\" + filename, \"wb+\")\r\n for chunk in upfile.chunks(): # 分块写入文件\r\n ob.write(chunk)\r\n ob.close()\r\n\r\n im = Image.open(\"./static/goods/\"+filename)\r\n # 缩放到375*375:\r\n im.thumbnail((375, 375))\r\n # 把缩放后的图像用jpeg格式保存:\r\n im.save(\"./static/goods/\"+filename, 'jpeg')\r\n # 缩放到220*220:\r\n im.thumbnail((220, 220))\r\n # 把缩放后的图像用jpeg格式保存:\r\n im.save(\"./static/goods/m_\"+filename, 'jpeg')\r\n # 缩放到75*75:\r\n im.thumbnail((75, 75))\r\n # 把缩放后的图像用jpeg格式保存:\r\n im.save(\"./static/goods/s_\"+filename, 'jpeg')\r\n\r\n # 执行信息添加\r\n ob = Goods()\r\n ob.goods = request.POST['goods']\r\n ob.typeid = request.POST['typeid']\r\n ob.company = request.POST['company']\r\n ob.price = request.POST['price']\r\n ob.store = request.POST['store']\r\n ob.content = request.POST['content']\r\n ob.picname = filename\r\n ob.save()\r\n context = {'info': \"添加商品信息成功\"}\r\n except Exception as err:\r\n context = {'info': err}\r\n\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef delete(request, gid):\r\n try:\r\n goods = Goods.objects.get(id=gid)\r\n goods.delete()\r\n context = {\"info\": \"删除商品成功\"}\r\n except Exception as err:\r\n context = {\"info\": err}\r\n\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef edit(request, gid):\r\n try:\r\n tlist = Types.objects.extra(select={'_has': 'concat(path,id)'}).order_by('_has')\r\n for ob in tlist:\r\n ob.pname = '. . .' 
* (ob.path.count(',') - 1)\r\n\r\n goods = Goods.objects.get(id=gid)\r\n context = {\"goodslist\": goods, \"typeslist\": tlist}\r\n return render(request, \"myadmin/goods/edit.html\", context)\r\n except Exception as err:\r\n print(err)\r\n context = {\"info\": err}\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef update(request, gid):\r\n try:\r\n oldpicname = request.POST['oldpicname']\r\n flag = False\r\n if request.FILES.get('picname') is not None:\r\n upfile = request.FILES.get('picname', None)\r\n if not upfile:\r\n return HttpResponse(\"没有上传的文件\")\r\n # 随机生成一个文件名\r\n filename = str(time.time()) + \".\" + upfile.name.split('.').pop()\r\n ob = open(\"./static/pics/\" + filename, \"wb+\")\r\n for chunk in upfile.chunks(): # 分块写入文件\r\n ob.write(chunk)\r\n ob.close()\r\n\r\n # 执行图片缩放\r\n im = Image.open(\"./static/pics/\" + filename)\r\n # 缩放到375*375:\r\n im.thumbnail((375, 375))\r\n # 把缩放后的图像用jpeg格式保存:\r\n im.save(\"./static/pics/\" + filename, None)\r\n # 缩放到75*75:\r\n im.thumbnail((75, 75))\r\n # 把缩放后的图像用jpeg格式保存:\r\n im.save(\"./static/pics/s_\" + filename, None)\r\n flag = True\r\n else:\r\n filename = oldpicname\r\n\r\n # 执行信息添加\r\n ob = Goods.objects.get(id=gid)\r\n ob.goods = request.POST['goods']\r\n ob.typeid = request.POST['typeid']\r\n ob.company = request.POST['company']\r\n ob.price = request.POST['price']\r\n ob.store = request.POST['store']\r\n ob.content = request.POST['content']\r\n ob.picname = filename\r\n ob.state = request.POST['state']\r\n ob.save()\r\n context = {'info': \"修改商品信息成功\"}\r\n if flag is True:\r\n os.remove(\"./static/goods/s_\" + oldpicname) # 执行老图片删除\r\n os.remove(\"./static/goods/\" + oldpicname) # 执行老图片删除\r\n\r\n except Exception as err:\r\n context = {'info': err}\r\n\r\n return render(request, \"myadmin/info.html\", context)\r\n" }, { "alpha_fraction": 0.620246410369873, "alphanum_fraction": 0.6234601140022278, "avg_line_length": 25.086956024169922, "blob_id": "13e1269f285a2c568c0652b6bcc0eb70973babc3", "content_id": "f2381efabd01f92fafdc2d8a05dc979c2bd374f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2099, "license_type": "no_license", "max_line_length": 52, "num_lines": 69, "path": "/myproject/web/views/cart.py", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import redirect\r\nfrom django.core.urlresolvers import reverse\r\n\r\nfrom common.models import Goods,Types\r\n\r\n# 公共信息加载\r\ndef loadinfo(request):\r\n '''公共信息加载'''\r\n context = {}\r\n lists = Types.objects.filter(pid=0)\r\n context['typelist'] = lists\r\n return context\r\n\r\ndef index(request):\r\n '''浏览购物车'''\r\n context = loadinfo(request)\r\n if \"shoplist\" not in request.session:\r\n request.session[\"shoplist\"] = {}\r\n return render(request, \"web/cart.html\", context)\r\n\r\n\r\ndef add(request,gid):\r\n '''在购物车中放入商品信息'''\r\n # 获取要放入购物车中的商品信息\r\n ob = Goods.objects.get(id=gid)\r\n shop = ob.toDic()\r\n shop[\"num\"] = int(request.POST.get(\"num\", 1))\r\n print(type(shop[\"num\"]))\r\n # 获取购物车中已存在商品的列表\r\n shoplist = request.session.get(\"shoplist\", {})\r\n # 判断当前购买的商品是否已存在于购物车中\r\n if gid in shoplist:\r\n shoplist[gid][\"num\"] += shop[\"num\"]\r\n else:\r\n shoplist[gid] = shop\r\n # 将购物车列表写回到session\r\n request.session[\"shoplist\"] = shoplist\r\n return redirect(reverse('cart_index'))\r\n\r\n\r\ndef delete(request,gid):\r\n '''删除一个商品'''\r\n context = 
loadinfo(request)\r\n shoplist = request.session[\"shoplist\"]\r\n del shoplist[gid]\r\n request.session[\"shoplist\"] = shoplist\r\n return redirect(reverse('cart_index'))\r\n\r\n\r\ndef clear(request):\r\n '''清空购物车'''\r\n context = loadinfo(request)\r\n request.session[\"shoplist\"] = {}\r\n return render(request,\"web/cart.html\",context)\r\n\r\n\r\ndef change(request):\r\n '''更改购物车中的商品信息'''\r\n shoplist = request.session['shoplist']\r\n #获取信息\r\n shopid = request.GET.get('gid','0')\r\n num = int(request.GET.get('num', 1))\r\n if num < 1:\r\n num = 1\r\n shoplist[shopid]['num'] = num #更改商品数量\r\n request.session['shoplist'] = shoplist\r\n return redirect(reverse('cart_index'))" }, { "alpha_fraction": 0.5176056623458862, "alphanum_fraction": 0.522635817527771, "avg_line_length": 44.23255920410156, "blob_id": "bdcd5b267239f49cc6fe8bad1845ea0a02533702", "content_id": "9905c1f308c922f054d4ea59afee89ecf8d1dcaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2242, "license_type": "no_license", "max_line_length": 86, "num_lines": 43, "path": "/myproject/web/urls.py", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\r\nfrom web.views import index, cart, orders, users\r\n\r\nurlpatterns = [\r\n url(r'^$', index.index, name='index'), # 商城前台首页\r\n url(r'^list$', index.lists, name='list'), # 商品列表\r\n url(r'^list/(?P<pIndex>[0-9]+)$', index.lists, name='list'), # 商品列表\r\n url(r'^detail/(?P<gid>[0-9]+)$', index.detail, name='detail'), # 商品详情\r\n\r\n # 会员登陆,退出\r\n url(r'^login$', index.login, name='login'),\r\n url(r'^dologin$', index.dologin, name='dologin'),\r\n url(r'^logout$', index.logout, name='logout'),\r\n url(r'^verify$', index.verify, name=\"verify\"), # 验证码\r\n\r\n # 用户注册\r\n url(r'^register$', index.register, name='register'),\r\n url(r'^doreg$', index.doreg, name='doreg'),\r\n\r\n # 忘记密码\r\n url(r'^reset/$', users.reset, name='reset'),\r\n url(r'^update_pass/$', users.update_password, name='update_pass'),\r\n\r\n # 购物车路由\r\n url(r'^cart$', cart.index,name='cart_index'), # 浏览购物车\r\n url(r'^cart/add/(?P<gid>[0-9]+)$', cart.add,name='cart_add'), # 添加购物车\r\n url(r'^cart/del/(?P<gid>[0-9]+)$', cart.delete,name='cart_del'), # 从购物车中删除一个商品\r\n url(r'^cart/clear$', cart.clear,name='cart_clear'), # 清空购物车\r\n url(r'^cart/change$', cart.change,name='cart_change'), # 更改购物车中商品数量\r\n\r\n # 订单管理\r\n url(r'^orders/add$', orders.add, name='orders_add'), # 订单的表单页\r\n url(r'^orders/confirm$', orders.confirm, name='orders_confirm'), # 订单确认页\r\n url(r'^orders/insert$', orders.insert, name='orders_insert'), # 执行订单添加操作\r\n\r\n # 个人中心\r\n url(r'^vip/index$', users.index, name='vipuser'),\r\n url(r'^vip/update/(?P<uid>[0-9]+)$', users.update, name='vipupdate'),\r\n # 个人订单\r\n url(r'^vip/orders$', users.orders,name='viporders'), # 会员中心我的订单\r\n url(r'^vip/odstate$', users.state,name='vipodstate'), # 修改订单状态(确认收货\r\n\r\n]\r\n" }, { "alpha_fraction": 0.565763533115387, "alphanum_fraction": 0.5699507594108582, "avg_line_length": 28.26865577697754, "blob_id": "797ff2d6c77be4e9b4b564a4411d842cb5ec15ab", "content_id": "c83c54f2860817052a50f38f2c182cacb069ac5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4304, "license_type": "no_license", "max_line_length": 110, "num_lines": 134, "path": "/myproject/myadmin/views/users.py", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, reverse, 
redirect\r\nfrom django.http import HttpResponse\r\nfrom django.core.paginator import Paginator\r\nfrom django.db.models import Q\r\nfrom common.models import Users\r\nfrom datetime import datetime\r\n\r\n\r\ndef index(request, pIndex):\r\n ob = Users.objects\r\n # 定义一个存放搜索条件的列表\r\n mywhere = []\r\n keyword = request.GET.get(\"keyword\", None)\r\n if keyword:\r\n list1 = ob.filter(Q(username__contains=keyword)|Q(name__contains=keyword))\r\n mywhere.append(\"keyword=\" + keyword)\r\n else:\r\n list1 = ob.filter()\r\n\r\n sex = request.GET.get(\"sex\", \"\")\r\n if sex != \"\":\r\n list1 = list1.filter(sex=int(sex))\r\n mywhere.append(\"sex=\" + sex)\r\n # 执行分页处理\r\n pIndex = int(pIndex)\r\n page = Paginator(list1, 5) # 以5条每页创建分页对象\r\n maxpages = page.num_pages # 最大页数\r\n # 判断页数是否越界\r\n if pIndex > maxpages:\r\n pIndex = maxpages\r\n if pIndex < 1 or pIndex is None:\r\n pIndex = 1\r\n list2 = page.page(pIndex) # 当前页数据\r\n plist = page.page_range # 页码数列表\r\n context = {\"userslist\": list2, 'maxpages': maxpages, 'pIndex': pIndex, 'mywhere': mywhere, 'plist': plist}\r\n\r\n return render(request, 'myadmin/users/index.html', context)\r\n\r\n\r\ndef add(request):\r\n return render(request, 'myadmin/users/add.html')\r\n\r\n\r\ndef insert(request):\r\n try:\r\n ob = Users()\r\n ob.username = request.POST['username']\r\n ob.name = request.POST['name']\r\n # 获取密码并md5\r\n import hashlib\r\n m = hashlib.md5()\r\n m.update(bytes(request.POST['password'], encoding=\"utf8\"))\r\n ob.password = m.hexdigest()\r\n ob.sex = request.POST['sex']\r\n ob.address = request.POST['address']\r\n ob.code = request.POST['code']\r\n ob.phone = request.POST['phone']\r\n ob.email = request.POST['email']\r\n ob.state = 1\r\n ob.addtime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n ob.save()\r\n context = {'info': '添加成功!'}\r\n except Exception as err:\r\n print(err)\r\n context = {'info': '添加失败!'}\r\n\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef delete(request, uid):\r\n try:\r\n ob = Users.objects.get(id=uid)\r\n ob.delete()\r\n context = {'info': '删除成功!'}\r\n except:\r\n context = {'info': '删除失败!'}\r\n\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef edit(request, uid):\r\n try:\r\n ob = Users.objects.get(id=uid)\r\n context = {'user': ob}\r\n return render(request, \"myadmin/users/edit.html\", context)\r\n except Exception as err:\r\n print(err)\r\n context = {'info': '没有找到要修改的信息!'}\r\n\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef update(request, uid):\r\n try:\r\n ob = Users.objects.get(id=uid)\r\n ob.name = request.POST['name']\r\n ob.sex = request.POST['sex']\r\n ob.address = request.POST['address']\r\n ob.code = request.POST['code']\r\n ob.phone = request.POST['phone']\r\n ob.email = request.POST['email']\r\n ob.state = request.POST['state']\r\n ob.save()\r\n context = {'info': '修改成功!'}\r\n except Exception as err:\r\n print(err)\r\n context = {'info': '修改失败!'}\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef update_passwd(request, uid):\r\n try:\r\n ob = Users.objects.get(id=uid)\r\n import hashlib\r\n m = hashlib.md5()\r\n m.update(bytes(request.POST['password'], encoding=\"utf8\"))\r\n ob.password = m.hexdigest()\r\n ob.save()\r\n context = {'info': '重置密码成功!'}\r\n except Exception as err:\r\n print(err)\r\n context = {'info': '重置密码失败!'}\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\ndef reset(request, uid):\r\n try:\r\n ob = Users.objects.get(id=uid)\r\n context = {'user': 
ob}\r\n return render(request, \"myadmin/users/reset.html\", context)\r\n except Exception as err:\r\n print(err)\r\n context = {'info': '没有找到要修改的信息!'}\r\n return render(request, \"myadmin/info.html\", context)\r\n\r\n\r\n" }, { "alpha_fraction": 0.5478988885879517, "alphanum_fraction": 0.6750389337539673, "avg_line_length": 46.90550994873047, "blob_id": "be23a20e60367f36ed95a099fac33027e136d847", "content_id": "0f63ef527e0033af9570deb21eba074586971b5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 19485, "license_type": "no_license", "max_line_length": 759, "num_lines": 381, "path": "/shopdb.sql", "repo_name": "Zhangjifa/Django_mall", "src_encoding": "UTF-8", "text": "/*\r\nSQLyog Ultimate v12.5.0 (64 bit)\r\nMySQL - 5.7.17-log : Database - shopdb\r\n*********************************************************************\r\n*/\r\n\r\n/*!40101 SET NAMES utf8 */;\r\n\r\n/*!40101 SET SQL_MODE=''*/;\r\n\r\n/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\r\n/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\r\n/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;\r\n/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\r\nCREATE DATABASE /*!32312 IF NOT EXISTS*/`shopdb` /*!40100 DEFAULT CHARACTER SET utf8 */;\r\n\r\nUSE `shopdb`;\r\n\r\n/*Table structure for table `auth_group` */\r\n\r\nDROP TABLE IF EXISTS `auth_group`;\r\n\r\nCREATE TABLE `auth_group` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `name` varchar(80) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `name` (`name`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `auth_group` */\r\n\r\n/*Table structure for table `auth_group_permissions` */\r\n\r\nDROP TABLE IF EXISTS `auth_group_permissions`;\r\n\r\nCREATE TABLE `auth_group_permissions` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `group_id` int(11) NOT NULL,\r\n `permission_id` int(11) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `auth_group_permissions_group_id_permission_id_0cd325b0_uniq` (`group_id`,`permission_id`),\r\n KEY `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` (`permission_id`),\r\n CONSTRAINT `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`),\r\n CONSTRAINT `auth_group_permissions_group_id_b120cbf9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `auth_group_permissions` */\r\n\r\n/*Table structure for table `auth_permission` */\r\n\r\nDROP TABLE IF EXISTS `auth_permission`;\r\n\r\nCREATE TABLE `auth_permission` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `name` varchar(255) NOT NULL,\r\n `content_type_id` int(11) NOT NULL,\r\n `codename` varchar(100) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `auth_permission_content_type_id_codename_01ab375a_uniq` (`content_type_id`,`codename`),\r\n CONSTRAINT `auth_permission_content_type_id_2f476e4b_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`)\r\n) ENGINE=InnoDB AUTO_INCREMENT=19 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `auth_permission` */\r\n\r\ninsert into `auth_permission`(`id`,`name`,`content_type_id`,`codename`) values \r\n(1,'Can add log entry',1,'add_logentry'),\r\n(2,'Can change log entry',1,'change_logentry'),\r\n(3,'Can delete log entry',1,'delete_logentry'),\r\n(4,'Can add 
permission',2,'add_permission'),\r\n(5,'Can change permission',2,'change_permission'),\r\n(6,'Can delete permission',2,'delete_permission'),\r\n(7,'Can add group',3,'add_group'),\r\n(8,'Can change group',3,'change_group'),\r\n(9,'Can delete group',3,'delete_group'),\r\n(10,'Can add user',4,'add_user'),\r\n(11,'Can change user',4,'change_user'),\r\n(12,'Can delete user',4,'delete_user'),\r\n(13,'Can add content type',5,'add_contenttype'),\r\n(14,'Can change content type',5,'change_contenttype'),\r\n(15,'Can delete content type',5,'delete_contenttype'),\r\n(16,'Can add session',6,'add_session'),\r\n(17,'Can change session',6,'change_session'),\r\n(18,'Can delete session',6,'delete_session');\r\n\r\n/*Table structure for table `auth_user` */\r\n\r\nDROP TABLE IF EXISTS `auth_user`;\r\n\r\nCREATE TABLE `auth_user` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `password` varchar(128) NOT NULL,\r\n `last_login` datetime(6) DEFAULT NULL,\r\n `is_superuser` tinyint(1) NOT NULL,\r\n `username` varchar(150) NOT NULL,\r\n `first_name` varchar(30) NOT NULL,\r\n `last_name` varchar(30) NOT NULL,\r\n `email` varchar(254) NOT NULL,\r\n `is_staff` tinyint(1) NOT NULL,\r\n `is_active` tinyint(1) NOT NULL,\r\n `date_joined` datetime(6) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `username` (`username`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `auth_user` */\r\n\r\n/*Table structure for table `auth_user_groups` */\r\n\r\nDROP TABLE IF EXISTS `auth_user_groups`;\r\n\r\nCREATE TABLE `auth_user_groups` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `user_id` int(11) NOT NULL,\r\n `group_id` int(11) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `auth_user_groups_user_id_group_id_94350c0c_uniq` (`user_id`,`group_id`),\r\n KEY `auth_user_groups_group_id_97559544_fk_auth_group_id` (`group_id`),\r\n CONSTRAINT `auth_user_groups_group_id_97559544_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`),\r\n CONSTRAINT `auth_user_groups_user_id_6a12ed8b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `auth_user_groups` */\r\n\r\n/*Table structure for table `auth_user_user_permissions` */\r\n\r\nDROP TABLE IF EXISTS `auth_user_user_permissions`;\r\n\r\nCREATE TABLE `auth_user_user_permissions` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `user_id` int(11) NOT NULL,\r\n `permission_id` int(11) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `auth_user_user_permissions_user_id_permission_id_14a6b632_uniq` (`user_id`,`permission_id`),\r\n KEY `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` (`permission_id`),\r\n CONSTRAINT `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`),\r\n CONSTRAINT `auth_user_user_permissions_user_id_a95ead1b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `auth_user_user_permissions` */\r\n\r\n/*Table structure for table `detail` */\r\n\r\nDROP TABLE IF EXISTS `detail`;\r\n\r\nCREATE TABLE `detail` (\r\n `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\r\n `orderid` int(11) unsigned DEFAULT NULL,\r\n `goodsid` int(11) unsigned DEFAULT NULL,\r\n `name` varchar(32) DEFAULT NULL,\r\n `price` double(6,2) DEFAULT NULL,\r\n `num` int(11) unsigned DEFAULT NULL,\r\n PRIMARY KEY (`id`)\r\n) ENGINE=MyISAM AUTO_INCREMENT=7 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `detail` 
*/\r\n\r\ninsert into `detail`(`id`,`orderid`,`goodsid`,`name`,`price`,`num`) values \r\n(2,5,7,'ThinkPad_P52s',9999.00,1),\r\n(3,6,10,'iPhone XS Max',9599.00,1),\r\n(4,7,12,'恒源祥秋冬季夹克衫',298.00,2),\r\n(5,8,8,'ROG 冰刃',9999.99,1),\r\n(6,9,15,'Python编程 从入门到实践',72.70,3);\r\n\r\n/*Table structure for table `django_admin_log` */\r\n\r\nDROP TABLE IF EXISTS `django_admin_log`;\r\n\r\nCREATE TABLE `django_admin_log` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `action_time` datetime(6) NOT NULL,\r\n `object_id` longtext,\r\n `object_repr` varchar(200) NOT NULL,\r\n `action_flag` smallint(5) unsigned NOT NULL,\r\n `change_message` longtext NOT NULL,\r\n `content_type_id` int(11) DEFAULT NULL,\r\n `user_id` int(11) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n KEY `django_admin_log_content_type_id_c4bce8eb_fk_django_co` (`content_type_id`),\r\n KEY `django_admin_log_user_id_c564eba6_fk` (`user_id`),\r\n CONSTRAINT `django_admin_log_content_type_id_c4bce8eb_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`),\r\n CONSTRAINT `django_admin_log_user_id_c564eba6_fk` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `django_admin_log` */\r\n\r\n/*Table structure for table `django_content_type` */\r\n\r\nDROP TABLE IF EXISTS `django_content_type`;\r\n\r\nCREATE TABLE `django_content_type` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `app_label` varchar(100) NOT NULL,\r\n `model` varchar(100) NOT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `django_content_type_app_label_model_76bd3d3b_uniq` (`app_label`,`model`)\r\n) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `django_content_type` */\r\n\r\ninsert into `django_content_type`(`id`,`app_label`,`model`) values \r\n(1,'admin','logentry'),\r\n(3,'auth','group'),\r\n(2,'auth','permission'),\r\n(4,'auth','user'),\r\n(5,'contenttypes','contenttype'),\r\n(6,'sessions','session');\r\n\r\n/*Table structure for table `django_migrations` */\r\n\r\nDROP TABLE IF EXISTS `django_migrations`;\r\n\r\nCREATE TABLE `django_migrations` (\r\n `id` int(11) NOT NULL AUTO_INCREMENT,\r\n `app` varchar(255) NOT NULL,\r\n `name` varchar(255) NOT NULL,\r\n `applied` datetime(6) NOT NULL,\r\n PRIMARY KEY (`id`)\r\n) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `django_migrations` */\r\n\r\ninsert into `django_migrations`(`id`,`app`,`name`,`applied`) values \r\n(1,'contenttypes','0001_initial','2018-11-18 14:04:18.744053'),\r\n(2,'auth','0001_initial','2018-11-18 14:04:25.088783'),\r\n(3,'admin','0001_initial','2018-11-18 14:04:26.491863'),\r\n(4,'admin','0002_logentry_remove_auto_add','2018-11-18 14:04:26.563684'),\r\n(5,'contenttypes','0002_remove_content_type_name','2018-11-18 14:04:27.487171'),\r\n(6,'auth','0002_alter_permission_name_max_length','2018-11-18 14:04:28.077689'),\r\n(7,'auth','0003_alter_user_email_max_length','2018-11-18 14:04:28.560415'),\r\n(8,'auth','0004_alter_user_username_opts','2018-11-18 14:04:28.627238'),\r\n(9,'auth','0005_alter_user_last_login_null','2018-11-18 14:04:29.232604'),\r\n(10,'auth','0006_require_contenttypes_0002','2018-11-18 14:04:29.276451'),\r\n(11,'auth','0007_alter_validators_add_error_messages','2018-11-18 14:04:29.327314'),\r\n(12,'auth','0008_alter_user_username_max_length','2018-11-18 14:04:30.355174'),\r\n(13,'sessions','0001_initial','2018-11-18 14:04:30.859427');\r\n\r\n/*Table structure for table `django_session` */\r\n\r\nDROP TABLE IF EXISTS 
`django_session`;\r\n\r\nCREATE TABLE `django_session` (\r\n `session_key` varchar(40) NOT NULL,\r\n `session_data` longtext NOT NULL,\r\n `expire_date` datetime(6) NOT NULL,\r\n PRIMARY KEY (`session_key`),\r\n KEY `django_session_expire_date_a5c62663` (`expire_date`)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `django_session` */\r\n\r\ninsert into `django_session`(`session_key`,`session_data`,`expire_date`) values \r\n('4l19z3uyvoql3l7e2kmp7lnk8y17f0oi','Y2UyZGNjMDE4M2QzMzAwNjJiOGFhYWQwNTUxZTgyNDQ0NGIzYjlhZTp7InZlcmlmeWNvZGUiOiJFWEQ5IiwidmlwdXNlciI6eyJpZCI6MTcsInVzZXJuYW1lIjoiempmX2hlYXJ0IiwibmFtZSI6Ilx1NWYyMFx1N2VlN1x1NmNkNSIsInBhc3N3b3JkIjoiMjAyY2I5NjJhYzU5MDc1Yjk2NGIwNzE1MmQyMzRiNzAiLCJjb2RlIjoiNDAwMDEwIiwic2V4IjowLCJhZGRyZXNzIjoiXHU2ZTI5XHU2Y2M5XHU5NTQ3XHU0ZTJkXHU1MTczXHU2NzUxXHU1MjFiXHU1YmEyXHU1YzBmXHU5NTQ3MTNcdTUzZjdcdTY5N2MyXHU1MzU1XHU1MTQzOTA0IiwicGhvbmUiOiIxNzYwMDIyMDIwNSIsImVtYWlsIjoiODc3NjIyNTgxQHFxLmNvbSIsInN0YXRlIjoxfSwic2hvcGxpc3QiOnsiOCI6eyJpZCI6OCwidHlwZWlkIjo3LCJnb29kcyI6IlJPRyBcdTUxYjBcdTUyMDMiLCJjb21wYW55IjoiXHU1MzRlXHU3ODU1IiwiY29udGVudCI6IjxoMiBzdHlsZT1cIm1hcmdpbjogMHB4OyBmb250LWZhbWlseTogJnF1b3Q7TWljcm9zb2Z0IFlhSGVpJnF1b3Q7LCBTVEhlaXRpLCBzYW5zLXNlcmlmOyBmb250LXdlaWdodDogbm9ybWFsOyBwYWRkaW5nOiAwcHg7IC13ZWJraXQtdGFwLWhpZ2hsaWdodC1jb2xvcjogcmdiYSgwLCAwLCAwLCAwKTsgb3V0bGluZTogbm9uZSAwcHg7IGZvbnQtc2l6ZTogMjRweDsgY29sb3I6IHJnYigzOCwgMzgsIDM4KTsgbGluZS1oZWlnaHQ6IDM2cHg7IHdoaXRlLXNwYWNlOiBub3JtYWw7IGJhY2tncm91bmQtY29sb3I6IHJnYigyNTUsIDI1NSwgMjU1KTtcIj5ST0dcdTUxYjBcdTUyMDMgMTUuNlx1ODJmMVx1NWJmOCAxMjBIeiAyNW1zIFx1OTYzMlx1NzBhYlx1NTE0OVx1OTZmZVx1OTc2Mlx1NWM0Zlx1NmUzOFx1NjIwZlx1N2IxNFx1OGJiMFx1NjcyY1x1NzUzNVx1ODExMTwvaDI+PHA+PGJyLz48L3A+IiwicHJpY2UiOjI2OTk5Ljk5LCJwaWNuYW1lIjoiMTU0MzQ3ODQ3Ni44MzI0NDE4LmpwZyIsInN0b3JlIjoxNjcwLCJudW0iOjQsImNsaWNrIjozLCJzdGF0ZSI6MX19LCJhZG1pbnVzZXIiOiJhZG1pbiIsIl9zZXNzaW9uX2V4cGlyeSI6MH0=','2018-12-18 10:09:39.951287'),\r\n('dotm4uo2bv0a48gbp40dsubv4eq9le1y','OTY0MjU5MjYwODgwZDVjYmU5ZTU3NzhjNjIwZDY2YjgwYTZmMmZjNjp7InZlcmlmeWNvZGUiOiIwVEc1Iiwic2hvcGxpc3QiOnt9LCJfc2Vzc2lvbl9leHBpcnkiOjAsImFkbWludXNlciI6ImFkbWluIiwidmlwdXNlciI6eyJpZCI6MTcsInVzZXJuYW1lIjoiempmX2hlYXJ0IiwibmFtZSI6Ilx1NWYyMFx1N2VlN1x1NmNkNSIsInBhc3N3b3JkIjoiMjAyY2I5NjJhYzU5MDc1Yjk2NGIwNzE1MmQyMzRiNzAiLCJjb2RlIjoiNDAwMDEwIiwic2V4IjoxLCJhZGRyZXNzIjoiXHU2ZTI5XHU2Y2M5XHU5NTQ3XHU0ZTJkXHU1MTczXHU2NzUxXHU1MjFiXHU1YmEyXHU1YzBmXHU5NTQ3MTNcdTUzZjdcdTY5N2MyXHU1MzU1XHU1MTQzOTA0IiwicGhvbmUiOiIxNzYwMDIyMDIwNSIsImVtYWlsIjoiODc3NjIyNTgxQHFxLmNvbSIsInN0YXRlIjoxfX0=','2018-12-17 16:09:16.356480'),\r\n('yt2liv5qvqd5bl5behejw02wwx71q58g','ZThiOTRiZTFlMGM0ZjJiNzQxMWE4YWQ2OWE5NjE3MWRiYWZlNGUzMTp7InZlcmlmeWNvZGUiOiIxN09NIiwiYWRtaW51c2VyIjoiYWRtaW4iLCJfc2Vzc2lvbl9leHBpcnkiOjAsInZpcHVzZXIiOnsiaWQiOjE3LCJ1c2VybmFtZSI6InpqZl9oZWFydCIsIm5hbWUiOiJcdTVmMjBcdTdlZTdcdTZjZDUiLCJwYXNzd29yZCI6IjgxZGM5YmRiNTJkMDRkYzIwMDM2ZGJkODMxM2VkMDU1IiwiY29kZSI6IjQwMDAxMCIsInNleCI6MSwiYWRkcmVzcyI6Ilx1NmUyOVx1NmNjOVx1OTU0N1x1NGUyZFx1NTE3M1x1Njc1MVx1NTIxYlx1NWJhMlx1NWMwZlx1OTU0NzEzXHU1M2Y3XHU2OTdjMlx1NTM1NVx1NTE0MzkwNCIsInBob25lIjoiMTc2MDAyMjAyMDUiLCJlbWFpbCI6Ijg3NzYyMjU4MUBxcS5jb20iLCJzdGF0ZSI6MX0sInNob3BsaXN0Ijp7fX0=','2018-12-18 21:07:29.941065');\r\n\r\n/*Table structure for table `goods` */\r\n\r\nDROP TABLE IF EXISTS `goods`;\r\n\r\nCREATE TABLE `goods` (\r\n `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\r\n `typeid` int(11) unsigned NOT NULL,\r\n `goods` varchar(32) NOT NULL,\r\n `company` varchar(50) DEFAULT NULL,\r\n `content` text,\r\n `price` double(8,2) unsigned NOT NULL,\r\n `picname` varchar(255) 
DEFAULT NULL,\r\n `store` int(11) unsigned NOT NULL DEFAULT '0',\r\n `num` int(11) unsigned NOT NULL DEFAULT '0',\r\n `clicknum` int(11) unsigned NOT NULL DEFAULT '0',\r\n `state` tinyint(1) unsigned NOT NULL DEFAULT '1',\r\n `addtime` datetime DEFAULT NULL,\r\n PRIMARY KEY (`id`),\r\n KEY `typeid` (`typeid`)\r\n) ENGINE=MyISAM AUTO_INCREMENT=17 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `goods` */\r\n\r\ninsert into `goods`(`id`,`typeid`,`goods`,`company`,`content`,`price`,`picname`,`store`,`num`,`clicknum`,`state`,`addtime`) values \r\n(7,7,'ThinkPad_P52s','Lenovo','<pre style=\\\"box-sizing: border-box; -webkit-tap-highlight-color: transparent; text-size-adjust: none; -webkit-font-smoothing: antialiased; overflow: auto; font-family: Consolas, &quot;Liberation Mono&quot;, Menlo, Courier, monospace; font-size: 14px; padding: 0.85em 1em; margin-top: 0px; margin-bottom: 1.275em; line-height: 1.42857; word-break: break-all; overflow-wrap: normal; color: rgb(51, 51, 51); background-color: rgb(247, 247, 247); border: none; border-radius: 4px; white-space: pre-wrap; break-inside: avoid; direction: ltr; letter-spacing: 0.2px;\\\">联想ThinkPad&nbsp;P52s&nbsp;20LBA007CD&nbsp;&nbsp;15.6英寸轻薄固态笔记本电脑&nbsp;独显商务移动图形工作站</pre><p><br/></p>',9999.00,'1543474873.452332.jpg',250,0,8,1,'2018-11-29 07:01:14'),\r\n(10,5,'iPhone XS Max','APPLE','<h1 data-spm=\\\"1000983\\\" data-spm-max-idx=\\\"1\\\" data-spm-anchor-id=\\\"a220o.1000855.0.1000983.22ce37dc9vj6cK\\\" style=\\\"margin: 0px; padding: 0px 0px 0.2em; font-size: 16px; font-family: &quot;microsoft yahei&quot;; line-height: 1; white-space: normal; background-color: rgb(255, 255, 255);\\\"><a target=\\\"_blank\\\" href=\\\"https://detail.tmall.com/item.htm?spm=a220o.1000855.1000983.1.22ce37dc9vj6cK&id=577084336527&standard=1\\\" data-spm-anchor-id=\\\"a220o.1000855.1000983.1\\\" style=\\\"margin: 0px; padding: 0px; text-decoration-line: none; color: rgb(0, 0, 0); outline: 0px; vertical-align: middle;\\\">Apple/苹果 iPhone XS Max</a></h1><p><br/></p>',9599.00,'1543479468.7035391.jpg',6666,0,2,1,'2018-11-29 08:17:49'),\r\n(8,7,'ROG 冰刃','华硕','<h2 style=\\\"margin: 0px; font-family: &quot;Microsoft YaHei&quot;, STHeiti, sans-serif; font-weight: normal; padding: 0px; -webkit-tap-highlight-color: rgba(0, 0, 0, 0); outline: none 0px; font-size: 24px; color: rgb(38, 38, 38); line-height: 36px; white-space: normal; background-color: rgb(255, 255, 255);\\\">ROG冰刃 15.6英寸 120Hz 25ms 防炫光雾面屏游戏笔记本电脑</h2><p><br/></p>',26999.99,'1543478476.8324418.jpg',1670,1,7,1,'2018-11-29 08:01:17'),\r\n(9,6,'iPad Pro 2018','APPLE','<p>iPad Pro 2018全新全面屏平板电脑</p>',6499.00,'1543479066.2592497.jpg',10000,0,2,1,'2018-11-29 08:11:07'),\r\n(11,2,'雅鹿连帽羽绒服','雅鹿','<h1 data-spm=\\\"1000983\\\" data-spm-anchor-id=\\\"a220o.1000855.0.1000983.47615270XEadUY\\\" style=\\\"margin: 0px; padding: 0px 0px 0.2em; font-size: 16px; font-family: &quot;microsoft yahei&quot;; line-height: 1; white-space: normal; background-color: rgb(255, 255, 255);\\\">雅鹿反季男士加厚羽绒服狐狸毛领中长款中年爸爸冬季新款连帽外套</h1><p><br/></p>',699.00,'1543479610.4562628.jpg',450,0,8,1,'2018-11-29 08:20:11'),\r\n(12,2,'恒源祥秋冬季夹克衫','恒源祥','<h1 data-spm=\\\"1000983\\\" data-spm-anchor-id=\\\"a220o.1000855.0.1000983.4d9c494e29GQA4\\\" style=\\\"margin: 0px; padding: 0px 0px 0.2em; font-size: 16px; font-family: &quot;microsoft yahei&quot;; line-height: 1; white-space: normal; background-color: rgb(255, 255, 255);\\\">恒源祥秋冬季男士夹克衫中年立领加绒加厚休闲保暖棉衣外套爸爸装</h1><p><br/></p>',298.00,'1543479763.35928.jpg',700,0,2,1,'2018-11-29 
08:22:44'),\r\n(13,2,'恒源祥羊毛衫女','恒源祥','<h1 data-spm=\\\"1000983\\\" data-spm-anchor-id=\\\"a220o.1000855.0.1000983.12246238D8Zugo\\\" style=\\\"margin: 0px; padding: 0px 0px 0.2em; font-size: 16px; font-family: &quot;microsoft yahei&quot;; line-height: 1; white-space: normal; background-color: rgb(255, 255, 255);\\\">恒源祥羊毛衫女半高领秋冬纯羊毛套头针织衫纯色毛衣女休闲打底衫</h1><p><br/></p>',298.00,'1543479985.677891.jpg',777,0,9,1,'2018-11-29 08:26:26'),\r\n(15,17,'Python编程 从入门到实践','人民邮电出版社','<p><span style=\\\"color: rgb(227, 57, 60); font-family: arial, &quot;microsoft yahei&quot;; font-size: 12px; background-color: rgb(255, 255, 255);\\\"><span style=\\\"color: rgb(227, 57, 60); font-family: arial, &quot;microsoft yahei&quot;; font-size: 12px; background-color: rgb(255, 255, 255);\\\">Python3.5编程入门图书</span></span></p>',72.70,'1543925196.9734619.jpg',990,3,1,1,'2018-12-04 20:06:37');\r\n\r\n/*Table structure for table `orders` */\r\n\r\nDROP TABLE IF EXISTS `orders`;\r\n\r\nCREATE TABLE `orders` (\r\n `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\r\n `uid` int(11) unsigned DEFAULT NULL,\r\n `linkman` varchar(32) DEFAULT NULL,\r\n `address` varchar(255) DEFAULT NULL,\r\n `code` char(6) DEFAULT NULL,\r\n `phone` varchar(16) DEFAULT NULL,\r\n `addtime` datetime DEFAULT NULL,\r\n `total` double(8,2) unsigned DEFAULT NULL,\r\n `state` tinyint(1) unsigned DEFAULT NULL,\r\n PRIMARY KEY (`id`)\r\n) ENGINE=MyISAM AUTO_INCREMENT=10 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `orders` */\r\n\r\ninsert into `orders`(`id`,`uid`,`linkman`,`address`,`code`,`phone`,`addtime`,`total`,`state`) values \r\n(7,17,'张继法','温泉镇中关村创客小镇13号楼2单元904','400010','17600220205','2018-12-03 16:04:38',596.00,0),\r\n(6,17,'张继法','温泉镇中关村创客小镇13号楼2单元904','400010','17600220205','2018-12-03 16:00:56',9599.00,0),\r\n(5,1,'管理员','北京市朝阳区大山子007号','100086','13566686868','2018-12-02 16:20:06',9999.00,3),\r\n(8,17,'张继法','温泉镇中关村创客小镇13号楼2单元904','400010','17600220205','2018-12-03 16:09:16',26999.99,0),\r\n(9,17,'张继法','温泉镇中关村创客小镇13号楼2单元904','400010','17600220205','2018-12-04 21:07:29',218.10,3);\r\n\r\n/*Table structure for table `type` */\r\n\r\nDROP TABLE IF EXISTS `type`;\r\n\r\nCREATE TABLE `type` (\r\n `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\r\n `name` varchar(32) DEFAULT NULL,\r\n `pid` int(11) unsigned DEFAULT '0',\r\n `path` varchar(255) DEFAULT NULL,\r\n PRIMARY KEY (`id`)\r\n) ENGINE=MyISAM AUTO_INCREMENT=18 DEFAULT CHARSET=utf8;\r\n\r\n/*Data for the table `type` */\r\n\r\ninsert into `type`(`id`,`name`,`pid`,`path`) values \r\n(1,'服装',0,'0,'),\r\n(2,'男装',1,'0,1,'),\r\n(3,'女装',1,'0,1,'),\r\n(4,'数码电子',0,'0,'),\r\n(5,'智能手机',4,'0,4,'),\r\n(6,'便携平板',4,'0,4,'),\r\n(7,'笔记本电脑',4,'0,4,'),\r\n(8,'台式机',4,'0,4,'),\r\n(9,'生活家电',0,'0,'),\r\n(10,'平板电视',9,'0,9,'),\r\n(11,'空调',9,'0,9,'),\r\n(12,'冰箱',9,'0,9,'),\r\n(13,'洗衣机',9,'0,9,'),\r\n(16,'学习书籍',0,'0,'),\r\n(17,'IT编程',16,'0,16,');\r\n\r\n/*Table structure for table `users` */\r\n\r\nDROP TABLE IF EXISTS `users`;\r\n\r\nCREATE TABLE `users` (\r\n `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\r\n `username` varchar(32) NOT NULL,\r\n `name` varchar(16) DEFAULT NULL,\r\n `password` char(32) NOT NULL,\r\n `sex` tinyint(1) unsigned NOT NULL DEFAULT '1',\r\n `address` varchar(255) DEFAULT NULL,\r\n `code` char(6) DEFAULT NULL,\r\n `phone` varchar(16) DEFAULT NULL,\r\n `email` varchar(50) DEFAULT NULL,\r\n `state` tinyint(1) unsigned NOT NULL DEFAULT '1',\r\n `addtime` datetime DEFAULT NULL,\r\n PRIMARY KEY (`id`),\r\n UNIQUE KEY `username` (`username`)\r\n) ENGINE=MyISAM AUTO_INCREMENT=20 DEFAULT 
CHARSET=utf8;\r\n\r\n/*Data for the table `users` */\r\n\r\ninsert into `users`(`id`,`username`,`name`,`password`,`sex`,`address`,`code`,`phone`,`email`,`state`,`addtime`) values \r\n(1,'admin','admin','21232f297a57a5a743894a0e4a801fc3',0,'北京市朝阳区大山子007号','100086','13566686868','[email protected]',0,'2018-04-07 21:20:08'),\r\n(5,'zjf_0205','zhangsan','96e79218965eb72c92a549dd5a330112',1,'温泉镇中关村创客小镇13号楼2单元904','400010','17093570255','[email protected]',1,'2018-11-18 10:56:38'),\r\n(6,'testUser','lisi','e10adc3949ba59abbe56e057f20f883e',0,'111','1111','99999999999','11111',1,'2018-11-18 10:57:12'),\r\n(7,'X90zHn','wangwu','81dc9bdb52d04dc20036dbd8313ed055',1,'温泉镇中关村创客小镇13号楼2单元904','400010','11111111111','[email protected]',1,'2018-11-18 10:57:46'),\r\n(8,'[email protected]','ZhangJif','202cb962ac59075b964b07152d234b70',0,'温泉镇中关村创客13号楼2单元904','4000','1760022','877622581@q',1,'2018-11-20 13:00:57'),\r\n(12,'12345','么玛西亚','202cb962ac59075b964b07152d234b70',0,'北京市朝阳区霄云路48号','1111','99999999999','[email protected]',1,'2018-11-21 11:23:35'),\r\n(19,'zhanghui','','202cb962ac59075b964b07152d234b70',1,'','','','',1,'2018-12-04 21:04:56'),\r\n(17,'zjf_heart','张继法','81dc9bdb52d04dc20036dbd8313ed055',1,'温泉镇中关村创客小镇13号楼2单元904','400010','17600220205','[email protected]',1,'2018-12-03 15:45:31');\r\n\r\n/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\r\n/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\r\n/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\r\n/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\r\n" } ]
num_files: 7
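Each file entry in these rows carries character-composition metrics such as `alpha_fraction` and `alphanum_fraction`. Below is a minimal sketch of how they appear to be derived. The counting rules are an assumption (they are not documented on this page), but a plain character ratio does reproduce the recorded values: 100 of the 118 characters of the PyPDNS `__init__.py` further down are alphabetic, and 100/118 ≈ 0.8474576 matches its stored `alpha_fraction`.

# Assumed derivation of the per-file character metrics; this is a sketch,
# not an official reference implementation for this dataset.
def char_fractions(text):
    total = len(text)
    if total == 0:
        return 0.0, 0.0
    alpha = sum(ch.isalpha() for ch in text)  # alphabetic characters only
    alnum = sum(ch.isalnum() for ch in text)  # letters and digits
    return alpha / total, alnum / total

# The 118-byte PyPDNS __init__.py from the row below, verbatim:
pypdns_init = ("from .api import PyPDNS\n"
               "from .errors import PDNSError, RateLimitError, "
               "UnauthorizedError, ForbiddenError, ServerError\n")
print(char_fractions(pypdns_init))  # -> (0.8474576..., 0.8474576...)

With no digits in that file, both fractions coincide, which is consistent with the identical `alpha_fraction` and `alphanum_fraction` recorded for it.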
repo_name: benhe119/PyPDNS
repo_url: https://github.com/benhe119/PyPDNS
snapshot_id: 20c68d5ce01b8d5a2a1a93eef8726905db2db03a
revision_id: f6119e3b399b8fc570f3ec3457ca9714f894be7b
directory_id: cc0bce08b5cdcd4eb431a2b9a33f45f9f2056ead
branch_name: refs/heads/master
visit_date: 2020-07-05T15:39:33.056793
revision_date: 2019-05-03T12:14:28
committer_date: 2019-05-03T12:14:28
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.8474576473236084, "alphanum_fraction": 0.8474576473236084, "avg_line_length": 58, "blob_id": "5b758c7556fefe57c97f877c34fa52c98737da5e", "content_id": "8474e7f2b45daff3f1a3076894cd51ff7a6a7085", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "permissive", "max_line_length": 93, "num_lines": 2, "path": "/pypdns/__init__.py", "repo_name": "benhe119/PyPDNS", "src_encoding": "UTF-8", "text": "from .api import PyPDNS\nfrom .errors import PDNSError, RateLimitError, UnauthorizedError, ForbiddenError, ServerError\n" } ]
num_files: 1
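The rows themselves read naturally as JSON Lines, one object per repository. A sketch of filtering such a dump, assuming a hypothetical local file `rows.jsonl` holding one row per line; the field names (`files`, `language`, `license_type`, `repo_name`, `num_files`) are taken from the rows shown on this page:

import json

# Stream a hypothetical JSON Lines dump of these rows and keep only
# repositories that contain permissively licensed Python files.
with open("rows.jsonl", encoding="utf-8") as fp:
    for line in fp:
        row = json.loads(line)
        permissive_python = [
            f for f in row["files"]
            if f["language"] == "Python" and f["license_type"] == "permissive"
        ]
        if permissive_python:
            print(row["repo_name"], row["num_files"], len(permissive_python))

Streaming line by line keeps memory flat even when the full dump is far larger than RAM.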
repo_name: llhbll/basic_source
repo_url: https://github.com/llhbll/basic_source
snapshot_id: ae4b1c077bf01b17135f9c93b1a1e8f1e113d4b7
revision_id: 92dff504834641f98e6ae3c740161841db35e2a4
directory_id: a345773fd8f5930a39696c33ad768969f371fcb5
branch_name: refs/heads/master
visit_date: 2021-05-19T05:27:37.129476
revision_date: 2020-11-10T01:42:34
committer_date: 2020-11-10T01:42:34
github_id: 251,547,716
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.6450963020324707, "alphanum_fraction": 0.6612181067466736, "avg_line_length": 34.11811065673828, "blob_id": "eae3ed84f99c95ff90d63e7ab245f5fda8cd0fd9", "content_id": "884192beb36fe8efa4769ace6c55ce6be1cec53a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4900, "license_type": "no_license", "max_line_length": 149, "num_lines": 127, "path": "/joonggonara_crawling.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport requests, re\nfrom bs4 import BeautifulSoup\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException\nimport time\nfrom openpyxl import Workbook, load_workbook\nimport pyperclip\nfrom selenium.webdriver.common.keys import Keys\nimport sys\n\nwb = Workbook()\nsheet1 = wb.active\nsheet1.title = '중고나라 상품' #시트명\nsheet1.cell(row=1, column=2).value = '제목'\nsheet1.cell(row=1, column=3).value = '작성자'\nsheet1.cell(row=1, column=4).value = '날짜'\nsheet1.cell(row=1, column=5).value = '가격'\n\ndef uf_get_idpw(siteName):\n f = open(\"./pw.txt\", 'r') # 실행할 폴더(바탕화면이면 바탕화면에)에 pw.txt file이 있어야 함.\n for data in f.readlines():\n site = data.split(':')[0]\n id = data.split(':')[1]\n pw = data.split(':')[2].strip('\\n')\n pw = 'lch' + pw #pw file이 노출되었을때를 대비하여 앞 3글자는 여기서\n if site == siteName:\n return id, pw\nkeyword = input(\"검색 키워드 입력 : \")\npage_cnt = input(\"검색할 page수 : \")\n#\nif getattr(sys, 'frozen', False):\n chromedriver_path = os.path.join(sys._MEIPASS, \"chromedriver.exe\")\n driver = webdriver.Chrome(chromedriver_path)\n driver2 = webdriver.Chrome(chromedriver_path)\nelse:\n driver = webdriver.Chrome()\n driver2 = webdriver.Chrome()\n\n# driver = webdriver.Chrome('./chromedriver.exe')\n# driver2 = webdriver.Chrome('./chromedriver.exe')\n#\n#\n# 자동입력 방지를 뚫기위해 pyperclip을 사용 로긴\nuser_id, user_pw = uf_get_idpw('naver')\nnaverlogin_url = 'https://nid.naver.com/nidlogin.login?mode=form&url=https%3A%2F%2Fwww.naver.com'\ndriver.get(naverlogin_url)\ntime.sleep(1)\nnaver_id_input = driver.find_element_by_css_selector('#id')\nnaver_id_input.clear()\ntime.sleep(1)\n\nnaver_id_input.click()\npyperclip.copy(user_id)\nnaver_id_input.send_keys(Keys.CONTROL, 'v')\ntime.sleep(1)\n\nnaver_pw_input = driver.find_element_by_css_selector('#pw')\nnaver_pw_input.click()\npyperclip.copy(user_pw)\nnaver_pw_input.send_keys(Keys.CONTROL, 'v')\ntime.sleep(1)\n\nsearch_button = driver.find_element_by_css_selector(\"#log\\.login\")\nsearch_button.click()\ntime.sleep(3)\n\njoonggonara_url = 'https://cafe.naver.com/joonggonara'\n# joonggonara_url = 'https://cafe.naver.com/joonggonara.cafe?iframe_url=/ArticleList.nhn%3Fsearch.clubid=10050146%26search.boardtype=L%26viewType=pc'\n\ndriver.get(joonggonara_url)\n\nsearch_input = driver.find_element_by_css_selector('input#topLayerQueryInput')\n# search_input.send_keys('갤럭시 탭')\nsearch_input.send_keys(keyword)\nsearch_button = driver.find_element_by_css_selector(\"form[name='frmBoardSearch'] > button\")\nsearch_button.click()\n\ntime.sleep(1)\ndriver.switch_to.frame(\"cafe_main\")\n\nexcept_str = [ \"중나협력\", \"삽니다\", \"매입\", \"구입\", \"사요\", \"구매\" ]\nrow = 2\n\nfor page in range(1, int(page_cnt) + 1):\n list = driver.find_elements_by_css_selector('#main-area > div:nth-child(7) > table > tbody > tr')\n for item in list:\n title = item.find_element_by_css_selector('a.article').text.strip()\n # title = re.sub('[^0-9a-zA-Zㄱ-힗]', '', title)\n if any(format in title for format in except_str):\n 
continue\n strip_title = str(title).replace(' ', '')\n aa = title.find(keyword)\n if strip_title.find(keyword.replace(' ', '')) == -1: #양쪽다 공백없이 키워드 비교\n continue\n writer = item.find_element_by_css_selector('a.m-tcol-c').text.strip()\n ddate = item.find_element_by_css_selector('td.td_date').text.strip()\n link = item.find_element_by_css_selector('a.article').get_attribute('href')\n\n time.sleep(2)\n driver2.get(link) #로긴하고 상세페이지 들어가면 모두 접근가능할 줄 알았는데 중고나라회원에 허용한 page는 접근이 안됨! 추후 방법을 찾아야 함. 현재 실력으로는 불가\n time.sleep(2) # 리턴data에 noindex, nofollow 그래서 안되는것으로 파악... 그래도 우회하는 방법이 없을려나???\n\n try:\n driver2.switch_to.frame(\"cafe_main\")\n cost = driver2.find_element_by_css_selector('span.cost').text\n except NoSuchElementException:\n cost = 'X'\n\n sheet1.cell(row=row, column=2).value = title\n sheet1.cell(row=row, column=2).hyperlink = link\n sheet1.cell(row=row, column=3).value = writer\n sheet1.cell(row=row, column=4).value = ddate\n sheet1.cell(row=row, column=5).value = cost\n\n row = row + 1\n\n\n if page % 10 == 0:\n driver.find_element_by_link_text('다음').click()\n else:\n next = str(page + 1)\n driver.find_element_by_link_text(next).click()\n# driver.quit()\n# driver2.quit()\nwb.save(keyword + \".xlsx\")\n\n #item.find_element_by_css_selector('a').click() #가격을 구하기 위해서\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5694444179534912, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 30.66666603088379, "blob_id": "043264ae5e509ffa6ef5f5faf8d86fff9531741b", "content_id": "f2c0a052411f23b9819ce8a30120a88b811186a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "no_license", "max_line_length": 59, "num_lines": 9, "path": "/study_source.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "import os\n\nfor dirpath, dirnames, filenames in os.walk(r\"E:\\2020학년도\"):\n\n for dirname in dirnames:\n new_dir = dirpath.replace(\"2020\", \"2021\")\n new_dirname = os.path.join(new_dir, dirname)\n os.makedirs(new_dirname)\n print (\"\\t\", dirname, \" \", new_dirname)\n\n\n\n" }, { "alpha_fraction": 0.48102468252182007, "alphanum_fraction": 0.5645161271095276, "avg_line_length": 21.382978439331055, "blob_id": "5d10866c61ea2e14aaf0dad36f951a603577ed24", "content_id": "2cc031a8e6465297d2768177b0065ce6168df122", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 65, "num_lines": 47, "path": "/first_try.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "\n# 파일 읽고 쓰기\n# import sys\n#\n# option = sys.argv[1]\n#\n# if option == '-a':\n# memo = sys.argv[2]\n# f = open('memo.txt', 'a')\n# f.write(memo)\n# f.write('\\n')\n# f.close()\n# elif option == '-v':\n# f = open('memo.txt', 'r')\n# memo = f.read()\n# f.close()\n# print(memo)\n\n# pandas Seris\n# from pandas import Series\n#\n# data = [100, 200, 300, 400]\n# date = ['2018-08-01', '2019-08-01', '2020-08-01', '2020-08-02']\n# s = Series(data, index = date)\n# print(s[[0, 2]])\n\n# pandas dataframe\n# from pandas import DataFrame\n#\n# data = {'open':[100, 200], 'high':[110, 210]}\n# df = DataFrame(data, index=['2018-01-01', '2018-01-02'])\n#\n# print(df)\n\n# import pandas as pd 데이타프레임 data추가 엑셀에 읽고 쓰기\n#\n# df = pd.read_excel(\"test.xlsx\")\n# df = df.set_index('date')\n# f_title = df[\"d_title\"] * 2\n# df[\"f_title\"] = f_title\n# df.to_excel(\"out.xlsx\")\n# print(df.iloc[2])\n\n# import pandas as pd\n# url = 
\"https://finance.naver.com/item/sise_day.nhn?code=066570\"\n# df = pd.read_html(url)\n# df[0] = df[0].dropna(axis=0)\n# df[0].to_excel(\"out.xlsx\")\n\n" }, { "alpha_fraction": 0.6631578803062439, "alphanum_fraction": 0.7087719440460205, "avg_line_length": 27.399999618530273, "blob_id": "452c2c6984b856acb41a8b18f4f432edaf878b07", "content_id": "48c75d2b988011fb3f298fcad4446fa759c92abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/first_file.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "import sqlite3\n\ncon = sqlite3.connect('./test.db')\ncur = con.cursor()\n#cur.execute('CREATE TABLE PhoneBook(Name text, PhoneNum text);')\ncur.execute('INSERT INTO PhoneBook VALUES(\"Lim ChanHyuk\", \"010-8443-8473\");')\ncur.execute('SELECT * FROM PhoneBook;')\ncon.commit()\n\nprint(cur.fetchall())\n\n" }, { "alpha_fraction": 0.45258477330207825, "alphanum_fraction": 0.4871583878993988, "avg_line_length": 33.70857238769531, "blob_id": "6dc308fa63a7547c7e88bea69c9a4721c2347de8", "content_id": "fffa4e79a787c262963968da4b11482fc445f8a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6606, "license_type": "no_license", "max_line_length": 122, "num_lines": 175, "path": "/lotto.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport requests\nfrom tqdm import tqdm\nimport json\nimport random\n\ndef getLottoWinInfo(minDrwNo, maxDrwNo):\n drwtNo1 = []\n drwtNo2 = []\n drwtNo3 = []\n drwtNo4 = []\n drwtNo5 = []\n drwtNo6 = []\n bnusNo = []\n totSellamnt = []\n drwNoDate = []\n firstAccumamnt = []\n firstPrzwnerCo = []\n firstWinamnt = []\n\n for i in tqdm(range(minDrwNo, maxDrwNo+1, 1)):\n req_url = \"https://www.dhlottery.co.kr/common.do?method=getLottoNumber&drwNo=\" + str(i)\n req_lotto = requests.get(req_url)\n\n# lottoNo = req_lotto.json()\n lottoNo = json.loads(req_lotto.text)\n\n drwtNo1.append(lottoNo['drwtNo1'])\n drwtNo2.append(lottoNo['drwtNo2'])\n drwtNo3.append(lottoNo['drwtNo3'])\n drwtNo4.append(lottoNo['drwtNo4'])\n drwtNo5.append(lottoNo['drwtNo5'])\n drwtNo6.append(lottoNo['drwtNo6'])\n bnusNo.append(lottoNo['bnusNo'])\n totSellamnt.append(lottoNo['totSellamnt'])\n drwNoDate.append(lottoNo['drwNoDate'])\n firstAccumamnt.append(lottoNo['firstAccumamnt'])\n firstPrzwnerCo.append(lottoNo['firstPrzwnerCo'])\n firstWinamnt.append(lottoNo['firstWinamnt'])\n\n lotto_dict = {\"추첨일\":drwNoDate, \"Num1\":drwtNo1, \"Num2\":drwtNo2, \"Num3\":drwtNo3, \"Num4\":drwtNo4,\n \"Num5\":drwtNo5, \"Num6\":drwtNo6, \"bnsNum\":bnusNo, \"총판매금액\":totSellamnt,\n \"총1등당첨금\":firstAccumamnt, \"1등당첨인원\":firstPrzwnerCo, \"1등수령액\":firstWinamnt}\n df_lotto = pd.DataFrame(lotto_dict)\n\n return df_lotto\n ##출처## [솜씨좋은장씨]\n\ndef Num_Chk(nums):\n for num in nums:\n if(int(num) <= 0) or (int(num) >= 46):\n return \"잘못된 번호를 입력\"\n return '1'\n\ndef input_my_lotte_num():\n print(\"1부터 46까지 구간 숫자를 중복없이 스페이스로 구분하여 6개 입력하세요 : (랜덤 6개 : r, 빠져나가기 : q)\")\n num_str = input()\n six_list = num_str.split()\n# aa = map(int, num_str.split()) #리스트의 문자열을 int형으로\n\n return six_list\n\ndef lotto_match(my_num, lotto_list):\n cnt_1 = cnt_2 = cnt_3 = cnt_4 = cnt_5 = cnt_6 = 0\n for lotto_num in lotto_list:\n match_num = my_num.intersection(set(lotto_num[2:8]))\n if len(match_num) >= 3:\n print(lotto_num[0], \"회차 : 추첨일자 = \", lotto_num[1:2], \" 당첨번호 = \", lotto_num[2:8], \" 
보너스번호 = \",\n lotto_num[8:9], \" 1등 당첨인원 = \",lotto_num[11:12], \" 1등 수령액 = \", format(int(lotto_num[12:13][0]), ','))\n\n if len(match_num) == 6:\n print(\"-----------------1등입니다.------------------\")\n cnt_1 += 1\n elif len(match_num) == 5 and len(my_num.intersection(set(lotto_num[8:9]))) == 1:\n print(\"-----------------2등입니다.------------------\")\n cnt_2 += 1\n elif len(match_num) == 5:\n print(\"------------------3등입니다.----------------- : \", match_num)\n cnt_3 += 1\n elif len(match_num) == 4:\n print(\"----------4등입니다.----------\", match_num)\n cnt_4 += 1\n elif len(match_num) == 3:\n print(\"5등입니다.\", match_num)\n cnt_5 += 1\n else:\n # print(\"꽝입니다.\")\n cnt_6 += 1\n print(\"\\n 꽝 : \", cnt_6, \"회\", \" 5등 :\", cnt_5, \"회 \", \" 4등 :\", cnt_4, \"회\", \" 3등 :\", cnt_3, \"회\",\n \" 2등: \", cnt_2, \"회\", \" 1등 :\", cnt_1, \"회\\n\")\n if cnt_1 != 0 or cnt_2 != 0 or cnt_3 != 0:\n print(\"와우 3등이상 당첨되셨을 번호였네요!\\n\")\n\ncnt = 0\nwhile 1:\n my_num = input_my_lotte_num()\n if my_num[0][0] == 'q':\n break\n if my_num[0][0] == 'r':\n aaa = '1'\n tmp_set = set()\n while len(tmp_set) < 6:\n tmp_set.add(random.randrange(1, 46))\n my_num = list(tmp_set)\n else:\n aaa = Num_Chk(my_num)\n if aaa == '1':\n\n my_num_int = map(int, my_num)\n set_my_num = set(my_num_int)\n print(set_my_num)\n # df_aa = getLottoWinInfo(1, 919) 로또사이트의api를 이용하여 로또당첨번호 읽어와서 df로\n # df_aa.to_excel(\"lotto_list.xlsx\")\n df_aa = pd.read_excel('lotto_list.xlsx', sheet_name='Sheet1')\n aa_list = df_aa.values.tolist()\n lotto_match(set_my_num, aa_list)\n else:\n print(aaa)\n\n# 999 지난 로또 당첨 번호와 비교하여 엑셀에서 읽어와\n# import pandas as pd\n# import requests\n# from tqdm import tqdm\n# import json\n# import random\n#\n# def lotto_match(my_num, lotto_list):\n# cnt_1 = cnt_2 = cnt_3 = cnt_4 = cnt_5 = cnt_6 = 0\n# for lotto_num in lotto_list:\n# match_num = my_num.intersection(set(lotto_num[2:8]))\n# if len(match_num) >= 5:\n# print(my_num)\n# print(lotto_num[0], \"회차 : 추첨일자 = \", lotto_num[1:2], \" 당첨번호 = \", lotto_num[2:8], \" 보너스번호 = \",\n# lotto_num[8:9], \" 1등 당첨인원 = \",lotto_num[11:12], \" 1등 수령액 = \", format(int(lotto_num[12:13][0]), ','))\n#\n# if len(match_num) == 6:\n# print(\"-----------------1등입니다.------------------\")\n# cnt_1 += 1\n# return 1\n# elif len(match_num) == 5 and len(my_num.intersection(set(lotto_num[8:9]))) == 1:\n# print(\"-----------------2등입니다.------------------\")\n# cnt_2 += 1\n# elif len(match_num) == 5:\n# print(\"------------------3등입니다.----------------- : \", match_num)\n# cnt_3 += 1\n# elif len(match_num) == 4:\n# # print(\"----------4등입니다.----------\", match_num)\n# cnt_4 += 1\n# elif len(match_num) == 3:\n# # print(\"5등입니다.\", match_num)\n# cnt_5 += 1\n# else:\n# # print(\"꽝입니다.\")\n# cnt_6 += 1\n# # print(\"\\n 꽝 : \", cnt_6, \"회\", \" 5등 :\", cnt_5, \"회 \", \" 4등 :\", cnt_4, \"회\", \" 3등 :\", cnt_3, \"회\",\n# # \" 2등: \", cnt_2, \"회\", \" 1등 :\", cnt_1, \"회\\n\")\n#\n#\n# cnt = 0\n# df_aa = pd.read_excel('lotto_list.xlsx', sheet_name='Sheet1')\n# aa_list = df_aa.values.tolist()\n# while 1:\n# cnt += 1\n# tmp_set = set()\n# while len(tmp_set) < 6:\n# tmp_set.add(random.randrange(1, 46))\n# my_num = list(tmp_set)\n#\n# my_num_int = map(int, my_num)\n# set_my_num = set(my_num_int)\n#\n# aa = lotto_match(set_my_num, aa_list)\n# if aa == 1:\n# print(cnt, \" 째만에 1등\")\n# break\n" }, { "alpha_fraction": 0.5299334526062012, "alphanum_fraction": 0.5460088849067688, "avg_line_length": 27.58730125427246, "blob_id": "4316fde756b206d3e6ce4cb94710d066feda5df3", "content_id": 
"09cf0ff605b3bc7802c8db6d3826e7002d441448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1888, "license_type": "no_license", "max_line_length": 86, "num_lines": 63, "path": "/file_name_ctl.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "\n# 숫자로만 된 일시 file명을 보기좋은 file명으로 변경\n# from glob import glob\n# import os\n#\n# # file_list = glob(r\"C:\\Users\\pc\\Downloads\\20200712\\2020*.jpg\")\n# Base_Dir = (r\"C:\\Users\\pc\\Downloads\\source\")\n# file_list = os.listdir(Base_Dir)\n\n# for file in file_list :\n# new_name = file[0:4] + '년 ' + file[4:6] + '월 ' + file[6:8] + '일 ' + file[8:]\n# to_file = os.path.join(Base_Dir, new_name)\n# from_file = os.path.join(Base_Dir, file)\n# os.rename(from_file, to_file)\n\n# for (path, dir, files) in os.walk(r\"C:\\Users\\pc\\Downloads\\Photos\"):\n# for file in files:\n# new_name = file[0:4] + '년 ' + file[4:6] + '월 ' + file[6:8] + '일 ' + file[8:]\n# to_file = os.path.join(path, new_name)\n# from_file = os.path.join(path, file)\n# os.rename(from_file, to_file)\n\n\n# 폴더 이동\n# idx = 2 # 폴더이름\n# for file in file_list :\n# new_dir = \"C:\\\\Users\\\\pc\\\\Downloads\\\\Photos\\\\\" + str(idx)\n# idx = idx + 1\n# to_file = os.path.join(new_dir, file)\n# from_file = os.path.join(Base_Dir, file)\n# # print(from_file, to_file)\n# os.rename(from_file, to_file)\n\n# from glob import glob\n# import os\n# \n# file_list = glob(\"./data/*.xlsx\")\n# \n# for file in file_list :\n# new_name = file.replace(\"복사복\", \"복\").replace(\" \", \"\")\n# os.rename(file, new_name)\n#\n\n\n# 회귀로 file명 찾기\n# import sys\n# \n# def search(dirname):\n# try:\n# filenames = os.listdir(dirname)\n# for filename in filenames:\n# full_filename = os.path.join(dirname, filename)\n# if os.path.isdir(full_filename):\n# search(full_filename)\n# else:\n# ext = os.path.splitext(full_filename)[-1]\n# if ext == '.py':\n# print(full_filename)\n# except PermissionError:\n# pass\n# \n# search(\"c:/\")\n# \n# \n\n" }, { "alpha_fraction": 0.6117058992385864, "alphanum_fraction": 0.6288365721702576, "avg_line_length": 24.01785659790039, "blob_id": "67b59b3c46eb1aa789ce2b108d87380fee027525", "content_id": "696edff28b7b1739231f111f342b44ed643977a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1433, "license_type": "no_license", "max_line_length": 53, "num_lines": 56, "path": "/naver_webtoon1.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup as bs\nfrom pprint import pprint\nimport requests, re, os\nfrom urllib.request import urlretrieve\nfrom openpyxl.drawing.image import Image\nfrom openpyxl import Workbook\n\nwb = Workbook()\nsheet1 = wb.active\nsheet1.title = '네이버 웹툰 완결편'\n\ntry:\n if not (os.path.isdir('image')):\n os.mkdir(os.path.join('image'))\nexcept OSError as e:\n if e.errno != errno.EEXITS:\n print(\"폴더생성실패\")\n exit()\nurl = 'https://comic.naver.com/webtoon/finish.nhn'\nhtml = requests.get(url)\nsoup = bs(html.text, 'html.parser')\nall_data = soup.find('div', {'class':'list_area'})\n\ndata_list = all_data.findAll('li')\n\ncol = 1\nrow = 1\nfor data in data_list:\n img_data = data.find('img')\n img_src = img_data['src']\n a_list = data.find('a')\n title = a_list['title']\n title = re.sub('[^0-9a-zA-Zㄱ-힗]', '', title)\n link = \"https//commic.naver.com\" + a_list['href']\n strong = data.find('strong').text\n\n# urlretrieve(img_src, './image/'+title+'.gif')\n img_title = './image/'+title+'.gif'\n 
img_file = Image(img_title)\n print(img_title)\n img_file.anchor= 'C10'\n #cell = sheet1.cell(row=10, column=1)\n sheet1.add_image(img_file)\n# sheet1.cell()\n# col = col + 1\n row = row + 1\n break\nwb.save(\"./webtoon.xlsx\")\n #print(title, strong, link)\n'''\n\ndata2 = data1.findAll('dd')\npprint(data2)\ndata3 = data2[0].find('span')\nprint(data3.text)\n'''\n" }, { "alpha_fraction": 0.5468451380729675, "alphanum_fraction": 0.6252390146255493, "avg_line_length": 57, "blob_id": "41411ebb787e8b7b273820ec745272dd73f3e1f0", "content_id": "0b756df93e39fc15cf0ace3cb487df48f530ab4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 322, "num_lines": 9, "path": "/imsi.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "data = {}\ndata['영희'] = (80, 95, 90)\ndata['철수'] = (90, 80, 70)\ndata['혜영'] = (70, 100, 90)\nprint(data) # sorted(data.items(), key=lambda x: x[1][0], reverse=False) # sort(1, 2, 3) --> 1 번째 iteratable 파라미터가 하나씩 2번째 수식에 대입되어 돌아 간다. # key=lambda x: x[0] 은 key 로 소팅을 의미 x[1] 은 데이터를 의미 x[1][0] 은 데이터 중에 0번째 항목 print('tuple 의 1번째 항목으로 소팅') data2 = sorted(data.items(), key=lambda x: x[1][0], reverse=False) print(data2)\n\nprint('tuple 의 1번째 항목으로 소팅')\ndata2 = sorted(data.items(), key=lambda x: x[1][0], reverse=False)\nprint(data2)\n\n" }, { "alpha_fraction": 0.6040100455284119, "alphanum_fraction": 0.6127819418907166, "avg_line_length": 19.921052932739258, "blob_id": "3716f310e9e85454f0da02a1eaaf0bb7f05ced11", "content_id": "9ee7e44e41854c54a19bb0b7c1e0ee518e1a0b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 47, "num_lines": 38, "path": "/bithum.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "import pybithumb as bt\nimport datetime\n\n# tickers = bt.get_tickers()\n#\n# for i in str(1)*3:\n# price = bt.get_current_price(\"BTC\")\n# print(price)\n# time.sleep(1)\n\ndetail = bt.get_market_detail(\"BTC\")\nprint(detail)\n\n# orderbook = bt.get_orderbook(\"BTC\")\n# bids = orderbook['bids']\n# asks = orderbook['asks']\n#\n# for bid in bids:\n# price = bid['price']\n# quant = bid['quantity']\n# print(\"매수호가:\", price, \"매수잔량:\", quant)\n#\n# for ask in asks:\n# print(ask)\n# ms = int(orderbook[\"timestamp\"])\n# dt = datetime.datetime.fromtimestamp(ms/1000)\n# print(dt)\n# for k in orderbook:\n# print(k)\n#\n# all = bt.get_current_price(\"ALL\")\n#\n# for ticker, data in all.items():\n# print(ticker, data[\"closing_price\"])\n\n# btc = bt.get_ohlcv(\"BTC\")\n# close = btc['close']\n# print(close)\n\n\n\n" }, { "alpha_fraction": 0.668597936630249, "alphanum_fraction": 0.7230591177940369, "avg_line_length": 46.97222137451172, "blob_id": "426a7e7329193f1de6593771aa7be0c83c3dc91a", "content_id": "2291709da3db4d705002cec3f34095687778f387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1774, "license_type": "no_license", "max_line_length": 265, "num_lines": 36, "path": "/crawling_우회샘플_noindex_아직구현못함.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "# # 999 브라우저 띄우지 않고 data 가져오기\nimport requests # import urllib.request\nfrom bs4 import BeautifulSoup\n\n# url = 
'http://cafe.daum.net/_c21_/bbs_read?grpid=2ouV&fldid=Erpk&contentval=00040zzzzzzzzzzzzzzzzzzzzzzzzz&datanum=248&page=1&prev_page=0&firstbbsdepth=&lastbbsdepth=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzz&listnum=20'\nurl = 'https://cafe.naver.com/ca-fe/ArticleRead.nhn?clubid=10050146&page=1&inCafeSearch=true&searchBy=0&query=%EA%B0%A4%EB%9F%AD%EC%8B%9C+%ED%83%AD&includeAll=&exclude=&include=&exact=&searchdate=all&media=0&sortBy=date&articleid=758683319&referrerAllArticles=true'\n# url = 'https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query=%ED%99%98%EC%9C%A8'\n\n# 출처: https://hanswsw.tistory.com/7 [Han's Dev Log]'\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}\n# webpage = requests.get(url).text # webpage = urllib.request.urlopen(url)와 같음\nwebpage = requests.get(url, headers=headers) # webpage = urllib.request.urlopen(url)와 같음\nsource = BeautifulSoup(webpage.text, 'html5lib')\nprint(source)\nreviews = source.find_all('span', {'class': 'cost'})\n\nfor review in reviews:\n print(review)\n print(review.get_text().strip())\n\n\n\nimport requests # import urllib.request\nfrom bs4 import BeautifulSoup\n\n# base_url = 'http://movie.daum.net/moviedb/grade?movieId=2725&type=netizen&page={}'\n#\n# for n in range(77):\n# url = base_url.format(n + 1)\n# webpage = requests.get(url).text # webpage = urllib.request.urlopen(url)와 같음\n# source = BeautifulSoup(webpage, 'html5lib')\n# reviews = source.find_all('p', {'class': 'desc_review'})\n#\n# for review in reviews:\n# print(review.get_text().strip())" }, { "alpha_fraction": 0.6250433325767517, "alphanum_fraction": 0.6441206932067871, "avg_line_length": 27.829999923706055, "blob_id": "990af77cec51c058b242fc9e451cb1e0c2e7fe34", "content_id": "651e2196da43f369602695a0a3bfe46e53ee3a24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3283, "license_type": "no_license", "max_line_length": 97, "num_lines": 100, "path": "/basic.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "\n# 999 한글 file 첨부하기\n# import os\n# import win32com.client as win32\n# hwp = win32.gencache.EnsureDispatch(\"HWPFrame.HwpObject\")\n# hwp.Run(\"FileNew\")\n# hwp.RegisterModule(\"FilePathCheckDLL\", \"SecurityMoudle\")\n# hwp.Open(r\"C:\\\\Users\\\\sungshin\\\\Documents\\\\성신가족 여러분께.hwp\")\n# \n# BASE_DIR = r\"C:\\\\Users\\\\sungshin\\\\Documents\"\n# 첨부파일리스트 = os.listdir(BASE_DIR)\n# \n# def addfile(path):\n# hwp.HAction.GetDefault(\"InsertFile\", hwp.HParameterSet.HInsertFile.HSet)\n# hwp.HParameterSet.HInsertFile.filename = path\n# hwp.HParameterSet.HInsertFile.KeepSection = 1\n# hwp.HParameterSet.HInsertFile.KeepCharshape = 1\n# hwp.HParameterSet.HInsertFile.KeepParashape = 1\n# hwp.HParameterSet.HInsertFile.KeepStyle = 1\n# hwp.HAction.Execute(\"InsertFile\", hwp.HParameterSet.HInsertFile.HSet)\n# hwp.HAction.Run(\"MoveTopLevelEnd\")\n# return\n# \n# hwp.MovePos(3)\n# \n# for i in 첨부파일리스트:\n# if i.endswith(\"hwp\"):\n# addfile(os.path.join(BASE_DIR, i))\n\n\n# 999 클래스 이름으로 해당html 가져오기\n# import time\n# # 크롬 브라우저 조작을 위한 모듈\n# from selenium import webdriver\n\n# # 크롤링할 url 주소\n# url = \"https://www.instagram.com/explore/tags/python/\"\n#\n# driver = webdriver.Chrome('chromedriver.exe')\n# # 암묵적으로 웹 자원을 (최대) 5초 기다리기\n# driver.implicitly_wait(5)\n# # 크롬 브라우저가 실행되며 해당 url로 이동한다.\n# driver.get(url)\n# # 총 게시물 수를 클래스 이름으로 찾기\n# totalCount = driver.find_element_by_class_name('g47SY').text\n# print(\"총 게시물:\", 
totalCount)\n\n#999 웹상의 요소값 가져오기 selector 특정 text 값을 쉽게\n#from selenium import webdriver\n#\n# driver = webdriver.Chrome('./chromedriver')\n#\n# driver.get('http://v.media.daum.net/v/20170202180355822')\n#\n# # 클래스가 tit_view인 h3태그\n# title = driver.find_element_by_css_selector(\"h3.tit_view\")\n# print (title.text)\n# driver.quit()\n\n# 999 웹 입력 클릭\n# from selenium import webdriver\n# import time\n#\n# driver = webdriver.Chrome('./chromedriver')\n#\n# # 파파고 접속\n# driver.get('https://papago.naver.com/')\n#\n# # 번역할 문장 입력\n# driver.find_element_by_xpath('//*[@id=\"sourceEditArea\"]').send_keys('II I love koala study')\n# # 가끔 실행오류가 나는 경우가 생기고, send key에서 첫글자는 전송이 안되는 문제 있음.\n# # 번역 버튼 클릭\n# driver.find_element_by_css_selector('#btnTranslate').click()\n\n# 999 폴더 file 핸들링\n# import os\n#\n# def search(dirname):\n# try:\n# filenames = os.listdir(dirname)\n# for filename in filenames:\n# full_file_name = os.path.join(dirname, filename)\n# if os.path.isdir(full_file_name):\n# search(full_file_name)\n# else:\n# ext = os.path.splitext(full_file_name)[-1]\n# if ext == '.py':\n# print(full_file_name)\n# except PermissionError:\n# pass\n#\n# search(\"c:/\")\n\n# 999 폴더 file 핸들링2\n# import os\n#\n# for(path, dir, filenames) in os.walk(\"c:/\"):\n# for filename in filenames:\n# ext = os.path.splitext(filename)[-1]\n# if(ext == \".py\"):\n# print(\"%s%s\" %(path, filename))" }, { "alpha_fraction": 0.5804612636566162, "alphanum_fraction": 0.5998963713645935, "avg_line_length": 29.871999740600586, "blob_id": "1cb283537b7ba0040d0917e6f8d82bebb1657c30", "content_id": "0fe6d81e3dda6be1b2e2bb6d8f6d5e8acc7afee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4069, "license_type": "no_license", "max_line_length": 95, "num_lines": 125, "path": "/image_ctl.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "# image file 줄여서 다른 폴더에 넣기\n# import os\n# from PIL import Image\n#\n# import openpyxl\n# from xlsxwriter.utility import xl_col_to_name\n# import glob\n#\n# #img_list = glob.glob(r\"C:\\Users\\sungshin\\Documents\\bbb\\*.jpg\")\n#\n# Source_Dir = r\"C:\\Users\\sungshin\\Documents\\bbb\"\n# img_list = os.listdir(Source_Dir)\n# #\n# Target_Dir = r\"C:\\Users\\sungshin\\Documents\\ccc\"\n# num = 0\n# for img_file in img_list:\n# img = Image.open(os.path.join(Source_Dir, img_file))\n# num = num + 1\n# file_name = str(num) + \".jpg\"\n# img.resize((200,200)).save(os.path.join(Target_Dir, file_name))\n#\n\n\n# 999 이미지 file 불러와 엑셀file에 집어 넣기\n# import os\n# #from PIL import Image\n# from openpyxl.drawing.image import Image\n# import openpyxl\n# from xlsxwriter.utility import xl_col_to_name\n# import glob\n#\n# #img_list = glob.glob(r\"C:\\Users\\sungshin\\Documents\\bbb\\*.jpg\")\n#\n# Source_Dir = r\"C:\\Users\\sungshin\\Documents\\bbb\"\n# # img_list = os.listdir(Source_Dir)\n# #\n# Target_Dir = r\"C:\\Users\\sungshin\\Documents\\ccc\"\n# # num = 0\n# # for img_file in img_list:\n# # img = Image.open(os.path.join(Source_Dir, img_file))\n# # num = num + 1\n# # file_name = str(num) + \".jpg\"\n# # img.resize((200,200)).save(os.path.join(Target_Dir, file_name))\n#\n# wb = openpyxl.load_workbook(r\"E:\\\\2020학년도\\\\0. 
공통업무\\\\기타\\\\운캠 지하101호 자산현황.xlsx\")\n# sheet = wb.active\n#\n# row = 2\n# col = 12\n#\n# for row_data in sheet.rows:\n# val = sheet.cell(row, col).value\n# val1 = sheet.cell(row, col+1).value\n# if val is not None and sheet.cell(row, 14).value is None: # xl_col(0부터시작)....보다 +1 1부터 시작\n# file_name = str(val) + \".jpg\"\n# img_file = Image(os.path.join(Target_Dir, file_name))\n# img_file.anchor = xl_col_to_name(13) + str(row)\n# # pprint('A' + str(col))\n# sheet.add_image(img_file)\n# if val1 is not None and sheet.cell(row, 15).value is None:\n# file_name = str(val1) + \".jpg\"\n# img_file = Image(os.path.join(Target_Dir, file_name))\n# img_file.anchor = xl_col_to_name(14) + str(row)\n# # pprint('A' + str(col))\n# sheet.add_image(img_file)\n# row = row + 1\n# wb.save(r\"E:\\\\2020학년도\\\\0. 공통업무\\\\기타\\\\운캠 지하101호 자산현황.xlsx\")\n#\n#\n##999 이미지가 up 및 down시 회전오류 발생 시 잡아주기\n# from PIL import Image, ExifTags\n# import os\n#\n# for (path, dir, files) in os.walk(r\"C:\\Users\\sungshin\\Documents\\aaaa\"):\n# for file in files:\n# filename = os.path.join(path, file)\n# try:\n# image = Image.open(filename)\n# for orientation in ExifTags.TAGS.keys():\n# if ExifTags.TAGS[orientation] == 'Orientation':\n# break\n# exif = dict(image._getexif().items())\n#\n# if exif[orientation] == 3:\n# image = image.rotate(180, expand=True)\n# elif exif[orientation] == 6:\n# image = image.rotate(270, expand=True)\n# elif exif[orientation] == 8:\n# image = image.rotate(90, expand=True)\n#\n# image.save(filename)\n#\n# except (AttributeError, KeyError, IndexError):\n# # cases: image don't have getexif\n# pass\n\n#image file에 텍스트 넣기\nimport os\nfrom PIL import Image, ImageDraw, ImageFont\n\nSrcDir = \"E:\\개인\\python\\사진\"\nimg_list = os.listdir(SrcDir)\n#\nTgtDir = r\"E:\\개인\\python\\사진\\결과물\"\n\ndef wrtImg(file, data, w, h):\n\n srcImg = Image.open(os.path.join(SrcDir, file))\n TgtImgfile = os.path.join(TgtDir, file)\n txtPosiX =w\n txtPosiY = h\n\n fntSz = 15\n # fnt = ImageFont.truetype(\"sans-serif.ttf\", fntSz, encoding=\"UTF-8\")\n fnt = ImageFont.load_default()\n draw = ImageDraw.Draw(srcImg)\n draw.text((txtPosiY, txtPosiX), data, fill=\"white\", font=fnt)\n srcImg.save(TgtImgfile)\n\n\nfor img_file in img_list:\n if os.path.isdir(os.path.join(SrcDir, img_file)):\n print(\"폴더\")\n else:\n wrtImg(img_file, img_file, 10, 10)\n" }, { "alpha_fraction": 0.6044690012931824, "alphanum_fraction": 0.620594322681427, "avg_line_length": 28.57921600341797, "blob_id": "486f0e9d6c9722805ec6fca02f1727867c11a053", "content_id": "e2118e906edf7f231620d3f7ea4aa69d259c17ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18583, "license_type": "no_license", "max_line_length": 147, "num_lines": 587, "path": "/web_handle.py", "repo_name": "llhbll/basic_source", "src_encoding": "UTF-8", "text": "999 브라우저 띄우지 않고 data 가져오기\nimport requests # import urllib.request\nfrom bs4 import BeautifulSoup\n\nbase_url = 'http://movie.daum.net/moviedb/grade?movieId=2725&type=netizen&page={}'\n\nfor n in range(77):\n url = base_url.format(n + 1)\n webpage = requests.get(url).text # webpage = urllib.request.urlopen(url)와 같음\n source = BeautifulSoup(webpage, 'html5lib')\n reviews = source.find_all('p', {'class': 'desc_review'})\n\n for review in reviews:\n print(review.get_text().strip())\n\n\n999 포탈 자동로그인 IE는 에러가 계속나서 전자결재는 OPEN안함\nimport os\nimport sys\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom 
selenium.webdriver.common.keys import Keys\n\n#pyinstaller --noconsole --onefile --add-binary \"chromedriver.exe\";\".\" gmail_auto.py\nif getattr(sys, 'frozen', False):\n chromedriver_path = os.path.join(sys._MEIPASS, \"chromedriver.exe\")\n driver = webdriver.Chrome(chromedriver_path)\nelse:\n driver = webdriver.Chrome()\n\nurl = 'https://portal.sungshin.ac.kr/sso/login.jsp'\ndriver.get(url)\naction = ActionChains(driver)\ndriver.implicitly_wait(10)\ndriver.find_element_by_css_selector('#loginId_mobile').click()\n\naction.send_keys('2970021').key_down(Keys.TAB).send_keys('lchlshp12*').key_down(Keys.TAB).key_down(Keys.TAB).key_down(Keys.ENTER).perform()\ndriver.implicitly_wait(10)\ntime.sleep(1)\ndriver.get('https://tis.sungshin.ac.kr/comm/nxui/staff/sso.do?menuUid=PORTAL_3201&connectDiv=1')\ndriver.maximize_window()\n\n\n\n999 LHB 중고나라 검색 후 상세리스트(가격) 가져오기 2개의 driver로 coding\nfrom selenium import webdriver\nimport requests, re\nfrom bs4 import BeautifulSoup\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException\nimport time\nfrom openpyxl import Workbook, load_workbook\n\nwb = Workbook()\nsheet1 = wb.active\nsheet1.title = '중고나라 상품' # 시트명\nsheet1.cell(row=1, column=2).value = '제목'\nsheet1.cell(row=1, column=3).value = '작성자'\nsheet1.cell(row=1, column=4).value = '날짜'\nsheet1.cell(row=1, column=5).value = '가격'\n\ndriver = webdriver.Chrome('./chromedriver.exe')\ndriver2 = webdriver.Chrome('./chromedriver.exe')\n\njoonggonara_url = 'https://cafe.naver.com/joonggonara.cafe?iframe_url=/ArticleList.nhn%3Fsearch.clubid=10050146%26search.boardtype=L%26viewType=pc'\n\ndriver.get(joonggonara_url)\n\n# keyword = input(\"키워드 입력 : \")\nsearch_input = driver.find_element_by_css_selector('input#topLayerQueryInput')\nsearch_input.send_keys('갤럭시 탭')\n# search_input.send_keys(keyword)\nsearch_button = driver.find_element_by_css_selector(\"form[name='frmBoardSearch'] > button\")\nsearch_button.click()\n\ntime.sleep(1)\ndriver.switch_to.frame(\"cafe_main\")\n\nrow = 2\n\nfor page in range(1, 2):\n list = driver.find_elements_by_css_selector('#main-area > div:nth-child(7) > table > tbody > tr')\n for item in list:\n title = item.find_element_by_css_selector('a.article').text.strip()\n title = re.sub('[^0-9a-zA-Zㄱ-힗]', '', title)\n writer = item.find_element_by_css_selector('a.m-tcol-c').text.strip()\n ddate = item.find_element_by_css_selector('td.td_date').text.strip()\n link = item.find_element_by_css_selector('a.article').get_attribute('href')\n\n driver2.get(link)\n time.sleep(1)\n driver2.switch_to.frame(\"cafe_main\")\n try:\n cost = driver2.find_element_by_css_selector('span.cost').text\n except NoSuchElementException:\n cost = 'X'\n\n sheet1.cell(row=row, column=2).value = title\n sheet1.cell(row=row, column=2).hyperlink = link\n sheet1.cell(row=row, column=3).value = writer\n sheet1.cell(row=row, column=4).value = ddate\n sheet1.cell(row=row, column=5).value = cost\n\n row = row + 1\n\n if page % 10 == 0:\n driver.find_element_by_link_text('다음').click()\n else:\n next = str(page + 1)\n driver.find_element_by_link_text(next).click()\n\nwb.save(\"./test4.xlsx\")\n\nitem.find_element_by_css_selector('a').click() #가격을 구하기 위해서\n\n\n\n 999 중고나라, 웹크롤링, headless, 키워드 입력, 제외할 문자\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport datetime\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\nimport os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\n\n# 고정 변수\n\nutc_time = datetime.datetime.utcnow()\ntime_gap = 
datetime.timedelta(hours=9)\nkor_time = utc_time + time_gap\ntoday = kor_time.strftime(\"%Y.%m.%d.\")\n\n\nnaver_login_url = 'https://nid.naver.com/nidlogin.login'\njoonggonara_url = 'https://cafe.naver.com/joonggonara.cafe?iframe_url=/ArticleList.nhn%3Fsearch.clubid=10050146%26search.boardtype=L%26viewType=pc'\nkeyword_list = [\n '소니 a5100'\n]\nresult_dic = {}\nfor keyword in keyword_list:\n result_dic[keyword] = {\n 'title': [],\n 'writer': [],\n 'href': [],\n 'date': []\n }\nexception_flag = 0\nexception_title_keyword_list = ['삽니다', '사기', '부산', '대전', '대구', '사 기', '사 기', 'ㅅ ㅏㄱ ㅣ', '완료', '경남', '창원']\nexception_writer_keyword_list = []\n\n# Headless chrome 사용법에 대해서는 아래 URL을 참고한다.\n# https://beomi.github.io/gb-crawling/posts/2017-09-28-HowToMakeWebCrawler-Headless-Chrome.html\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('headless')\noptions.add_argument('window-size=1920x1080')\noptions.add_argument(\"disable-gpu\")\n\n# driver = webdriver.Chrome('./chromedriver', options=options)\ndriver = webdriver.Chrome('./chromedriver')\n\n# driver.get('http://naver.com')\n# driver.implicitly_wait(3)\n# driver.get_screenshot_as_file('naver_main_headless.png')\n\n# 네이버 로그인\n# 생략\n\n# 중고나라 접속 및 검색어 크롤링\ndriver.get(joonggonara_url)\ndriver.implicitly_wait(3)\n# driver.get_screenshot_as_file('naver_main_headless.png')\n\nsearch_input = driver.find_element_by_css_selector('input#topLayerQueryInput')\nfor keyword in keyword_list:\n search_input.send_keys(keyword)\n search_button = driver.find_element_by_css_selector(\"form[name='frmBoardSearch'] > button\")\n search_button.click()\n driver.implicitly_wait(3)\n time.sleep(2) # 이것을 안해줄시는 page source가 바로 전 page를 끌어와서 진행됨.\n # driver.get_screenshot_as_file('naver_main_headless.png')\n iframe = driver.find_element_by_css_selector('iframe#cafe_main')\n driver.switch_to.frame(iframe)\n\n # show_element = driver.find_element_by_xpath(\"\"\"//*[@id=\"listSizeSelectDiv\"]/a\"\"\")\n # show_element.click()\n # show_50_element = driver.find_element_by_xpath(\"\"\"//*[@id=\"listSizeSelectDiv\"]/ul/li[7]/a\"\"\")\n # show_50_element.click()\n\n req = driver.page_source\n html = BeautifulSoup(req, 'html.parser')\n print(html)\n title_list = []\n writer_list = []\n href_list = []\n date_list = []\n # driver.get_screenshot_as_file('naver_main_headless.png')\n for tag in html.select('div#content-area div#main-area table tbody tr'):\n if len(tag.select('div.inner_list > a.article')) < 1:\n continue\n\n title = tag.select('div.inner_list > a.article')[0].text.strip()\n print(title)\n number = tag.select('div.inner_number')[0].text.strip()\n writer = tag.select('td.p-nick > a.m-tcol-c')[0].text.strip()\n date = tag.select('td.td_date')[0].text.strip()\n\n if ':' in date:\n date = today\n\n # 제목 예외처리\n for exception_title_keyword in exception_title_keyword_list:\n if exception_title_keyword in title:\n exception_flag = 1\n break\n # 글쓴이 예외처리\n for exception_writer_keyword in exception_writer_keyword_list:\n if exception_writer_keyword == writer:\n exception_flag = 1\n break\n\n if exception_flag == 1:\n exception_flag = 0\n continue\n\n href = 'https://cafe.naver.com/joonggonara/' + number\n # print(title,\"//\",writer,\"//\",date)\n # print(href)\n\n if writer in writer_list:\n pass\n else:\n title_list.append(title)\n writer_list.append(writer)\n href_list.append(href)\n date_list.append(date)\n result_dic[keyword]['title'] = title_list\n result_dic[keyword]['writer'] = writer_list\n result_dic[keyword]['href'] = href_list\n result_dic[keyword]['date'] = 
date_list\n driver.switch_to.default_content()\n# driver.quit()\n# print(result_dic)\n\n# # 파일로 기록\n#\n#\n# # 1주일 이전 파일 삭제\n#\n#\n# # 메일 발송\n# # 메일 본문 작성 먼저\n# mail_html = \"<html><head></head><body>\" + kor_time.strftime(\n# \"%Y/%m/%d %H:%M:%S\") + \"<br>\" # YYYY/mm/dd HH:MM:SS 형태의 시간 출력\n# for keyword in keyword_list:\n# mail_html += \"<h1>\" + keyword + \" 크롤링 결과</h1>\"\n# for i, v in enumerate(result_dic[keyword]['title']):\n# mail_html += \"<p><a href='\" + result_dic[keyword]['href'][i] + \"'>\" + v + \" _ \" + result_dic[keyword]['writer'][\n# i] + \"</a></p>\"\n# mail_html += \"<p>\" + result_dic[keyword]['date'][i] + \"</p><br>\"\n# mail_html += \"<br>\"\n# mail_html += \"</body></html>\"\n# print(mail_html)\n\n# # Create .env file path.\n# dotenv_path = join(dirname(__file__), '.env')\n# # Load file from the path.\n# load_dotenv(dotenv_path)\n# # open SMTP\n# smtp = smtplib.SMTP('smtp.gmail.com', 587)\n# smtp.ehlo() # say Hello\n# smtp.starttls() # TLS 사용시 필요\n# smtp.login('[email protected]', os.getenv('PASSWORD'))\n#\n# # main html\n# msg = MIMEText(mail_html, 'html')\n# # title\n# msg['Subject'] = '중고나라 크롤링 결과 보고'\n# # from\n# msg['From'] = os.getenv(\"FROM\")\n# # to\n# msg['To'] = os.getenv(\"TO\")\n# # from / to / msg\n# smtp.sendmail(msg['From'], msg['To'].split(','), msg.as_string())\n#\n# smtp.quit()\n#\n# # © 2020\n# # GitHub, Inc.\n# # Terms\n# # Privacy\n# # Security\n# # Status\n# # Help\n# # Contact\n# # GitHub\n# # Pricing\n# # API\n# # Training\n# # Blog\n# # About\n\n\n\n# 999 여러페이지 클릭 크롤링 중고나라에서 크롤링으로 데이터 뽑기\nfrom selenium import webdriver\nimport time\n\nkeyword = input(\" 키워드 입력 : \")\ndriver = webdriver.Chrome('./chromedriver')\ndriver.get(\n 'https://cafe.naver.com/joonggonara?iframe_url=/ArticleList.nhn%3Fsearch.clubid=10050146%26search.menuid=1900%26search.boardtype=L')\nbb2 = []\ndataset = []\ntime.sleep(2)\nsearch_input = driver.find_element_by_css_selector('input#topLayerQueryInput')\nsearch_input.send_keys(keyword)\n\ntime.sleep(1)\ndriver.switch_to.frame(\"cafe_main\")\nfor l in range(1, 71):\n tag = driver.find_elements_by_xpath('//div[@class=\"article-board m-tcol-c\"]//table/tbody/tr')\n bb2 = tag\n time.sleep(2)\n\n for i in range(len(bb2)):\n dataset.append(bb2[i].text)\n\n if l % 10 == 0:\n c10 = driver.find_element_by_link_text('다음')\n c10.click()\n else:\n a = str(l + 1)\n c = driver.find_element_by_link_text(a)\n c.click()\n time.sleep(2)\n\ndriver.quit()\nprint(dataset[:20])\n\n\n# 999 엑셀읽기 - 가수명 노래명으로 구글검색 - 해당하는 결과값이 있는지 점검 - 해당결과값 클릭하여 상세정보 읽기\nimport time\nfrom selenium import webdriver\n# import xlrd\nfrom openpyxl import Workbook, load_workbook\nfrom xlutils.copy import copy\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException\nimport os\n\n# 엑셀 파일 열기\nwb = load_workbook(\"test2.xlsx\")\nsheet = wb.active\n\n# driver로 wiki 접속\ndriver = webdriver.Chrome(\"./chromedriver.exe\")\n\nfor i in range(2, sheet.max_row+1): # 1행은 구분자 2행부터 data\n driver.get(\"https://google.com\")\n\n # 검색 키워드 셀에서 가져오기\n song = sheet.cell(row=i, column=2).value\n artist = sheet.cell(row=i, column=3).value\n keyword = str(song) + ' ' + str(artist) # 숫자인 경우가 있어서 str()\n\n # 구글 검색\n elem = driver.find_element_by_xpath('//*[@id=\"tsf\"]/div[2]/div[1]/div[1]/div/div[2]/input')\n\n # 공통\n elem.send_keys(keyword)\n elem.submit()\n\n ##### 구글 검색 결과 페이지 #####\n try:\n #box = driver.find_element_by_xpath(\"//*[@id='rso']\")\n # list = box.find_elements_by_tag_name('h3')\n list = driver.find_elements_by_css_selector('div.g')\n for 
item in list:\n # print(item.text)\n if ('Wikipedia' in item.text):\n p = item.find_element_by_tag_name('a')\n p.click()\n break\n\n\n except NoSuchElementException:\n\n box = driver.find_element_by_xpath(\"//div[@id='rso']/div/div\")\n # list = box.find_elements_by_tag_name('h3')\n list = box.find_elements_by_css_selector('div.g')\n for item in list:\n # print(item.text)\n if ('Wikipedia' in item.text):\n p = item.find_element_by_tag_name('a')\n p.click()\n break\n\n ##### 위키 곡정보 페이지 #####\n # writer,producer\n print(\"//////////////////////////// \" + str(i) + \"행을 크롤링중입니다.\")\n print(\"//////////////////////////// \" + \"노래명, 가수 : \" + keyword)\n\n try:\n table = driver.find_element_by_tag_name('table')\n tbody = table.find_element_by_tag_name(\"tbody\")\n trs = tbody.find_elements_by_tag_name(\"tr\")\n\n except NoSuchElementException:\n print(\" [예외 발생] 표 없음 \")\n continue\n\n for tr in trs:\n\n if \"Songwriter\" in tr.text:\n print(tr.text)\n a = \"\"\n if tr.find_elements_by_tag_name(\"li\"):\n lis = tr.find_elements_by_tag_name(\"li\")\n for li in lis:\n a = a + \",\" + li.text\n else:\n o = tr.find_elements_by_tag_name(\"td\")\n a = a + \",\" + o[0].text\n\n a = a[1:]\n sheet.cell(i, 4).value = a\n\n if \"Producer\" in tr.text:\n print(tr.text)\n a = \"\"\n if tr.find_elements_by_tag_name(\"li\"):\n lis = tr.find_elements_by_tag_name(\"li\")\n for li in lis:\n a = a + \",\" + li.text\n else:\n o = tr.find_elements_by_tag_name(\"td\")\n a = a + \",\" + o[0].text\n\n a = a[1:]\n sheet.cell(i, 5).value = a\n\n# 저장\nwb.save('test2.xlsx')\nview\nrawwiki_crawler\nhosted\nwith ❤ by GitHub\n\n\n999 웹상의 반복되는 찾고자하는 요소들을 찾아 핸들링 색 찾기 게임 [btn.value_of_css_property('background-color') for btn in btns]\nfrom selenium import webdriver\nfrom pprint import pprint\nimport time\nfrom collections import Counter\n\ndriver = webdriver.Chrome('chromedriver.exe')\ndriver.get('http://zzzscore.com/color/')\ndriver.implicitly_wait(300)\n\nbtns = driver.find_elements_by_xpath('//*[@id=\"grid\"]/div')\n\ndef analysis():\n btns_rgba = [btn.value_of_css_property('background-color') for btn in btns]\n #pprint(btns_rgba)\n result = Counter(btns_rgba)\n #print(result)\n for key, value in result.items():\n if value == 1:\n answer = key\n break\n else:\n answer = None\n print(\"정답을 찾을 수 없습니다.\")\n\n if answer :\n index = btns_rgba.index(answer)\n btns[index].click()\n\nif __name__ == \"__main__\":\n start = time.time()\n while time.time() - start <= 60:\n analysis()\n\n\n유튜브 동영상 다운받기\nfrom pytube import YouTube\nyt = YouTube('https://www.youtube.com/watch?v=C0xIwnh8fOs') # 유튜브 영상 URL 입력\nyt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download()\n\n\n\n999 웹상의 반복되는 찾고자하는 요소들을 찾아 핸들링 1to50게임 숫자 빨리 찾기 find_elements_by_xpath\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.Chrome('chromedriver.exe')\ndriver.get('https://zzzscore.com/1to50')\ndriver.implicitly_wait(300)\nnum = 1\n\nbtns = driver.find_elements_by_xpath('//*[@id=\"grid\"]/div[*]')\n\nfor i in range(0, 50):\n for j in range(0, len(btns)):\n print('j = ', j)\n if num == int(btns[j].text):\n btn = btns[j]\n btn.click()\n\n time.sleep(0.1)\n # print(num, ' ', j)\n num = num + 1\n break\n btns = driver.find_elements_by_xpath('//*[@id=\"grid\"]/div[*]')\n print(num, ' ', j, ' ', btns[j-1].text)\n\n전체에서 필요한 list를 얻고 list를 가지고 하나씩 뽑아서 분석\nimport requests\nfrom bs4 import BeautifulSoup\n\nreq = requests.get('http://tv.naver.com/r')\nraw = req.text\nhtml = BeautifulSoup(raw, 'html.parser')\n\ninfos = 
html.select('div.cds_area > div')\nprint(infos[0])\nfor info in infos:\n title = info.select_one('dt.title > a').text\n chn = info.select_one('dd.chn').text[0:-1]\n hit = info.select_one('span.hit').text\n like = info.select_one('span.like').text\n\n print(chn, '/', title, '/', hit, '/', like)\n\n999 전체에서 통으로 원하는data를 얻고 거기서 list를 받아서 하나씩 분석 / 특수문자처리 / 이미지 웹에서 끌어와 저장 / 이미지 폴더에서 끌어오기 이미지 엑셀에 저장\nfrom bs4 import BeautifulSoup as bs\nfrom pprint import pprint\nimport requests, re, os\nfrom urllib.request import urlretrieve\nfrom openpyxl.drawing.image import Image\nfrom openpyxl import Workbook\n\nwb = Workbook()\nsheet1 = wb.active\nsheet1.title = '네이버 웹툰 완결편'\n\ntry:\n if not (os.path.isdir('image')):\n os.mkdir(os.path.join('image'))\nexcept OSError as e:\n if e.errno != errno.EEXITS:\n print(\"폴더생성실패\")\n exit()\nurl = 'https://comic.naver.com/webtoon/finish.nhn'\nhtml = requests.get(url)\nsoup = bs(html.text, 'html.parser')\nall_data = soup.find('div', {'class':'list_area'})\n\ndata_list = all_data.findAll('li')\n\ncol = 1\nrow = 1\nfor data in data_list:\n img_data = data.find('img')\n img_src = img_data['src']\n a_list = data.find('a')\n title = a_list['title']\n title = re.sub('[^0-9a-zA-Zㄱ-힗]', '', title)\n link = \"https//commic.naver.com\" + a_list['href']\n strong = data.find('strong').text\n\n# urlretrieve(img_src, './image/'+title+'.gif')\n img_title = './image/'+title+'.gif'\n img_file = Image(img_title)\n print(img_title)\n img_file.anchor= 'C10'\n #cell = sheet1.cell(row=10, column=1)\n sheet1.add_image(img_file)\n# sheet1.cell()\n# col = col + 1\n row = row + 1\n break\nwb.save(\"./webtoon.xlsx\")\n\n" } ]
13
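The basic_source files above repeat one requests + BeautifulSoup pagination idiom several times, but every copy of it is commented out. Below is a minimal runnable sketch of that idiom; the URL template and the desc_review class come from web_handle.py, while the three-page cap is an arbitrary illustration.

# Paginated-scraping sketch in the style of web_handle.py (hypothetical
# page cap of 3; URL template and CSS class taken from the file above).
import requests
from bs4 import BeautifulSoup

base_url = 'http://movie.daum.net/moviedb/grade?movieId=2725&type=netizen&page={}'

for n in range(3):
    # fetch one page of reviews, then parse it with the same parser the source uses
    webpage = requests.get(base_url.format(n + 1)).text
    source = BeautifulSoup(webpage, 'html5lib')
    for review in source.find_all('p', {'class': 'desc_review'}):
        print(review.get_text().strip())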
katevolkova1991/geektasks
https://github.com/katevolkova1991/geektasks
421a55e4755eb06f35ae43649db6dd462d5d7e82
8c3902865a7f2c355beaedcd1b5e4108f22be963
efcbdba8d222d3cd5b8faa098aba59473e818da7
refs/heads/master
2021-07-08T15:52:49.466644
2020-11-17T18:35:02
2020-11-17T18:35:02
218962875
0
0
null
2019-11-01T10:18:01
2019-12-05T15:16:41
2020-10-13T20:20:46
Python
[ { "alpha_fraction": 0.5992217659950256, "alphanum_fraction": 0.6046692728996277, "avg_line_length": 27.577777862548828, "blob_id": "2729425a740b948885f415c500301e480a7f2992", "content_id": "009b1f3aa7fab568bd6acfd003f95f8874ee46b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1285, "license_type": "no_license", "max_line_length": 84, "num_lines": 45, "path": "/volkova_kate_lesson_8_dz456.py", "repo_name": "katevolkova1991/geektasks", "src_encoding": "UTF-8", "text": "class Storage:\n my_list_of_equipment = []\n def __init__(self, storage_spaces):\n self.storage_spaces = storage_spaces\n\n def free_storage_spaces(self, spaces):\n return self.storage_spaces - spaces\n\n\nclass OfficeEquipment:\n def __init__(self, **kwargs):\n self.color = kwargs.get('color', None)\n self.works = kwargs.get('works', None)\n\n\nclass Printer(OfficeEquipment):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.duplex_printing = kwargs.get('duplex_printing', None)\n self.type = kwargs.get('type', None)\n\n\n\nclass Scanner(OfficeEquipment):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.pixels_per_inch = kwargs.get('pixels_per_inch', None)\n\n\nclass Copier(OfficeEquipment):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.duplex_printing = kwargs.get('duplex_printing', None)\n self.speed = kwargs.get('speed', None)\n\n\nif __name__ == '__main__':\n a = Storage(20)\n printer = Printer(color='yes', works='yes', duplex_printing='yes', type='lazer')\n scanner = Scanner(color='yes', works='no', pixels_per_inch=450)\n copier = Copier(color='yes', works='yes', speed=60)\n\n print(a)\n for el in printer:\n print(el)" } ]
1
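The lesson file above threads constructor keyword arguments up the class hierarchy with super().__init__(**kwargs). The sketch below isolates that pattern; the class and attribute names here are illustrative stand-ins, not taken from the repository.

# Minimal sketch of the kwargs-forwarding constructor pattern used above.
class Equipment:
    def __init__(self, **kwargs):
        # shared attributes are picked out of kwargs by the base class
        self.color = kwargs.get('color')


class Printer(Equipment):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # forward everything upward unchanged
        # the subclass keeps only the keys it cares about
        self.duplex = kwargs.get('duplex')


p = Printer(color='black', duplex=True)
print(p.color, p.duplex)  # -> black True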
yanbiaoyi/python_get_weather
https://github.com/yanbiaoyi/python_get_weather
3fce2c526df4a1ab1aa7446aaf8ed2017dbdbfe3
82c5cdfdcfd1d56ffcf40e167a8028d339127bac
0d99a078814ae17a04f67819248e785d2233c132
refs/heads/master
2022-11-03T03:32:48.539468
2020-06-19T09:02:52
2020-06-19T09:02:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6288998126983643, "alphanum_fraction": 0.6371099948883057, "avg_line_length": 19, "blob_id": "9f34a9c9d33230e6574a890d80245a4c452eb9a4", "content_id": "f7301755cdd6d502bb55e0559331d414565f6a00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 115, "num_lines": 29, "path": "/insert_mysql.py", "repo_name": "yanbiaoyi/python_get_weather", "src_encoding": "UTF-8", "text": "#coding:utf-8\r\nimport pymysql\r\n\r\ndef insertTable(datas):\r\n\t\r\n\t#创建数据库连接\r\n\tconnection = pymysql.connect(\"localhost\", \"root\", \"rootroot\", \"test\")\r\n\r\n\t#创建 cursor游标\r\n\tcursor = connection.cursor()\r\n\r\n\t#建设sql语句\r\n\tsql=\"insert into weather(w_id, w_date, w_detail, w_temperature_low, w_temperature_high) value(null, %s,%s,%s,%s)\"\r\n\tcursor.executemany(sql,datas)\r\n\tconnection.commit()\r\n\tprint('sql was insert success')\t\r\n\r\n\t\"\"\"\r\n\texcept Exception as e:\r\n\t\tprint('insert error')\r\n\t\tconnection.rollback()\r\n\t\"\"\"\r\n\tcursor.close()\r\n\tconnection.close()\r\n\"\"\"\r\nif __name__=='__main__':\r\n\t#data=[2,2,2,2]\r\n\tinsertTable()\t\r\n\"\"\"\r\n" }, { "alpha_fraction": 0.5544417500495911, "alphanum_fraction": 0.5834773778915405, "avg_line_length": 29.40217399597168, "blob_id": "287169fbc8b3e315a74695045fb4dcaf6946d1d0", "content_id": "c28d6e49e9f651ac5addac051229db9272f5a42d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3091, "license_type": "no_license", "max_line_length": 114, "num_lines": 92, "path": "/getweather.py", "repo_name": "yanbiaoyi/python_get_weather", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 18 09:35:13 2020\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\nimport requests \r\nimport time\r\nimport random\r\nimport socket\r\nimport http.client\r\nimport pymysql\r\nfrom bs4 import BeautifulSoup\r\nimport csv\r\n#import cursor_mysql\r\nimport insert_mysql\r\n\r\ndef getContent(url , data = None):\r\n header={\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate, sdch',\r\n 'Accept-Language': 'zh-CN,zh;q=0.8',\r\n 'Connection': 'keep-alive',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'\r\n } # request 的请求头\r\n timeout = random.choice(range(80, 180))\r\n while True:\r\n try:\r\n rep = requests.get(url,headers = header,timeout = timeout) #请求url地址,获得返回 response 信息\r\n rep.encoding = 'utf-8'\r\n break\r\n except socket.timeout as e: # 以下都是异常处理\r\n print( '3:', e)\r\n time.sleep(random.choice(range(8,15)))\r\n\r\n except socket.error as e:\r\n print( '4:', e)\r\n time.sleep(random.choice(range(20, 60)))\r\n\r\n except http.client.BadStatusLine as e:\r\n print( '5:', e)\r\n time.sleep(random.choice(range(30, 80)))\r\n\r\n except http.client.IncompleteRead as e:\r\n print( '6:', e)\r\n time.sleep(random.choice(range(5, 15)))\r\n print('request success,get weather data')\r\n return rep.text # 返回的 Html 全文\r\n\r\nif __name__ == '__main__':\r\n url ='http://www.weather.com.cn/weather/101210101.shtml'\r\n html = getContent(url) # 调用获取网页信息\r\n print('my frist python file-get weather data')\r\n\r\n\r\ndef getdata(html_test):\r\n final = []\r\n bs = BeautifulSoup(html_test, \"html.parser\") # 创建BeautifulSoup对象\r\n body = bs.body #获取body\r\n data = body.find('div',{'id': '7d'})\r\n ul = 
data.find('ul')
    li = ul.find_all('li')

    for day in li:
        temp = []
        date = day.find('h1').string
        temp.append(date) # append the date
        inf = day.find_all('p')
        weather = inf[0].string # weather description
        temp.append(weather)
        t_high = inf[1].find('span').string # high temperature; this element can be missing at night, so be careful
        t_low = inf[1].find('i').string # low temperature
        temp.append(t_low)
        temp.append(t_high)
        final.append(temp)
    print('getdata success')
    return final

def writeData(data,name):
    with open(name,'a',errors='ignore',newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(data)
    print('write_csv success')

if __name__ == '__main__':
    url = 'http://www.weather.com.cn/weather/101210101.shtml'
    html = getContent(url)
    result = getdata(html) # parse the page and pull out the needed data
    writeData(result, r'D:\py_work\weather.csv') # write the data to a CSV file
    insert_mysql.insertTable(result) # insert the data into MySQL
    print('my first python file - write to csv')
" }, { "alpha_fraction": 0.8074073791503906, "alphanum_fraction": 0.8074073791503906, "avg_line_length": 26, "blob_id": "6960a60b8a05631f1257f2fb0fad822d35640ab3", "content_id": "0181e16d769509b2fb6a5d47af82e137b9bd778d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 219, "license_type": "no_license", "max_line_length": 53, "num_lines": 5, "path": "/README.md", "repo_name": "yanbiaoyi/python_get_weather", "src_encoding": "UTF-8", "text": "# I am rocky
welcome to my code world; let me keep getting better.
Fetches the upcoming weather forecast.
getweather is the main module, responsible for crawling the data.
insert_mysql is the database module, responsible for writing and storing the data.
" } ]
3
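insert_mysql.py above batches rows with cursor.executemany and %s placeholders. A self-contained sketch of that pattern follows; the weather table columns come from the file itself, while the credentials and the single sample row are hypothetical placeholders.

# Bulk-insert sketch mirroring insert_mysql.py (credentials and the sample
# row are hypothetical; only the executemany pattern follows the source).
import pymysql

rows = [('2020-06-19', 'sunny', '20C', '28C')]  # (date, detail, low, high)
connection = pymysql.connect(host='localhost', user='root', password='secret', database='test')
try:
    with connection.cursor() as cursor:
        # executemany parameterizes every row, avoiding manual string concatenation
        cursor.executemany(
            "insert into weather(w_id, w_date, w_detail, w_temperature_low, w_temperature_high) "
            "value(null, %s,%s,%s,%s)", rows)
    connection.commit()
finally:
    connection.close()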
pombredanne/OSSMonitor
https://github.com/pombredanne/OSSMonitor
7b346e708214b50e73cd48d4c520077dbaf8a2c3
36fed4092df61926d976716690cc40bdf9ecce3e
de50bf516e26c63c6d19dcb1df4d9ca6fee65d6f
refs/heads/master
2021-09-09T14:56:22.265508
2018-03-17T05:50:55
2018-03-17T05:50:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40991082787513733, "alphanum_fraction": 0.41760262846946716, "avg_line_length": 41.92982482910156, "blob_id": "c82670a936872e3933aae5e48649bf1e775a2e38", "content_id": "e09db11d479824c6a4aac4aa088b66fff47ea39c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14691, "license_type": "no_license", "max_line_length": 185, "num_lines": 342, "path": "/crawler/OcamlSpider/ocamel/spiders/OcamelSpider.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nimport requests,time,random\n\nimport scrapy\n\nimport json\nimport urllib\n\n\nfrom ocamel.items import OcamelItem\nfrom ocamel.items import OcamlVersionItem\n\n\n\n\nclass OcamelSpider(scrapy.Spider):\n name = 'OcamelSpider'\n # custom_settings = {'DOWNLOAD_DELAY': 1, 'DOWNLOAD_TIMEOUT':30000}\n # start_urls = ['http://go-search.org/search']\n # host_prefix = \"http://go-search.org\"\n\n def __init__(self, **kwargs):\n self.start_urls = ['http://opam.ocaml.org/packages/']\n self.package_prefix = 'http://opam.ocaml.org/packages/'\n\n super(OcamelSpider, self).__init__(self, **kwargs)\n\n\n\n def __str__(self):\n return \"OcamelSpider\"\n\n def start_requests(self):\n for unit in self.start_urls:\n print unit\n yield scrapy.Request(url = unit, callback = self.parseAllLinks)\n\n\n\n # parse all the ocaml package's urls\n def parseAllLinks(self, response):\n trs = response.xpath(\"//table[@id='packages']/tbody/tr\")\n prefix = 'http://opam.ocaml.org/packages/'\n print len(trs), ' len'\n for tr in trs:\n packageNames = tr.xpath('./td[1]/a/text()').extract()\n latestVersions = tr.xpath('./td[2]/text()').extract()\n descs = tr.xpath('./td[3]/text()').extract()\n if len(packageNames) == len(latestVersions) and len(packageNames) == len(descs) and len(packageNames) == 1:\n nts = prefix + packageNames[0].strip()\n yield scrapy.Request(url = nts,callback = self.parseFirstDetails, meta = {'name':packageNames[0].strip(),'desc':descs[0].strip(), 'latest_v':latestVersions[0].strip()})\n\n # parse the ocaml package detail page\n def parseFirstDetails(self, response):\n nextLinkMap = {} #store other versions' url (version name is the key, url for the value)\n tts = {}\n vinfo = {}\n projectUrl = response.url\n tts['project_url'] = projectUrl\n tts['project_name'] = response.meta['name']\n tts['project_desc'] = response.meta['desc']\n tts['latest_version'] = response.meta['latest_v']\n tts['authors'] = ''\n tts['official_license'] = ''\n tts['homepage'] = ''\n tts['issue_tracker'] = ''\n tts['maintainer'] = ''\n tts['versions'] = []\n tts['other_urls'] = []\n\n trs = response.xpath('//table[@class=\"table\"]/tbody/tr')\n vname = ''\n\n\n\n\n ntlinks = response.xpath('//ul[@class=\"nav nav-pills\"]/li/a')\n for ntlink in ntlinks:\n linkUrls= ntlink.xpath('./@href').extract()\n linkNames = ntlink.xpath('./text()').extract()\n if len(linkUrls) == len(linkNames) and len(linkUrls) == 1:\n linkUrl = linkUrls[0].strip().replace('../','')\n\n linkName = linkNames[0].strip()\n print 'linkName ', linkName\n if linkUrl != '#':\n linkUrl = self.package_prefix + linkUrl\n nextLinkMap[linkName] = linkUrl\n\n else:\n vname = linkName.replace('version', '').strip()\n\n tts['latest_version'] = vname\n vinfo['name'] = vname\n\n vinfo['available'] = ''\n vinfo['published_date'] = ''\n vinfo['source_url'] = ''\n vinfo['edit_url'] = ''\n vinfo['statistics'] = 0\n vinfo['depends'] = {}\n vinfo['optional_depends'] = {}\n\n print len(trs), ' ~~~~~~'\n for tr 
in trs:\n titles = tr.xpath('./th/text()').extract()\n if len(titles) > 0:\n ttsp = titles[0].strip()\n if ttsp == 'Author':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n tts['authors'] = contents[0].strip()\n elif ttsp == 'License':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n tts['official_license'] = contents[0].strip()\n elif ttsp == 'Homepage':\n contents = tr.xpath('./td/a/text()').extract()\n if len(contents) > 0:\n tts['homepage'] = contents[0].strip()\n elif ttsp == 'Issue Tracker':\n contents = tr.xpath('./td/a/text()').extract()\n if len(contents) > 0:\n tts['issue_tracker'] = contents[0].strip()\n tts['other_urls'] = [contents[0].strip().replace('/issues', '')]\n elif ttsp == 'Maintainer':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n tts['maintainer'] = contents[0].strip()\n elif ttsp == 'Available':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n vinfo['available'] = contents[0].strip()\n elif ttsp == 'Published':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n vinfo['published_date'] = contents[0].strip()\n elif ttsp == 'Source [http]':\n contents = tr.xpath('./td/a/text()').extract()\n if len(contents) > 0:\n vinfo['source_url'] = contents[0].strip()\n elif ttsp == 'Statistics':\n contents = tr.xpath('./td/strong/text()').extract()\n if len(contents) > 0:\n gh = contents[0].strip().replace(',', '')\n if gh == 'once':\n vinfo['statistics'] = 1\n else:\n vinfo['statistics'] = int(gh)\n\n\n elif ttsp == 'Edit':\n contents = tr.xpath('./td/a/text()').extract()\n if len(contents) > 0:\n vinfo['edit_url'] = contents[0].strip()\n elif ttsp == 'Optional dependencies':\n outTable = tr.xpath('./td/table')\n dependObj = {}\n dependsList = []\n if len(outTable) > 0:\n cot = 0\n innerTrs = outTable[0].xpath('./tr/td/table/tr')\n for itr in innerTrs:\n if cot == 0:\n nss = itr.xpath('./th/text()').extract()\n dependObj['operator'] = ''\n if len(nss) > 0:\n dependObj['operator'] = nss[0].strip()\n else:\n artin = itr.xpath('./td[1]/a/text()').extract()\n rangeN = itr.xpath('./td[2]/text()').extract()\n artivm = itr.xpath('./td[2]/a/text()').extract()\n if len(artin) == len(rangeN) and len(artin) == len(artivm) and len(artin) == 1:\n newdict = {}\n newdict['name'] = artin[0].strip()\n newdict['range'] = rangeN[0].strip()\n newdict['version'] = artivm[0].strip()\n dependsList.append(newdict)\n\n cot = cot + 1\n dependObj['depends_list'] = dependsList\n\n vinfo['optional_depends'] = dependObj\n\n\n elif ttsp == 'Dependencies':\n outTable = tr.xpath('./td/table')\n dependObj = {}\n dependsList = []\n if len(outTable) > 0:\n cot = 0\n innerTrs = outTable[0].xpath('./tr/td/table/tr')\n for itr in innerTrs:\n if cot == 0:\n nss = itr.xpath('./th/text()').extract()\n dependObj['operator'] = ''\n if len(nss) > 0:\n dependObj['operator'] = nss[0].strip()\n else:\n artin = itr.xpath('./td[1]/a/text()').extract()\n rangeN = itr.xpath('./td[2]/text()').extract()\n artivm =itr.xpath('./td[2]/a/text()').extract()\n if len(artin) == len(rangeN) and len(artin) == len(artivm) and len(artin) == 1:\n newdict = {}\n newdict['name'] = artin[0].strip()\n newdict['range'] = rangeN[0].strip()\n newdict['version'] = artivm[0].strip()\n dependsList.append(newdict)\n\n\n cot = cot + 1\n dependObj['depends_list'] = dependsList\n\n vinfo['depends'] = dependObj\n\n tts['versions'] = [vinfo]\n\n item = OcamelItem()\n item['data'] = tts\n yield item\n print len(nextLinkMap.keys()), ' 
nextLinkMap'\n for ky in nextLinkMap.keys():\n ky = ky.strip()\n lk = nextLinkMap.get(ky)\n yield scrapy.Request(url = lk, callback = self.parseVersionDetails, meta = {'vname':ky.strip(), \"prev_url\":tts['project_url']})\n\n\n\n\n\n\n\n def parseVersionDetails(self, response):\n trs = response.xpath('//table[@class=\"table\"]/tr')\n vinfo = {}\n\n\n vinfo['available'] = ''\n vinfo['published_date'] = ''\n vinfo['source_url'] = ''\n vinfo['edit_url'] = ''\n vinfo['statistics'] = 0\n vinfo['depends'] = {}\n vinfo['optional_depends'] = {}\n vinfo['name'] = response.meta['vname']\n vinfo['project_url'] = response.meta['prev_url']\n\n\n\n\n trs = response.xpath('//table[@class=\"table\"]/tbody/tr')\n for tr in trs:\n titles = tr.xpath('./th/text()').extract()\n if len(titles) > 0:\n ttsp = titles[0].strip()\n if ttsp == 'Available':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n vinfo['available'] = contents[0].strip()\n elif ttsp == 'Published':\n contents = tr.xpath('./td/text()').extract()\n if len(contents) > 0:\n vinfo['published_date'] = contents[0].strip()\n elif ttsp == 'Source [http]':\n contents = tr.xpath('./td/a/text()').extract()\n if len(contents) > 0:\n vinfo['source_url'] = contents[0].strip()\n elif ttsp == 'Statistics':\n contents = tr.xpath('./td/strong/text()').extract()\n if len(contents) > 0:\n gh = contents[0].strip().replace(',','')\n if gh == 'once':\n vinfo['statistics'] = 1\n else:\n vinfo['statistics'] = int(gh)\n elif ttsp == 'Edit':\n contents = tr.xpath('./td/a/text()').extract()\n if len(contents) > 0:\n vinfo['edit_url'] = contents[0].strip()\n elif ttsp == 'Optional dependencies':\n outTable = tr.xpath('./td/table')\n dependObj = {}\n dependsList = []\n if len(outTable) > 0:\n cot = 0\n innerTrs = outTable[0].xpath('./tr/td/table/tr')\n for itr in innerTrs:\n if cot == 0:\n nss = itr.xpath('./th/text()').extract()\n dependObj['operator'] = ''\n if len(nss) > 0:\n dependObj['operator'] = nss[0].strip()\n else:\n artin = itr.xpath('./td[1]/a/text()').extract()\n rangeN = itr.xpath('./td[2]/text()').extract()\n artivm = itr.xpath('./td[2]/a/text()').extract()\n if len(artin) == len(rangeN) and len(artin) == len(artivm) and len(artin) == 1:\n newdict = {}\n newdict['name'] = artin[0].strip()\n newdict['range'] = rangeN[0].strip()\n newdict['version'] = artivm[0].strip()\n dependsList.append(newdict)\n\n cot = cot + 1\n dependObj['depends_list'] = dependsList\n\n vinfo['optional_depends'] = dependObj\n\n\n elif ttsp == 'Dependencies':\n outTable = tr.xpath('./td/table')\n dependObj = {}\n dependsList = []\n if len(outTable) > 0:\n cot = 0\n innerTrs = outTable[0].xpath('./tr/td/table/tr')\n for itr in innerTrs:\n if cot == 0:\n nss = itr.xpath('./th/text()').extract()\n dependObj['operator'] = ''\n if len(nss) > 0:\n dependObj['operator'] = nss[0].strip()\n else:\n artin = itr.xpath('./td[1]/a/text()').extract()\n rangeN = itr.xpath('./td[2]/text()').extract()\n artivm =itr.xpath('./td[2]/a/text()').extract()\n if len(artin) == len(rangeN) and len(artin) == len(artivm) and len(artin) == 1:\n newdict = {}\n newdict['name'] = artin[0].strip()\n newdict['range'] = rangeN[0].strip()\n newdict['version'] = artivm[0].strip()\n dependsList.append(newdict)\n\n\n cot = cot + 1\n dependObj['depends_list'] = dependsList\n\n vinfo['depends'] = dependObj\n\n versionDetailItem = OcamlVersionItem()\n versionDetailItem['data'] = vinfo\n yield versionDetailItem\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5330370664596558, "alphanum_fraction": 
0.5444358587265015, "avg_line_length": 34.81944274902344, "blob_id": "53e054c78e8490462e510d3e341d0e3ae28fdb26", "content_id": "d470884734b3e7885677d14566882b14da43763b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5176, "license_type": "no_license", "max_line_length": 128, "num_lines": 144, "path": "/crawler/LuaSpider/lua/spiders/LuaSpider.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nimport requests,time,random\n\nimport scrapy\n\nimport json\nimport urllib\n\n\nfrom lua.items import LuaItem\n\n\n\n\nclass LuaSpider(scrapy.Spider):\n name = 'LuaSpider'\n custom_settings = {'DOWNLOAD_DELAY': 1, 'DOWNLOAD_TIMEOUT':30000}\n # start_urls = ['http://go-search.org/search']\n # host_prefix = \"http://go-search.org\"\n\n def __init__(self, **kwargs):\n self.start_urls = 'https://luarocks.org/m/root'\n self.host_prefix = 'https://luarocks.org'\n\n super(LuaSpider, self).__init__(self, **kwargs)\n\n\n\n\n def __str__(self):\n return \"LuaSpider\"\n\n def start_requests(self):\n\n\n\n yield scrapy.Request(url = self.start_urls, callback = self.parseLuaLinks)\n\n\n\n\n\n def parseLuaLinks(self, response):\n unitLinks = response.xpath('/html/body/div[1]/div[2]/div/div[3]/div/div[1]/a/@href').extract()\n for unit in unitLinks:\n unitUrl = self.host_prefix + unit.strip()\n yield scrapy.Request(url = unitUrl, callback = self.parseLuaDetails)\n\n\n pageIndi = response.xpath(\"//div[@class='pager']/span/text()\").extract()\n if len(pageIndi) > 0:\n text = pageIndi[0].strip()\n splits = text.replace('Page','').strip().split('of')\n curPage = int(splits[0].strip())\n wholePage = int(splits[1].strip())\n if curPage < wholePage:\n curPage = curPage + 1\n urlNext = 'https://luarocks.org/m/root?page='+str(curPage)\n yield scrapy.Request(url = urlNext, callback = self.parseLuaLinks)\n\n def parseLuaDetails(self, response):\n basicInfo = {}\n stripedUrl = response.url.strip()\n indi = stripedUrl.rfind('/')\n pname = stripedUrl[indi+1:]\n pname = pname.strip()\n basicInfo['official_license'] = ''\n basicInfo['project_name'] = pname\n basicInfo['official_license'] = ''\n basicInfo['project_desc'] = ''\n basicInfo['homepage'] = ''\n basicInfo['total_downloads'] = 0\n basicInfo['project_url'] = response.url.strip()\n basicInfo['uploader'] = response.url.replace(pname,'').strip()\n basicInfo['labels'] = []\n basicInfo['depends'] = []\n basicInfo['versions'] = []\n basicInfo['other_urls'] = []\n\n licenseTexts = response.xpath('/html/body/div[1]/div[2]/div[1]/div[2]/div/div[2]/text()').extract()\n descs = response.xpath('/html/body/div[1]/div[2]/div[3]/div[1]/p/text()').extract()\n homepages = response.xpath('/html/body/div[1]/div[2]/div[1]/div[2]/div/div[3]/a/@href').extract()\n downloads = response.xpath('/html/body/div[1]/div[2]/div[1]/div[2]/div/div[4]/text()').extract()\n if len(downloads) > 0:\n basicInfo['total_downloads'] = int(downloads[0].replace(',','').strip())\n\n\n if len(homepages) > 0:\n basicInfo['homepage'] = homepages[0].strip()\n if len(descs) > 0:\n basicInfo['project_desc'] = descs[0].strip()\n if len(licenseTexts) > 0:\n basicInfo['official_license'] = licenseTexts[0].strip()\n\n labels = response.xpath('//div[@class=\"label_row\"]/a/text()').extract()\n lableList = []\n for ll in labels:\n lableList.append(ll.strip())\n basicInfo['labels'] = lableList\n\n depends = response.xpath('//div[@class=\"dependency_row\"]')\n dependsList = []\n for dep in depends:\n textss = 
dep.xpath('./text()').extract()\n range = dep.xpath('./span/text()').extract()\n if len(textss) == len(range) and len(textss) == 1:\n dd = {}\n dd['name'] = textss[0]\n dd['version_range'] = range[0].strip()\n dependsList.append(dd)\n basicInfo['depends'] = dependsList\n\n vvs = response.xpath('//div[@class=\"version_row\"]')\n vlist = []\n for vv in vvs:\n names = vv.xpath('./a/text()').extract()\n forSpanLength = len(vv.xpath('./span').extract())\n if forSpanLength == 2:\n days_ago = vv.xpath('./span[1]/text()').extract()\n downloads_num = vv.xpath('./span[2]/text()').extract()\n else:\n days_ago = vv.xpath('./span[2]/text()').extract()\n downloads_num = vv.xpath('./span[3]/text()').extract()\n if len(names) == len(days_ago) and len(downloads_num) == len(days_ago) and len(days_ago) == 1:\n ddcs = {}\n\n ddcs['name'] = names[0].strip()\n print downloads_num[0].strip()\n ddcs['downloads'] = int(downloads_num[0].replace(',','').replace('downloads','').replace('download','').strip())\n ddcs['time_ago'] = days_ago[0].replace(',', '').strip()\n\n ddcs['rockspec_url'] = response.url.replace('modules','manifest')+'/'+ddcs['name']+'-'+'.rockspec'\n vlist.append(ddcs)\n\n\n if len(basicInfo['homepage']) > 0:\n basicInfo['other_urls'] = [basicInfo['homepage'].strip()]\n basicInfo['versions'] = vlist\n\n\n\n item = LuaItem()\n item['data'] = basicInfo\n yield item\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6490384340286255, "alphanum_fraction": 0.6546474099159241, "avg_line_length": 30.9743595123291, "blob_id": "8aa639f6ebda80be864dc9b15c9a3aa7230027f9", "content_id": "a58984cf438798248fd41f2a11d21b70d80d3a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1248, "license_type": "no_license", "max_line_length": 145, "num_lines": 39, "path": "/crawler/LuaSpider/lua/pipelines.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport scrapy\nfrom scrapy.pipelines.files import FilesPipeline\nfrom os.path import basename,dirname,join\nfrom urlparse import urlparse\nimport settings\nfrom lua.items import LuaItem\nimport pymongo\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n\nclass LuaPipeline(object):\n def __init__(self):\n print 'LuaPipeline'\n self.mongoClient = pymongo.MongoClient(settings.MONGO_HOST, settings.MONGO_PORT, socketKeepAlive = True, serverSelectionTimeoutMS = 6000)\n\n def __del__(self):\n if self.mongoClient is not None:\n self.mongoClient.close()\n\n\n def process_item(self, item, spider):\n db = self.mongoClient.get_database('LuaLocks')\n colc = db.get_collection('lua')\n if isinstance(item, LuaItem):\n projectUrl = item['data']['project_url']\n co = colc.find({\"project_url\":projectUrl}).count()\n if co == 0:\n colc.insert(item['data'])\n else:\n colc.update({\"project_url\":item['data']['project_url']}, {\"$set\":item['data']})\n\n" }, { "alpha_fraction": 0.6782007217407227, "alphanum_fraction": 0.6851211190223694, "avg_line_length": 13.8421049118042, "blob_id": "c3ca573f7ce940420d17e1e72c4d3db0aefd6e4f", "content_id": "cf6a653a10426eef754a981be6fe8c6f7c500059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": 
"/crawler/LuaSpider/lua/items.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\n\nimport scrapy\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n\n\n\nclass LuaItem(scrapy.Item):\n data = scrapy.Field()\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6884058117866516, "alphanum_fraction": 0.695652186870575, "avg_line_length": 24.18181800842285, "blob_id": "75073c89c63877f9eafce98297ee52c50a92568e", "content_id": "3d15abf73aecff569ef9df66f82b1b1a75d5c293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 74, "num_lines": 11, "path": "/crawler/LuaSpider/lua/CrawlerDriver.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nimport scrapy\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom scrapy import cmdline\n\nif __name__ == '__main__':\n cmdline.execute(\"scrapy crawl LuaSpider\".split())\n #cmdline.execute(\"scrapy runspider ./spiders/GolangSpider.py\".split())" }, { "alpha_fraction": 0.5101754665374756, "alphanum_fraction": 0.5140351057052612, "avg_line_length": 35.98701477050781, "blob_id": "55f87dd943ef6f38da1ae02d0a82f802db025719", "content_id": "13777c5b39b587d48f9230960e97b80009827866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2850, "license_type": "no_license", "max_line_length": 145, "num_lines": 77, "path": "/crawler/OcamlSpider/ocamel/pipelines.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport scrapy\nfrom scrapy.pipelines.files import FilesPipeline\nfrom os.path import basename,dirname,join\nfrom urlparse import urlparse\nimport settings\nfrom ocamel.items import OcamelItem\nfrom ocamel.items import OcamlVersionItem\nimport pymongo\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n\nclass OcamelPipeline(object):\n def __init__(self):\n print 'OcamelPipeline'\n self.mongoClient = pymongo.MongoClient(settings.MONGO_HOST, settings.MONGO_PORT, socketKeepAlive = True, serverSelectionTimeoutMS = 6000)\n\n def __del__(self):\n if self.mongoClient is not None:\n self.mongoClient.close()\n\n\n def process_item(self, item, spider):\n db = self.mongoClient.get_database('Ocamel')\n colc = db.get_collection('ocamel')\n\n if isinstance(item, OcamelItem):\n\n projectUrl = item['data']['project_url']\n co = colc.find({\"project_url\":projectUrl}).count()\n if co == 0:\n colc.insert(item['data'])\n else:\n uts = colc.find({\"project_url\": projectUrl})\n vss = item['data']['versions']\n newest = vss[0]\n if len(newest['name']) > 0:\n for ut in uts:\n previous = ut['versions']\n for pp in previous:\n if pp['name'] != newest['name']:\n item['data']['versions'].append(pp)\n colc.update({\"project_url\": item['data']['project_url']}, {\"$set\": item['data']})\n\n\n elif isinstance(item, OcamlVersionItem):\n urls = item['data']['project_url']\n indics = colc.find({\"project_url\": urls.strip()}).count()\n if indics == 0:\n del item['data']['project_url']\n newdict = {}\n newdict['project_url'] = urls\n newdict['versions'] = 
[item['data']]\n colc.insert(newdict)\n else:\n del item['data']['project_url']\n prt = item['data']['name']\n if len(prt.strip()) > 0:\n results = colc.find({\"project_url\": urls.strip()})\n for rs in results:\n newVList = []\n vers = rs['versions']\n for ver in vers:\n if ver['name'] != prt:\n newVList.append(ver)\n\n newVList.append(item['data'])\n rs['versions'] = newVList\n colc.update({\"project_url\": rs['project_url']}, {\"$set\": rs})\n\n\n" }, { "alpha_fraction": 0.6901408433914185, "alphanum_fraction": 0.6957746744155884, "avg_line_length": 15.571428298950195, "blob_id": "b82a75bbe50c3e1502089cf75e0b0940d270cd87", "content_id": "a80a536b2b9f66fd2531f8e99cbc93931cf3c028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/crawler/OcamlSpider/ocamel/items.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\n\nimport scrapy\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n\n\nclass OcamelItem(scrapy.Item):\n data = scrapy.Field()\n\nclass OcamlVersionItem(scrapy.Item):\n data = scrapy.Field()\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5736215710639954, "alphanum_fraction": 0.5786340832710266, "avg_line_length": 26.025423049926758, "blob_id": "c62592953d6383742f30a6b3c8c70d3aa9617106", "content_id": "1718b3d584aa2e518d101bbc6ba0902d890b4828", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3192, "license_type": "no_license", "max_line_length": 125, "num_lines": 118, "path": "/crawler/OcamlSpider/ocamel/MongoUtil.py", "repo_name": "pombredanne/OSSMonitor", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nimport ssl\nimport json\nimport pymongo\nimport traceback\nimport os\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass MongoUtil:\n\n\n #headers = {'content-type': 'application/json'}\n def __init__(self,addr, port):\n self.addr = addr\n self.port = port\n\n\n def connect(self):\n # socketKeepAlive = True\n self.mongoClient = pymongo.MongoClient(self.addr, self.port, socketKeepAlive = True, serverSelectionTimeoutMS = 6000)\n\n\n def close(self):\n if self.mongoClient is not None:\n self.mongoClient.close()\n\n def updateOneDoc(self, dbName, collectionName, docIndicate, updateContent):\n try:\n if self.mongoClient is not None:\n db = self.mongoClient.get_database(dbName)\n collection = db.get_collection(collectionName)\n collection.update_one(docIndicate, updateContent)\n\n return 1\n return -1\n except Exception,e:\n msg = traceback.format_exc()\n print msg\n return -1\n\n def saveDoc(self, dbName, collectionName, doc):\n try:\n if self.mongoClient is not None:\n db = self.mongoClient.get_database(dbName)\n collection = db.get_collection(collectionName)\n collection.save(doc)\n\n return 1\n return -1\n except Exception,e:\n msg = traceback.format_exc()\n print msg\n return -1\n\n\n\n\n def insertOneDoc(self,dbName,collectionName, docContnet):\n try:\n db = self.mongoClient.get_database(dbName)\n collection = db.get_collection(collectionName)\n collection.insert(docContnet)\n\n return 1\n except Exception,e:\n print e.message\n return -1\n\n\n def insertManyDoc(self,dbName,collectionName, docContnet):\n try:\n db = self.mongoClient.get_database(dbName)\n collection = 
db.get_collection(collectionName)\n collection.insert_many(docContnet)\n\n\n\n return 1\n except Exception,e:\n print e.message\n return -1\n\n # def exeucteQuery(self, dbName, collectionName, query):\n # try:\n # db = self.mongoClient.get_database(dbName)\n # collection = db.get_collection(collectionName)\n # result = collection.find(query)\n # except Exception, e:\n #\n # print e.message\n\n\n\n def getAll(self,dbName, collectionName):\n try:\n db = self.mongoClient.get_database(dbName)\n collection = db.get_collection(collectionName)\n\n result = collection.find()\n return result\n except Exception,e:\n print e.message\n return None\n\n\n def getResultForQuery(self, dbName, collectionName, query):\n try:\n db = self.mongoClient.get_database(dbName)\n collection = db.get_collection(collectionName)\n result = collection.find(query)\n return result\n except Exception,e:\n stds =traceback.format_exc()\n print stds\n return None\n\n\n\n" } ]
8
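The Mongo pipelines above decide between insert and update by first running find(...).count(). With pymongo 3+, a single upsert expresses the same intent in one round trip; the sketch below reuses the LuaLocks/lua collection and project_url key from LuaPipeline, while the sample document is invented for illustration.

# Upsert sketch (assumes pymongo 3+ and a local MongoDB instance).
import pymongo

client = pymongo.MongoClient('localhost', 27017)
coll = client['LuaLocks']['lua']

# hypothetical item in the shape LuaPipeline stores
item = {'project_url': 'https://luarocks.org/modules/root/example', 'total_downloads': 1}

# upsert=True inserts when no document matches and updates otherwise,
# replacing the separate find().count() check used in the pipelines above
coll.update_one({'project_url': item['project_url']}, {'$set': item}, upsert=True)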
sisi0205/OO_software
https://github.com/sisi0205/OO_software
05fcb818e4f1f6b2d645a7f1164cfec85e57f2bc
df08b17afb7fbd77ff1e152e701c3cc118931156
c4f099df8eeaed1913824e31f944426514fd1040
refs/heads/master
2020-07-14T03:57:01.121989
2019-08-29T19:05:15
2019-08-29T19:05:15
205232003
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.557075560092926, "alphanum_fraction": 0.6115720868110657, "avg_line_length": 18.96412467956543, "blob_id": "99d2612c6d5923c9b0dd9e71f6dd976530dba032", "content_id": "26eabc99a5bb9593f2480a498dfa0190324ebd46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4459, "license_type": "no_license", "max_line_length": 83, "num_lines": 223, "path": "/include/model.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python3\n\nimport random\nimport sys\nsys.path.append('include')\nimport para\nimport random,math\n\n\n\nSIGMA=para.SIGMA\nEPSkb=para.EPSkb\nmHe=para.mHe\n\nLimit=0.8*SIGMA\n\nclass Model(object): \n\t\"\"\"docstring for MD, Npart: number of particles\"\"\"\n\tdef __init__(self):\n\t\tself.dt=1\n\t\tself.precoor=[]\n\t\tself.newcoor=[]\n\t\tself.V=[]\n\t\t##totoal energy\n\t\tself.TEnergy=[]\n\t\t##Kinetical energy\n\t\tself.KEnergy=[]\n\n\n\tdef acceleration(self, Npart,coor,boxsize, para,mass):\n\n\t\t#epsilon*kb kb=1.38e-23 Jk^-1\n\n\t\tEPSkb=para[0]*1.38e-23\n\t\tSIGMA=para[1]\n\t\tmass=mass/6.0221409e+26\n\t\tgap=0.8*SIGMA\n\t\tboxsizeX=(math.floor(math.sqrt(Npart))+1)*gap\n\t\tboxsizeY=(math.ceil(math.sqrt(Npart))+1)*gap\n\n\t\t###kinetic energy\n\n\t\tkinetic=0.0\n\t\tfor i in range(0, Npart):\n\t\t\t#print(self.V[i][0])\n\t\t\tkinetic+=0.5*mass*(self.V[i][0]*self.V[i][0]+self.V[i][1]*self.V[i][1])\n\t\t\t#print(kinetic)\n\t\tself.KEnergy.append(kinetic)\n\n\t\t#print(kinetic)\n\n\t\tacc=[]\n\n\t\tpotential=0.0\n\n\n\t\tfor i in range(0, Npart):\n\t\t\tacc.append([0,0])\n\t\t\t\n\t\tfor i in range(0,Npart-1):\n\t\t\tfor j in range(i+1, Npart):\n\t\t\t\t#print(coor[i], coor[j])\n\n\t\t\t\txij=coor[i][0]-coor[j][0]\n\t\t\t\tyij=coor[i][1]-coor[j][1]\n\n\t\t\t\tif xij>=0.5*boxsizeX:\n\t\t\t\t\txij-=boxsizeX\n\t\t\t\tif xij<-0.5*boxsizeX:\n\t\t\t\t\txij+=boxsizeX\n\t\t\t\tif yij>=0.5*boxsizeY:\n\t\t\t\t\tyij-=boxsizeY\n\t\t\t\tif yij<-0.5*boxsizeY:\n\t\t\t\t\tyij+=boxsizeY\n\n\t\t\t\trij2=xij*xij+yij*yij\n\t\t\t\trij6=rij2*rij2*rij2\n\t\t\t\trij12=rij6*rij6\n\t\t\t\t#print(rij2)\n\t\t\t\ttp2=SIGMA*SIGMA/rij2\n\t\t\t\ttp6=tp2*tp2*tp2\t\t\n\t\t\t\t#force \n\t\t\t\t#( (24*EPSILON*A)/r^2 ) * (SIGMA/r)^6 * ( 2*(SIGMA/r^2)^6 -1 )\n\n\t\t\t\t#if rij2 < 9*SIGMA*SIGMA:\n\n\t\t\t\tFfactor=24*EPSkb*tp6*(2*tp6-1)/rij2\n\t\t\t\taccel=Ffactor/(mass*1e10)\n\n\t\t\t\tacc[i][0]+=accel*xij\n\t\t\t\tacc[i][1]+=accel*yij\n\n\t\t\t\tacc[j][0]-=accel*xij\n\t\t\t\tacc[j][0]-=accel*yij\n\n\t\t\t\tpotential+=4*(1.0/rij12-1.0/rij6)\n\n\n\t\tself.TEnergy.append(potential)\n\n\n\t\treturn acc\n\n\t###boxsize is the size for periodic box, para:(EPSILON, SIGMA), mass: mass of atom\n\n\n\tdef Verlet(self,Npart, coor, boxsize,para,mass):\n\n\t\tacc=self.acceleration(Npart,coor,boxsize,para, mass)\n\n\t\tnewcoor=self.newcoor\n\t\tprecoor=self.precoor\n\n\t\t#calc v(t+0.5dt)\n\t\t#print(\"acc \", acc)\n\n\n\t\tfor i in 
range(0,Npart):\n\t\t\t#self.newcoor[i][0]=2*coor[i][0]-self.precoor[i][0]+acc[i][0]*self.dt*self.dt\n\t\t\t#self.newcoor[i][1]=2*coor[i][1]-self.precoor[i][1]+acc[i][1]*self.dt*self.dt\n\t\t\tnewcoor[i][0]=2*coor[i][0]-precoor[i][0]+acc[i][0]*self.dt*self.dt\n\t\t\tnewcoor[i][1]=2*coor[i][1]-precoor[i][1]+acc[i][1]*self.dt*self.dt\n\n\t\t\t#self.V[i][0]=(self.newcoor[i][0]-self.precoor[i][0])/(2*self.dt)\n\t\t\t#self.V[i][1]=(self.newcoor[i][1]-self.precoor[i][1])/(2*self.dt)\n\t\t\tself.V[i][0]=(newcoor[i][0]-precoor[i][0])/(2*self.dt)\n\t\t\tself.V[i][1]=(newcoor[i][1]-precoor[i][1])/(2*self.dt)\n\n\t\tself.precoor=precoor\n\t\tself.newcoor=newcoor\n\t\tcoor=newcoor\n\n\n\t\treturn coor\n\n\n\n\n ##initialize velocity\n\n\tdef InitVelo(self, Npart,boxsize,temp,para):\n\t\tcoord=self.Lattice(Npart, boxsize,para)\n\t\tprecoor=[]\n\t\t#print(\"coor in Lattice \", coor)\n\t\tself.V=[]\n\t\tspeed_scale=1.0\n\t\tsumv=[0,0]\n\t\tsumv2=0\n\n\t\tfor i in range(0,Npart):\n\t\t\tvx=speed_scale*(2*random.random()-1)\n\t\t\tvy=speed_scale*(2*random.random()-1)\n\t\t\tself.V.append([vx,vy])\n\t\t\tsumv[0]+=vx\n\t\t\tsumv[1]+=vy\n\t\t\tsumv2+=vx*vx+vy*vy\n\t\t\t#print(\"sumv2 is \",sumv2)\n\n\t\tsumv[0]=sumv[0]/Npart\n\t\tsumv[1]=sumv[1]/Npart\n\t\tfs=math.sqrt(3*temp/sumv2)\n\t\t#print(\"coor in Lattice \", coord)\n\n\t\tfor i in range(0,len(self.V)):\n\t\t\tself.V[i][0]=(self.V[i][0]-sumv[0])*fs\n\t\t\tself.V[i][1]=(self.V[i][1]-sumv[1])*fs\n\t\t\t#print(\"coor in loop 1 \", coord[i][0])\n\t\t\ttpx=coord[i][0]-self.V[i][0]*self.dt\n\t\t\t#self.precoor[i][0]=tpx\n\t\t\t#print(\"coor in loop 2 \", coord[i][0])\n\t\t\ttpy=coord[i][1]-self.V[i][1]*self.dt\n\t\t\t#print(\"coor in loop 3 \", coord[i])\n\t\t\tprecoor.append([tpx,tpy])\n\n\n\t\treturn precoor\n\n\n\n\tdef Lattice(self, Npart,boxsize, para):\n\t\tL=[]\n\t\tSIGMA=para[1]\n\t\t#print(SIGMA)\n\t\tgap=0.8*SIGMA\n\n\t\tn=int(math.sqrt(Npart))\n\t\ttpx=0\n\t\ttpy=0\n\t\t\n\t\tfor i in range(0,n):\n\t\t\ttpx+=gap\n\t\t\ttpy=0\n\t\t\tfor j in range(0,n):\n\t\t\t\ttpy+=gap\n\t\t\t\tL.append([tpx,tpy])\n\t\textr=Npart-n*n\n\t\ttpx+=gap\n\t\ttpy=0\n\n\t\tfor i in range(0,extr):\n\t\t\ttpy+=gap\n\t\t\tL.append([tpx,tpy])\n\t\t\t\n\t\tself.precoor=self.newcoor=L\n\t\t#print(\"The first coor\",L)\n\t\treturn L\n\n\n\n\tdef GetEnergy(self):\n\t\treturn (self.TEnergy, self.KEnergy)\n\n\n\t\t\nif __name__==\"__main__\":\n\tview=Model()\n\tcoor=view.Lattice(2, 20, (10.22, 2.556))\n\tview.InitVelo(2,20,300, (10.22, 2.556))\n\n\tfor i in range(1,10):\n\t\t#coor=view.Verlet(2,coor,5)\n\t\tcoor=view.Verlet(2,coor,20,(10.22, 2.556),4.0026)\n\t\tprint(coor)\n\n\n\t\n\n\t\t" }, { "alpha_fraction": 0.6635931134223938, "alphanum_fraction": 0.6934159398078918, "avg_line_length": 28.66239356994629, "blob_id": "b5ec0e2f14bd48ab72ef568d817fa3129d9ed2ac", "content_id": "e28a9d7ce6a14388ce8e34f8fa6e84bfbdd678f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6941, "license_type": "no_license", "max_line_length": 139, "num_lines": 234, "path": "/include/popNoble.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python3\n\nimport sys\nimport tkinter as tk\nfrom tkinter import messagebox\n\n\nGEOMETRY = '400x500+20+20'\nWIDTH=15\nHEIGHT=8\n\nclass PopBuild(object):\n\t\"\"\"docstring for PopBuild\"\"\"\n\tdef __init__(self, master):\n\t\ttop=self.top=tk.Toplevel(master)\n\t\ttop.title(\"Build Molecular Dynamical Model\")\n\t\tself.boxFont = ('Georgia', 14)\n\t\tself.labelFont 
= ('Georgia', 10)\n\t\tself.butFont = ('Georgia', 12)\n\t\ttop.geometry(GEOMETRY)\n\n\n\t\tself.type=tk.IntVar()\n\t\tself.type.set(1)\n\n\t\tself.name={1:\"He\", 2:\"Ar\", 3:\"Ne\", 4:\"Xe\"}\n\n\n\t\tself.Typevalue=1\n\t\tself.Framevalue=100\n\t\tself.Tempvalue=300\n\t\tself.Numvalue=10\n\t\tself.Submit=False\n\t\tself.output=\"output.txt\"\n\t\tself.Typename=self.name[self.Typevalue]\n\n\n\n\n\t\tframe1 = tk.Frame(top)\n\t\tframe1.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#FFCCE5',width=50)\n\t\tframe1.pack(side=tk.TOP)\n\n\t\tBoxsize=tk.Label(frame1, text=\"Number of Frames\")\n\t\tBoxsize.pack(side=tk.LEFT, anchor=tk.W)\n\t\tBoxsize.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0', width=15)\n\t\tBoxsize.config(font=self.labelFont)\n\n\t\tself.SizeBox = tk.Entry(frame1, font=self.boxFont, width=15)\n\t\tself.SizeBox.bind('<Return>', (lambda event: self.getFramesize()))\n\t\tself.SizeBox.pack(side=tk.LEFT, padx=5)\n\t\tself.SizeBox.config(relief=tk.SUNKEN, bd=5)\n\t\tself.SizeBox.focus()\n\t\t\n\t\t#enter1=tk.Button(frame1, text=\"Enter\",width=5, command=self.Enter1)\n\t\t#enter1.pack(side=tk.LEFT, anchor=tk.W)\n\t\t#enter1.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0')\n\t\t#enter1.config(font=self.butFont, state=tk.NORMAL)\n\n\t\tframe2 = tk.Frame(top)\n\t\tframe2.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#FFCCE5',width=50)\n\t\tframe2.pack(side=tk.TOP)\n\n\t\tNumAtom=tk.Label(frame2, text=\"Number of Molecules\")\n\t\tNumAtom.pack(side=tk.LEFT, anchor=tk.W)\n\t\tNumAtom.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0', width=15)\n\t\tNumAtom.config(font=self.labelFont)\n\n\t\tself.NumBox = tk.Entry(frame2, font=self.boxFont, width=15)\n\t\tself.NumBox.bind('<Return>', (lambda event: self.getNum()))\n\t\tself.NumBox.pack(side=tk.LEFT, padx=5)\n\t\tself.NumBox.config(relief=tk.SUNKEN, bd=5)\n\t\tself.NumBox.focus()\n\t\t\t\n\n\t\tframe3 = tk.Frame(top)\n\t\tframe3.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#FFCCE5',width=50)\n\t\tframe3.pack(side=tk.TOP)\n\n\n\t\tTemper=tk.Label(frame3, text=\"Temperature (K)\")\n\t\tTemper.pack(side=tk.LEFT, anchor=tk.W)\n\t\tTemper.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0', width=15)\n\t\tTemper.config(font=self.labelFont)\n\n\t\tself.TempBox = tk.Entry(frame3, font=self.boxFont, width=15)\n\t\tself.TempBox.bind('<Return>', (lambda event: self.getTemp()))\n\t\tself.TempBox.pack(side=tk.LEFT, padx=5)\n\t\tself.TempBox.config(relief=tk.SUNKEN, bd=5)\n\t\tself.TempBox.focus()\n\n\n\t\tframe3 = tk.Frame(top)\n\t\tframe3.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#FFCCE5', width=50)\n\t\tframe3.pack(side=tk.LEFT)\n\t\t\n\n\t\tself.atomtype=[(\"He\",1),(\"Ar\",2),(\"Ne\",3),(\"Xe\",4)]\n\t\tAtom=tk.Label(frame3, text=\"Type of Atom\")\n\t\tAtom.pack(side=tk.TOP, anchor=tk.W)\n\t\tAtom.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0', width=10)\n\t\tAtom.config(font=self.labelFont)\n\t\tfor atom, val in self.atomtype:\n\t\t\tRadio=tk.Radiobutton(frame3, text=atom, padx=5, variable=self.type, command=self.getType, value=val)\n\t\t\tRadio.config(indicatoron=0, width=10,padx=5, pady=5, bd=5, relief=tk.RAISED)\n\t\t\tRadio.pack(side=tk.TOP)\n\n\n\t\tsubmit=tk.Button(frame3, text=\"Submit\",width=8, command=self.SubVal)\n\t\tsubmit.pack(side=tk.TOP, anchor=tk.W)\n\t\tsubmit.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#f44248')\n\t\tsubmit.config(font=self.butFont, state=tk.NORMAL)\n\n\n\t\tframe4 = tk.Frame(top)\n\t\tframe4.config(padx=5, pady=5, bd=5, 
relief=tk.RAISED, bg='#FFCCE5',width=30)\n\t\tframe4.pack(side=tk.LEFT)\n\n\t\tscrollbar = tk.Scrollbar(frame4)\n\n\t\tscrollbar2 = tk.Scrollbar(frame4)\n\t\tscrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\t\tscrollbar2.pack(side=tk.BOTTOM,fill=\"both\")\n\n\t\tself.listBox = tk.Listbox(frame4)\n\t\tself.listBox.pack( side = tk.TOP, fill = tk.Y)\n\t\tself.listBox.config( bg='white', bd=5, font=self.boxFont )\n\t\tself.listBox.config(width=WIDTH, height=HEIGHT)\n\t\tself.listBox.bind('<Double-1>', self.Display)\n\t\tself.listBox.config(yscrollcommand=scrollbar.set)\n\t\tscrollbar.config(command=self.listBox.yview)\n\t\tself.listBox.config(xscrollcommand=scrollbar2.set)\n\t\tscrollbar2.config(command=self.listBox.xview, orient=tk.HORIZONTAL)\n\t\t\t\n\n\t\t#print(self.value)\n\n\n\n\tdef getType(self):\n\t\tself.Typevalue=self.type.get()\n\t\tself.Typename=self.name[self.Typevalue]\n\t\t#print(self.type.get())\n\t\tself.listBox.insert(tk.END,\"Atom type set to:\"+str(self.Typename))\n\n\t#def Enter1(self):\n\t\t#self.Numvalue=self.NumBox.get()\n\t\t#self.listBox.insert(tk.END,\"The boxsize set to:\"+self.Numvalue)\n\t\t#print(self.Numvalue)\n\n\n\n\t\n\tdef getNum(self):\n\t\ttry:\n\t\t\tif self.NumBox.get().isdigit():\n\t\t\t\tself.Numvalue=int(self.NumBox.get())\n\t\t\t\tif (self.Numvalue > 50 or self.Numvalue<3):\n\t\t\t\t\tself.outrangeWarning(self.NumBox,(3,50))\n\t\t\t\t\t#self.NumBox.delete(0,tk.END)\n\t\t\t\telse:\n\t\t\t\t\tself.listBox.insert(tk.END,\"Num set to:\"+str(self.Numvalue))\n\t\t\telse:\n\t\t\t\tself.noInterWarning(self.NumBox)\n\n\t\texcept:\n\t\t\tself.noInterWarning(self.NumBox)\n\t\t\t#self.NumBox.delete(0,tk.END)\n\n\t\t\n\n\tdef getTemp(self):\n\t\ttry:\n\t\t\tif self.TempBox.get().isdigit():\n\t\t\t\tself.Tempvalue=int(self.TempBox.get())\n\t\t\t\tif (self.Tempvalue<0 or self.Tempvalue>1000):\n\t\t\t\t\tself.outrangeWarning(self.TempBox,(0,1000))\n\t\t\t\telse:\n\t\t\t\t\tself.listBox.insert(tk.END,\"Temp set to:\"+str(self.Tempvalue)+\" K\")\n\t\t\telse:\n\t\t\t\tself.noInterWarning(self.TempBox)\n\n\t\texcept:\n\t\t\tself.noInterWarning(self.TempBox)\n\n\t\n\n\tdef getFramesize(self):\n\t\ttry:\n\t\t\tif self.SizeBox.get().isdigit():\n\t\t\t\tself.Framevalue=int(self.SizeBox.get())\n\t\t\t\tif(self.Framevalue<3 or self.Framevalue>4000):\n\t\t\t\t\tself.outrangeWarning(self.SizeBox,(3,4000))\n\t\t\t\telse:\n\t\t\t\t\tself.listBox.insert(tk.END,\"Frame number:\"+str(self.Framevalue))\n\t\t\telse:\n\t\t\t\tself.noInterWarning(self.SizeBox)\n\n\t\texcept:\n\t\t\tself.noInterWarning(self.SizeBox)\n\t\t\n\n\t\n\tdef SubVal(self):\n\t\tif messagebox.askyesno('Submit', \"Do you want to submit your parameter?\\nFrame Number is:\"+str(self.Framevalue)\n\t\t\t+\"\\nThe Number of Molecules is: \"+str(self.Numvalue)+\"\\nThe Temperature is: \"+str(self.Tempvalue)+\"\\nThe Atom type is: \"+self.Typename):\n\t\t    messagebox.showwarning('Yes', 'Submit')\n\t\t    self.Submit=True\n\t\t    self.top.destroy()\n\t\telse:\n\t\t\tmessagebox.showinfo('No', 'I want to reset the parameter')\n\t\t\n\n\tdef Display(self, event):\n\t\tindex = self.listBox.curselection()\n\t\tlabel = self.listBox.get(index)\n\t\tself.listBox.delete(index)\n \n    ##enter the range in a tuple,widget is name of enterbox\n\tdef outrangeWarning(self,widget,range):\n\t\tmessagebox.showwarning('Out of range', 'Please enter the number in range of '+str(range[0])+\" to \"+str(range[1]))\n\t\twidget.delete(0,tk.END)\n\n    ##widget is name of enterbox\n\tdef noInterWarning(self, widget):\n\t\tmessagebox.showwarning('Oops', 'Input is not 
Integer!')\n\t\twidget.delete(0,tk.END)\n\n\n\n\nif __name__ == \"__main__\":\n pop = PopBuild(tk.Tk())\n tk.mainloop()\n" }, { "alpha_fraction": 0.7043478488922119, "alphanum_fraction": 0.7057970762252808, "avg_line_length": 18.685714721679688, "blob_id": "5820179e363a1c8171ec9516dbf010cbe8c361b2", "content_id": "86a1fa1286adbc6b94e4167f26ef2b4e1c6bc66c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 60, "num_lines": 35, "path": "/include/controller.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python3\n\nimport os\nimport sys\nimport tkinter as tk\nsys.path.append('include')\nimport model\nimport view\n\nclass Controller():\n\tdef __init__(self):\t\n\t\tself.model =model.Model()\n\t\tself.view = view.View(self)\n\n\tdef Lattice(self, Npart, boxsize,para):\n\t\treturn self.model.Lattice(Npart,boxsize,para)\n\n\n\tdef InitVelo(self,Npart,boxsize,temp, para):\n\t\tself.model.InitVelo(Npart,boxsize,temp,para)\n\n\tdef Verlet(self,Npart, coor, boxsize, para,mass):\n\t\treturn self.model.Verlet(Npart, coor, boxsize, para, mass)\n\n\tdef GetEnergy(self):\n\t\treturn self.model.GetEnergy()\n\n\tdef getcwd(self):\n\t\treturn os.getcwd()\n\n\n\nif __name__ == \"__main__\":\n controller = Controller()\n tk.mainloop()\n\n" }, { "alpha_fraction": 0.7117646932601929, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 17.66666603088379, "blob_id": "50390195af5c30f300f6dc8c0d86567bec5073f3", "content_id": "60aa5fd7a902ef5234e1cd8b6486f75b829599b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/main.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python3\n\nimport sys\nimport os\nimport tkinter as tk\nimport include.controller\n\nif __name__ == \"__main__\":\n\tcontroller = include.controller.Controller()\n\t\n" }, { "alpha_fraction": 0.6541460752487183, "alphanum_fraction": 0.6748944520950317, "avg_line_length": 24.455373764038086, "blob_id": "d38f5fea8e9add7f612a20f999f18a453729eab6", "content_id": "dd30d1accc847e13beef7a21613ea719a32a8cf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13977, "license_type": "no_license", "max_line_length": 103, "num_lines": 549, "path": "/include/view.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python3\n\nimport sys,os\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport pygame\nfrom pygame.locals import *\nsys.path.append('include')\nimport popNoble\n#import controller\nimport model\nimport para\n\n###matplot\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\nimport numpy as np\n\n\nGEOMETRY = '700x400+2+2'\nWIDTH=120\nHEIGHT=19\n\nPWIDTH=400\nPHEIGHT=400\n\n\nclass Controller(object):\n def __init__(self):\n pass\n def pos(self):\n \tpass\n def getcwd(self):\n \tpass\n\nclass View(object):\n\t\"\"\"docstring for View\"\"\"\n\tdef __init__(self, controller):\n\t\t\n\t\tself.root=tk.Tk()\n\t\troot=self.root\n\t\troot.bind('<Escape>', lambda event: sys.exit())\n\t\troot.title(\"Molecular Dynamic 
simulation\")\n\t\troot.geometry(GEOMETRY)\n\t\tself.controller = controller\n\n\n\t\tself.labelFont = ('Georgia', 10)\n\t\tself.boxFont = ('Georgia', 14)\n\t\tself.butFont = ('Georgia', 12)\n\t\tself.colormap=para.colormap\n\n\t\tself.Tempvalue=300\n\t\tself.Numvalue=20\n\t\tself.Sizevalue=10\n\t\tself.Framevalue=500\n\t\tself.Typevalue=1\n\t\tself.Typename='He'\n\t\tself.Para=para.Paramap[\"He\"]\n\t\tself.Mass=para.Massmap[\"He\"]\n\t\tself.Color=para.colormap[\"He\"]\n\t\tself.Coord=[]\n\t\tself.output=\"output.txt\"\n\t\tself.stop=False\n\n\n\n\t\tself.fileOpt = {}\n\t\tself.fileOpt['defaultextension'] = '' \n\t\tself.fileOpt['filetypes'] = [('all files', '.*'), ('text files', '.txt'),(\"python files\",\"*.py\")]\n\t\tself.fileOpt['initialdir'] = self.controller.getcwd()\n\t\tself.fileOpt['initialfile'] = '*.txt'\n\t\tself.fileOpt['parent'] = self.root\n\n\n\t\tframe1 = tk.Frame(root)\n\t\tframe1.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#fda9b2')\n\t\tframe1.pack(side=tk.TOP)\n\n\t\tFile=tk.Menubutton(frame1, text=\"File\",width=10)\n\t\tFile.pack(side=tk.LEFT, anchor=tk.W)\n\t\tFile.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0')\n\t\tFile.config(font=self.butFont, state=tk.NORMAL)\n\t\tFile.menu=tk.Menu(File, tearoff = 0)\n\t\tFile[\"menu\"]=File.menu\n\t\tFile.menu.add_command(label=\"Directory\", command =self.askdiretory)\n\t\tFile.menu.add_command(label=\"Open file\", command =self.askopenfile)\n\t\tFile.menu.add_command(label=\"Save Current Energy\", command =self.saveEnergy)\n\t\tFile.menu.add_command(label=\"Save Current Coord\", command =self.saveCoord)\n\t\tFile.menu.add_command(label=\"Load simulation\", command =self.LoadTraj)\n\n\n\t\tself.build=tk.Button(frame1, text=\"Build\",width=10, command=self.popNoble)\n\t\tself.build.pack(side=tk.LEFT, anchor=tk.W)\n\t\tself.build.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0')\n\t\tself.build.config(font=self.butFont, state=tk.NORMAL)\n\n\n\t\tAnaly=tk.Menubutton(frame1, text=\"Analysis\",width=10)\n\t\tAnaly.pack(side=tk.LEFT, anchor=tk.W)\n\t\tAnaly.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#E0E0E0')\n\t\tAnaly.config(font=self.butFont, state=tk.NORMAL)\n\t\tAnaly.menu=tk.Menu(Analy, tearoff = 0)\n\t\tAnaly[\"menu\"]=Analy.menu\n\t\tAnaly.menu.add_command(label=\"Display Potential Curve\", command =self.Potential)\n\t\tAnaly.menu.add_command(label=\"Energy of Current running simulation\", command =self.TEnergy)\n\t\tAnaly.menu.add_command(label=\"Plot Energy from file\", command =self.ReadEnergy)\n\n\n\t\tstart=tk.Button(frame1, text=\"Start\",width=5, command=self.Start)\n\t\tstart.pack(side=tk.LEFT, anchor=tk.W)\n\t\tstart.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#f44248')\n\t\tstart.config(font=self.butFont, state=tk.NORMAL)\n\n\n\t\tstop=tk.Button(frame1, text=\"Stop\",width=5, command=self.Stop)\n\t\tstop.pack(side=tk.LEFT, anchor=tk.W)\n\t\tstop.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#f44248')\n\t\tstop.config(font=self.butFont, state=tk.NORMAL)\n\n\t\tContinue=tk.Button(frame1, text=\"Continue\",width=6, command=self.Continue)\n\t\tContinue.pack(side=tk.LEFT, anchor=tk.W)\n\t\tContinue.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#f44248')\n\t\tContinue.config(font=self.butFont, state=tk.NORMAL)\n\n\n\n\t\tframe2 = tk.Frame(root)\n\t\tframe2.config(padx=5, pady=5, bd=5, relief=tk.RAISED, bg='#FFCCE5')\n\t\tframe2.pack(side=tk.BOTTOM)\n\n\t\tscrollbar = tk.Scrollbar(frame2)\n\t\tscrollbar2 = tk.Scrollbar(frame2)\n\t\tscrollbar.pack(side=tk.RIGHT, 
fill=tk.Y)\n\t\tscrollbar2.pack(side=tk.BOTTOM,fill=\"both\")\n\n\t\tself.listBox = tk.Listbox(frame2)\n\t\tself.listBox.pack( side = tk.TOP, fill = tk.BOTH)\n\t\tself.listBox.config( bg='white', bd=5, font=self.boxFont )\n\t\tself.listBox.config(width=WIDTH, height=HEIGHT)\n\t\tself.listBox.bind('<Double-1>', self.displayItem)\n\t\tself.listBox.config(yscrollcommand=scrollbar.set)\n\t\tscrollbar.config(command=self.listBox.yview)\n\t\tself.listBox.config(xscrollcommand=scrollbar2.set)\n\t\tscrollbar2.config(command=self.listBox.xview, orient=tk.HORIZONTAL)\n\n\n\n\t\t####pygame\n\t\t#os.environ['SDL_WINDOWID'] = str(root.winfo_id())\n\t\tos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (200,200)\n\t\troot.update()\n\t\t\n\n\t\tpygame.init()\n\t\tself.FPS = 30 # frames per second setting\n\t\tself.fpsClock = pygame.time.Clock()\n\t\tself.DISPLAYSURF = pygame.display.set_mode((PWIDTH, PHEIGHT), 0, 32)\n\t\tpygame.display.set_caption('Animation')\n\t\t\n\n\n\t\tself.WHITE = (255, 255, 255)\n\n\t\tself.DISPLAYSURF.fill(self.WHITE)\n\n\t\twhile True:\n\t\t\t#frame1.update()\t\t\n\t\t\tfor event in pygame.event.get():\n\t\t\t\t\n\t\t\t\tif event.type==QUIT:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tsys.exit()\n\t\t\n\t\t\tpygame.display.update()\n\t\t\tself.root.update()\n\t\t\tself.fpsClock.tick(self.FPS)\n\n\n\t\t#self.file.close()\t\n\n\n\t\t#tk.mainloop()\n\n\tdef askdiretory(self):\n\t\tcwd=self.controller.getcwd()\n\t\tdirOpt = {}\n\t\tdirOpt['initialdir'] = cwd\n\t\tdirOpt['mustexist'] = False\n\t\tdirOpt['parent'] = self.root\n\t\tdirOpt['title'] = 'Select Directory'\n\n\t\tdirectory = tk.filedialog.askdirectory(**dirOpt)\n\t\tprint( \"askdirectory is:\", directory )\n\t\treturn directory\n\n\tdef askopenfile(self):\n\n\t\tcwd=self.controller.getcwd()\n\t\tself.fileOpt['title'] = 'Select File'\n\t\tfilename=filedialog.askopenfilename(**self.fileOpt)\n\t\t#print( \"askopenfilename is:\", filename )\n\t\tif filename:\n\t\t\tself.openfile(filename)\n\t\t\treturn open(filename, 'r')\n\n\t\n\tdef openfile(self, filename):\n\t\tpopup=tk.Toplevel(self.root)\n\t\tpopup.geometry(GEOMETRY)\n\t\tpopup.title(filename)\n\n\t\tscrollbar = tk.Scrollbar(popup)\n\t\tscrollbar2 = tk.Scrollbar(popup)\n\t\tscrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\t\tscrollbar2.pack(side=tk.BOTTOM,fill=\"both\")\n\n\t\tlistBox = tk.Listbox(popup)\n\t\tlistBox.pack( side = tk.TOP, fill = tk.BOTH)\n\t\tlistBox.config( bg='white', bd=5, font=self.boxFont )\n\t\tlistBox.config(width=WIDTH, height=HEIGHT)\n\t\tlistBox.bind('<Double-1>', self.displayItem)\n\t\tlistBox.config(yscrollcommand=scrollbar.set)\n\t\tscrollbar.config(command=listBox.yview)\n\t\tself.listBox.config(xscrollcommand=scrollbar2.set)\n\t\tscrollbar2.config(command=listBox.xview, orient=tk.HORIZONTAL)\n\n\t\tf=open(filename, 'r')\n\n\t\tf1=f.readlines()\n\t\tfor x in f1:\n\t\t\trmbreaker=x[:-1]\n\t\t\tlistBox.insert(tk.END,rmbreaker)\n\t\tf.close()\n\n\n\tdef saveEnergy(self):\n\n\t\tself.fileOpt['title'] = 'Save Energy File'\n\t\tf = tk.filedialog.asksaveasfile(**self.fileOpt)\n\t\tif f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n\t\t return\n\t\tEnergy=self.controller.GetEnergy()[0]\n\t\tfor x in Energy:\n\t\t\tf.write(\"%f\\n\"%x)\n\t\tf.close()\n\n\n\tdef saveCoord(self):\n\t\tself.fileOpt['title'] = 'Save Coordinate File'\n\t\tf = tk.filedialog.asksaveasfile(**self.fileOpt)\n\t\tif f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n\t\t return\n\t\tCoord=self.Coord\n\t\t#print(self.Coord)\n\t\tfor coor in 
Coord:\n\t\t\tfor i in range(0,len(coor)):\n\t\t\t\tf.write(str(coor[i][0])+\" \"+str(coor[i][1])+\",\")\n\t\t\tf.write(\"\\n\")\n\t\t\t\n\t\tf.close()\n\n\n\n\tdef LoadTraj(self):\n\n\t\tself.fileOpt['title'] = 'Select Trajectory File'\n\t\tfilename = tk.filedialog.askopenfilename(**self.fileOpt)\n\t\tcoord=[]\n\t\tcoorxy=[]\t\n\n\t\tif filename:\n\t\t openfile=open(filename,\"r\")\n\t\t line=openfile.readlines()\n\t\t for coor in line:\n\t\t \tstrip1=coor.split(\",\")\n\n\t\t \tself.DISPLAYSURF.fill(self.WHITE)\n\n\t\t \tfor xy in strip1:\n\t\t \t\tstrip2=list(xy.split())\n\t\t \t\tif strip2==[]:\n\t\t \t\t\ttry:\n\t\t\t \t\t\tfor x in coorxy:\n\t\t\t \t\t\t\tpygame.draw.circle(self.DISPLAYSURF,self.Color, x,10,0)\n\t\t\t \t\texcept:\n\t\t\t \t\t\tself.formatwarning()\n\t\t\t \t\t\treturn\n\n\t\t \t\t\tcoord.append(coorxy)\n\t\t \t\t\tcoorxy=[]\n\n\t\t \t\telse:\n\t\t \t\t\t#print(strip2)\n\t\t \t\t\ttry:\n\t\t\t \t\t\tx=int(strip2[0])\n\t\t\t \t\t\ty=int(strip2[1])\n\t\t\t \t\t\tcoorxy.append([x,y])\n\t\t\t \t\texcept:\n\t\t\t \t\t\tself.formatwarning()\n\t\t\t \t\t\treturn\n\n\t\t \tpygame.display.update()\n\t\t \tself.root.update()\n\t\t \tself.fpsClock.tick(self.FPS)\n\t\t openfile.close()\n\n\n\t\t#print(coord)\n\t\n\tdef ReadEnergy(self):\n\n\t\tself.fileOpt['title'] = 'Select Potential File'\n\t\tfilename = tk.filedialog.askopenfilename(**self.fileOpt)\n\n\t\tEnergy=[]\n\n\t\tif filename:\n\t\t\topenfile=open(filename,\"r\")\n\t\t\tline=openfile.readlines()\n\t\t\tfor x in line:\n\t\t\t\trmbreaker=x[:-1]\n\t\t\t\ttry: \n\t\t\t\t\tif float(rmbreaker):\n\t\t\t\t\t\tEnergy.append(float(rmbreaker))\n\n\t\t\t\texcept:\n\t\t\t\t\tself.formatwarning()\n\t\t\t\t\treturn\n\n\n\n\t\tif len(Energy)!=0:\n\t\t\tpopup=tk.Toplevel(self.root)\n\n\t\t\tf = Figure(figsize=(5,5), dpi=100)\n\t\t\ta = f.add_subplot(111)\n\t\t\t#print(self.Para[0],self.Para[1])\n\t\t\tx=np.arange(0.5,len(Energy),1)\n\n\t\t\ta.plot(x,Energy)\n\t\t\ta.set_title (\"Potential Energy\", fontsize=16)\n\t\t\ta.set_ylabel(\"Energy(J/mol)\", fontsize=14)\n\t\t\ta.set_xlabel(\"Time Step\", fontsize=14)\n\t\t\t\n\t\t\tself.Canvasformat(popup, f)\n\n\t\telse:\n\t\t\tmessagebox.showwarning('Oops', 'The input file is empty')\n\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\n\tdef Start(self):\n\n\t\tself.Coord=[]\n\n\t\t#self.file=open(self.output,\"w\")\t\n\t\tself.stop=False\t\t\n\t\tself.L=self.controller.Lattice(self.Numvalue,self.Sizevalue,self.Para)\n\t\tself.controller.InitVelo(self.Numvalue,self.Sizevalue, self.Tempvalue,self.Para)\n\t\tcoor=self.Zoom(self.Numvalue)\n\t\tself.Coord.append(coor)\n\n\n\t\tfor x in coor:\n\t\t\t\tpygame.draw.circle(self.DISPLAYSURF,self.Color, x,10,0)\n \n\t\t\n\t\tfor i in range(1,self.Framevalue):\n\n\t\t\tfor event in pygame.event.get():\n\t\t\t\t\n\t\t\t\tif event.type==QUIT:\n\t\t\t\t\tpygame.quit()\n\n\t\t\tif self.stop==False:\n\n\t\t\t\tself.DISPLAYSURF.fill(self.WHITE)\n\n\t\t\t #print(\"self.L\", self.L)\n\t\t\t\tfor j in range(1,200):\n\t\t\t\t\t#Verlet(self,Npart, coor, boxsize,para,mass):\n\t\t\t\t\tself.L=self.controller.Verlet(self.Numvalue,self.L,self.Sizevalue, self.Para, self.Mass)\n\n\t\t\t\tcoor=self.Zoom(self.Numvalue)\n\t\t\t\tself.Coord.append(coor)\n\t\t\t\t#print(\"self.Color\",self.Color)\t\t\t\t\n\t\t\t\tfor x in coor:\n\t\t\t\t\tpygame.draw.circle(self.DISPLAYSURF,self.Color, x,10,0)\t\n\t\t\t\t#frame1.update()\n\t\t\twhile self.stop==True:\n\t\t\t\tfor x in coor:\n\t\t\t\t\tpygame.draw.circle(self.DISPLAYSURF,self.Color, 
x,10,0)\n\t\t\t\tpygame.display.update()\n\t\t\t\tself.root.update()\n\t\t\t\tself.fpsClock.tick(5)\n\n\t\t\tpygame.display.update()\n\t\t\tself.root.update()\n\t\t\tself.fpsClock.tick(self.FPS)\n\n\t\t\n\n\n\n\tdef popNoble(self):\n\t\tself.wbuild=popNoble.PopBuild(self.root)\n\t\tself.build[\"state\"]=\"disabled\"\n\t\tself.root.wait_window(self.wbuild.top)\n\t\tself.build[\"state\"] = \"normal\"\n\n\t\tif self.wbuild.Submit==True:\n\n\t\t\tself.Framevalue=self.wbuild.Framevalue\n\t\t\tself.Tempvalue=self.wbuild.Tempvalue\n\t\t\tself.Numvalue=self.wbuild.Numvalue\n\t\t\tself.Typename=self.wbuild.Typename\n\t\t\tself.Typevalue=self.wbuild.Typevalue\n\t\t\tself.Color=self.colormap[self.Typename]\n\t\t\tself.Para=para.Paramap[self.Typename]\n\t\t\tself.Mass=para.Massmap[self.Typename]\n\t\t\tself.output=self.wbuild.output\n\t\t\tself.listBox.insert(tk.END,\"Frame number:\"+str(self.Framevalue))\n\t\t\tself.listBox.insert(tk.END,\"Temperature:\"+str(self.Tempvalue)+\" K\")\n\t\t\tself.listBox.insert(tk.END,\"Number of Atom:\"+str(self.Numvalue))\n\t\t\tself.listBox.insert(tk.END,\"Atom type:\"+str(self.Typename))\n\t\t\tself.Start()\n\t\n\t\n\tdef Potential(self):\n\n\n\t\tpopup=tk.Toplevel(self.root)\n\t\tf = Figure(figsize=(5,5), dpi=100)\n\t\ta = f.add_subplot(111)\n\t\t#print(self.Para[0],self.Para[1])\n\t\tx=np.arange(0.5,1.0,0.0001)\n\t\t#a.set_ylim(-0.5,2)\n\t\tvLJ=np.vectorize(self.LJ)\n\t\t#vLJ=self.LJ\n\t\ta.plot(x,vLJ(x))\n\t\ta.set_title (\"Potential\", fontsize=16)\n\t\ta.set_ylabel(\"U(r)\", fontsize=14)\n\t\ta.set_xlabel(\"r\", fontsize=14)\t\n\t\t#a.set_xlim([0.7,2])\n\t\tself.Canvasformat(popup, f)\n\n\n\tdef LJ(self,x):\n\t\tif x>0.0:\n\t\t\ttp=self.Para[1]/x\n\t\t\treturn self.Para[0]*(tp**12-2*tp**6)\n\n\n\tdef TEnergy(self):\n\t\tEnergy=self.controller.GetEnergy()[0]\n\n\t\tif len(Energy)!=0:\n\t\t\tpopup=tk.Toplevel(self.root)\n\n\t\t\tf = Figure(figsize=(5,5), dpi=100)\n\t\t\ta = f.add_subplot(111)\n\t\t\t#print(self.Para[0],self.Para[1])\n\t\t\tx=np.arange(0.5,len(Energy),1)\n\n\t\t\ta.plot(x,Energy)\n\t\t\ta.set_title (\"Potential Energy\", fontsize=16)\n\t\t\ta.set_ylabel(\"Energy(J/mol)\", fontsize=14)\n\t\t\ta.set_xlabel(\"Time Step\", fontsize=14)\n\t\t\t\n\t\t\tself.Canvasformat(popup, f)\n\n\t\telse:\n\t\t\tmessagebox.showwarning('Oops', 'No simulations, please set up a simulation')\n\n\n\n\t\n\tdef Canvasformat(self, master, figure):\n\n\t\tcanvas = FigureCanvasTkAgg(figure, master)\n\t\tcanvas.draw()\n\t\tcanvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n\t\ttoolbar = NavigationToolbar2Tk(canvas, master)\n\t\ttoolbar.update()\n\t\tcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n\n\n\n\tdef displayItem(self, event):\n\t\tindex = self.listBox.curselection()\n\t\tlabel = self.listBox.get(index)\n\t\tself.listBox.delete(index)\t\t\t\t\t\n\n\n\tdef Zoom(self, Npart):\n\t\tsumx=0\n\t\tsumy=0\n\t\tcoor=[]\n\n\t\tfor i in range(0,len(self.L)):\n\t\t\tcoor.append([10*self.L[i][0],10*self.L[i][1]])\n\n\t\tfor i in range(0,len(self.L)):\n\t\t\tsumx+=coor[i][0]\n\t\t\tsumy+=coor[i][1]\n\t\tsumx/=Npart\n\t\tsumy/=Npart\n\t\tdeltax=PWIDTH/2-sumx\n\t\tdeltay=PHEIGHT/2-sumy\n\n\t\tfor i in range(0,len(self.L)):\n\t\t\tcoor[i][0]+=deltax\n\t\t\tcoor[i][1]+=deltay\n\t\t\tcoor[i][0]=int(coor[i][0])\n\t\t\tcoor[i][1]=int(coor[i][1])\n\t\t\t#self.file.write(str(coor[i][0])+\" \"+str(coor[i][1])+\",\")\n\t\t\tif (abs(coor[i][0])>PWIDTH or abs(coor[i][1])>PHEIGHT):\n\t\t\t\tmessagebox.showwarning('Oops', 'The parameter setting is not correct, 
please reset the parameters')\n\t\t\t\tself.stop=True\n\t\t#self.file.write(\"\\n\")\n\n\t\t#print(\"coor\", coor)\n\t\t#print(\"self.L\", self.L)\n\t\treturn coor\n\n\n\tdef formatwarning(self):\n\t\tmessagebox.showwarning('Oops', 'The format is not correct')\n\n\tdef Stop(self):\n\t\tself.stop=True\n\n\tdef Continue(self):\n\t\tself.stop=False\n\n\n\t\t\n\n\t\t\nif __name__ == \"__main__\":\n view = View(Controller() )\n #tk.mainloop()\n " }, { "alpha_fraction": 0.36276084184646606, "alphanum_fraction": 0.6292135119438171, "avg_line_length": 35.52941131591797, "blob_id": "03f5446657b94e2dba3dd501b372b7907aaef0f8", "content_id": "c5e3efc2ce0b53a38e4477b14d39b9984752f176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 623, "license_type": "no_license", "max_line_length": 111, "num_lines": 17, "path": "/include/para.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "\n#10.22 = epsilon/K in units of Kelvin\n# also, note that this is same as epsilon / Boltzmann's constant\n#so \"original\" epsilon is approx. 1.41e-22 J\n\n\ncolormap={\"He\":(255, 179, 179),\"Ne\":(255, 51, 0),\"Ar\":(102, 204, 255),\"Kr\":(51, 204, 204),\"Xe\":(204, 102, 255)}\n##\"He\":(EPSILON, SIGMA)\nParamap={\"He\":(10.22, 2.556),\"Ne\":(36.68, 2.79),\"Ar\":(120.0, 3.38),\"Kr\":(171.0, 3.60),\"Xe\":(221.0, 4.10)}\n\nMassmap={\"He\":4.0026,\"Ne\":20.179,\"Ar\":39.948,\"Kr\":83.80,\"Xe\":131.30}\n\nkb=1.38064852e-23\navogadro=6.0221409e+26 #gram/mol\nmHe=6.6464764e-27 #in gram\nEPSkb=1.41102279e-22 #epsilon*kb kb=1.38e-23 Jk^-1\nSIGMA=2.556\nEPSILON=10.22 \n" }, { "alpha_fraction": 0.5798400044441223, "alphanum_fraction": 0.6387199759483337, "avg_line_length": 19.682119369506836, "blob_id": "ff7c56194a922075cf9a29247e48ff4ea4c231b1", "content_id": "ec76fe85fc8cb00de6a6fc0a3601f770855e9caf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3125, "license_type": "no_license", "max_line_length": 128, "num_lines": 151, "path": "/include/testcase.py", "repo_name": "sisi0205/OO_software", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python3\n\nimport unittest\nimport sys\nsys.path.append('include')\nimport model\nimport math\nimport random\n\n\n\nclass Testmodel(unittest.TestCase):\n\t\"\"\"docstring for Testmodel\"\"\"\n\n\tdef setUp(self):\n\t\tself.model=model.Model()\n\t\tself.Npart=2\n\t\tself.temp=300\n\t\tself.boxsize=20\n\t\tself.para=(10.22, 2.556)\n\t\tself.mass=4.0026\n\t\tself.dt=1\n\t\n\n\n\tdef testAcceleration(self):\n\t\tboxsizeX=4.0896\n\t\tboxsizeY=6.134399999999999\n\t\tEPSkb=self.para[0]*1.38e-23\n\t\tSIGMA=self.para[1]\n\t\tmass=self.mass/6.0221409e+26\n\t\tself.model.InitVelo(self.Npart,self.boxsize,self.temp, self.para)\n\n\t\tdef coorgen():\n\t\t\tcoor1=[random.uniform(0,10),random.uniform(0,10)]\n\t\t\tcoor2=[random.uniform(0,10),random.uniform(0,10)]\n\t\t\twhile coor1==coor2:\n\t\t\t\tcoor1=[random.uniform(0,10),random.uniform(0,10)]\n\t\t\t\tcoor2=[random.uniform(0,10),random.uniform(0,10)]\n\t\t\treturn [coor1,coor2]\n\t\t\t\t\t\n\t\t\n\t\tfor i in range(1,1000):\n\t\t\tcoor=coorgen()\n\t\t\ttacc=self.model.acceleration(2,coor,self.boxsize,self.para, self.mass)\n\t\t\tacc=[]\n\n\t\t\tfor i in range(0,2):\n\t\t\t\tacc.append([0,0])\n\n\t\t\tfor i in range(0,1):\n\t\t\t\tfor j in range(i+1, 2):\n\n\t\t\t\t\txij=coor[i][0]-coor[j][0]\n\t\t\t\t\tyij=coor[i][1]-coor[j][1]\n\t\t\t\t\t\n\n\t\t\t\t\tif xij>=0.5*boxsizeX:\n\t\t\t\t\t\txij-=boxsizeX\n\t\t\t\t\tif 
xij<-0.5*boxsizeX:\n\t\t\t\t\t\txij+=boxsizeX\n\t\t\t\t\tif yij>=0.5*boxsizeY:\n\t\t\t\t\t\tyij-=boxsizeY\n\t\t\t\t\tif yij<-0.5*boxsizeY:\n\t\t\t\t\t\tyij+=boxsizeY\n\n\t\t\t\t\trij2=xij*xij+yij*yij\n\t\t\t\t\trij6=rij2*rij2*rij2\n\t\t\t\t\trij12=rij6*rij6\n\t\t\n\t\t\t\t\ttp2=SIGMA*SIGMA/rij2\n\t\t\t\t\ttp6=tp2*tp2*tp2\t\t\n\n\t\t\t\t\tFfactor=24*EPSkb*tp6*(2*tp6-1)/rij2\n\t\t\t\t\taccel=Ffactor/(mass*1e10)\n\n\t\t\t\t\tacc[i][0]+=accel*xij\n\t\t\t\t\tacc[i][1]+=accel*yij\n\n\t\t\t\t\tacc[j][0]-=accel*xij\n\t\t\t\t\tacc[j][0]-=accel*yij\n\t\t\t\n\n\t\t\tself.assertEqual(acc,tacc)\n\n\n\n\n\tdef testVerlet(self):\n\n\t\tcoor=self.model.Lattice(self.Npart, self.boxsize, self.para)\n\t\tprecoor=self.model.InitVelo(self.Npart,self.boxsize,self.temp, self.para)\n\t\tnewcoor=coor\n\t\t#print(coor) \n\t\tfor i in range(1,1000):\n\t\t\t\n\n\t\t\tacc=self.model.acceleration(self.Npart,coor,self.boxsize,self.para, self.mass)\n\t\t\n\t\t\tfor i in range(0,self.Npart):\n\t\t\t\tnewcoor[i][0]=2*coor[i][0]-precoor[i][0]+acc[i][0]*self.dt*self.dt\n\t\t\t\tnewcoor[i][1]=2*coor[i][1]-precoor[i][1]+acc[i][1]*self.dt*self.dt\n\n\t\t\ttcoor=self.model.Verlet(self.Npart, coor, self.boxsize,self.para,self.mass)\n\t\t\tself.assertEqual(len(tcoor),len(newcoor))\n\t\t\tprecoor=coor\n\t\t\tcoor=newcoor\n\n\n\tdef testInitVelo(self):\n\t\t###InitVelo() will get the random velocity, it is hard the test the random number, i just tested the size of the return list. \n\n\t\tfor i in range(1,100):\n\t\t\tprecoor=self.model.InitVelo(i, self.boxsize,self.temp, self.para)\n\t\t\tself.assertEqual(i,len(precoor))\n\t\t\n\t\n\tdef testLattice(self):\n\n\t\tfor i in range(2,1000):\n\t\t\tcoor=self.model.Lattice(i, self.boxsize, self.para)\n\t\t\tL=[]\n\n\t\t\tnum=int(math.sqrt(i))\n\t\t\ttpx=0\n\t\t\ttpy=0\n\t\t\tgap=0.8*2.556\n\n\t\t\tfor k in range(0,num):\n\t\t\t\ttpx+=gap\n\t\t\t\ttpy=0\n\t\t\t\n\t\t\t\tfor j in range(0,num):\n\t\t\t\t\ttpy+=gap\n\t\t\t\t\tL.append([tpx,tpy])\n\t\t\textr=i-num*num\n\t\t\ttpx+=gap\n\t\t\ttpy=0\n\n\t\t\tfor k in range(0,extr):\n\t\t\t\ttpy+=gap\n\t\t\t\tL.append([tpx,tpy])\n\n\t\t\t#print(coor)\n\t\t\tself.assertEqual(L,coor)\n\t\t\t\n\t\t\n\t\t\n\nif __name__==\"__main__\":\n\tunittest.main()\n\t\t" } ]
7
ProjetoVeritas/VerifyVeritas
https://github.com/ProjetoVeritas/VerifyVeritas
a0ab4c1f039b0bfe9a4eaacc1460d98235c24140
c293e921dc3fcc584d920dcfcd81c9e2809102e2
97516cb2071a21d9674bc47d8c5a48ad18927f23
refs/heads/main
2023-03-06T06:22:17.552922
2021-02-14T13:33:34
2021-02-14T13:35:07
332,066,103
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.516853928565979, "alphanum_fraction": 0.7191011309623718, "avg_line_length": 17, "blob_id": "33b552ac87730ec2f8f058c0c10dc8e31ef5a028", "content_id": "9bbaacaf8cb39bad84f07811ef7b7874a302db26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 89, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/requirements.txt", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "Flask==1.1.2\nFlask-RESTful==0.3.8\nelasticsearch==7.10.1\ngunicorn==20.0.4\nrequests==2.25.1" }, { "alpha_fraction": 0.5462598204612732, "alphanum_fraction": 0.5698819160461426, "avg_line_length": 28.882352828979492, "blob_id": "08dd28eb803fdf46ca4f5f4731729c1534cde951", "content_id": "27f206933a121cafd5a8f3c52ecfb11224e96d88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1016, "license_type": "no_license", "max_line_length": 78, "num_lines": 34, "path": "/flaskapp/routes/verify_text.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, request\n\n\nclass VerifyText(Resource):\n # Receives\n # {\n # text: 'TextToVerify'\n # }\n\n def __init__(self, es_client):\n self.es_client = es_client\n\n def post(self):\n args = request.get_json()\n\n text = args['text']\n\n response_get = self.es_client.get_answer_by_exact_text(text)\n\n # Text not registered\n if response_get['status'] == 'NOT_REGISTERED':\n response_register = self.es_client.register(text)\n\n if response_register['status'] == 'SUCCESS':\n return {'code': 200, 'message': 'SUCCESS_NOT_REGISTERED'}, 200\n\n if response_register['status'] == 'ERROR':\n return {'code': 500, 'message': 'ERROR_NOT_REGISTERED'}, 500\n\n # Text not answered yet\n if response_get['status'] == 'NOT_ANSWERED':\n return {'code': 200, 'message': 'SUCCESS_NOT_ANSWERED'}, 200\n\n return {'code': 200, 'message': 'ANSWERED', 'data': response_get}, 200\n" }, { "alpha_fraction": 0.5473684072494507, "alphanum_fraction": 0.5726315975189209, "avg_line_length": 24.052631378173828, "blob_id": "2501b250f60008e2560772fb2b5c633ffdfda9ec", "content_id": "a6104837ee597c272bc4f7dd6fadee4ac610e6e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 85, "num_lines": 19, "path": "/flaskapp/routes/get_by_id.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, request\n\n\nclass GetById(Resource):\n\n def __init__(self, es_client):\n self.es_client = es_client\n\n def get(self):\n args = request.get_json()\n\n id = args['id']\n\n response_get = self.es_client.get_by_id(id)\n\n if response_get['status'] == 'ERROR':\n return {'code': 500, \"message\": \"ERROR_GET_BY_ID\"}, 500\n\n return {'code': 200, 'message': 'SUCCESS', 'data': response_get['data']}, 200" }, { "alpha_fraction": 0.6395705342292786, "alphanum_fraction": 0.6426380276679993, "avg_line_length": 26.16666603088379, "blob_id": "df8694f9cd11f88186022c11e4370c8adb2bdbf3", "content_id": "0c07b4077bffdce6a0ae6116c9af1b13122de4c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 64, "num_lines": 48, "path": "/flaskapp/app.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "import 
sys\nsys.path.append('../') # Must have a best solution\n\nimport os\n\nfrom flask import Flask\nfrom flask_restful import Api\n\nfrom flaskapp.DAO.elasticsearch import ESClient\n\nfrom flaskapp.routes.verify_text import VerifyText\nfrom flaskapp.routes.register_answer import RegisterAnswer\nfrom flaskapp.routes.get_texts import GetTexts\nfrom flaskapp.routes.get_by_id import GetById\nfrom flaskapp.routes.verify_media import VerifyMedia\n\nuser = os.getenv('user')\npassword = os.getenv('password')\nhost = os.getenv('host')\n\napp = Flask(__name__)\n\nes_client = ESClient(user, password, host)\n\napi = Api(app)\n\napi.add_resource(VerifyMedia,\n '/receive_media',\n resource_class_kwargs={'es_client': es_client})\n\napi.add_resource(VerifyText,\n '/receive_text',\n resource_class_kwargs={'es_client': es_client})\n\napi.add_resource(RegisterAnswer,\n '/register_answer',\n resource_class_kwargs={'es_client': es_client})\n\napi.add_resource(GetTexts,\n '/get_texts',\n resource_class_kwargs={'es_client': es_client})\n\napi.add_resource(GetById,\n '/get_by_id',\n resource_class_kwargs={'es_client': es_client})\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n" }, { "alpha_fraction": 0.6048387289047241, "alphanum_fraction": 0.6048387289047241, "avg_line_length": 61, "blob_id": "875024480467a1cb204b3e6c817309310b5d5520", "content_id": "f8608037cee7dfcf1423fe8099a64bb8a319d320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 95, "num_lines": 2, "path": "/flaskapp/support/clean_video_data.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "def clean_video_data(text):\n return ' '.join(' '.join(text.split('###Video_OCR###')).split('###Audio_Transcription###'))\n" }, { "alpha_fraction": 0.7663230299949646, "alphanum_fraction": 0.7663230299949646, "avg_line_length": 17.1875, "blob_id": "9271a91888d914d64154f12010743c8ae2582c21", "content_id": "94aa53c1a7252e67672cc4ca3b0e282f96a73192", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 582, "license_type": "no_license", "max_line_length": 68, "num_lines": 32, "path": "/README.md", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "# Veritas Verify API\n\nThis API is responsible for different routes in the Projeto Veritas:\n\n* Receive new fakes and register in the DB\n\n* Answer verify queries, returning the answer for fakes\n\n* Give available fakes to answer\n\n* Update unanswered fake entries in DB\n\n## Environment Variables\n\n* user: Elasticsearch user\n\n* password: Elasticsearch password\n\n* host: Elasticsearch host\n\n* TIKA_SERVER: Tika text extractor host\n\n* TRANSCRIBE_SERVER: Transcriber service host\n\n* VIDEOTRANSCRIPTIONOCR_SERVER: Video transcription/OCR server\n\n## How to run\n\n```shell script\ncd flaskapp\nflask run\n```\n" }, { "alpha_fraction": 0.35157510638237, "alphanum_fraction": 0.35379645228385925, "avg_line_length": 41.68965530395508, "blob_id": "5f26b2282705cf0bd2d2d55648ac14e98527e31e", "content_id": "885ae29ab0c84207232dee5fd9e438927e0a461e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4952, "license_type": "no_license", "max_line_length": 102, "num_lines": 116, "path": "/flaskapp/DAO/elasticsearch.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "from elasticsearch import 
Elasticsearch\nfrom elasticsearch import exceptions\n\nfrom flaskapp.support.hasher import hashtext\n\n\nclass ESClient:\n\n def __init__(self, user, password, host):\n self.es = Elasticsearch(\n [f'https://{user}:{password}@{host}'],\n verify_certs=False\n )\n\n def register(self, text):\n response = self.es.index(index='veritasdata', id=hashtext(text), body={'original_text': text})\n if response['result'] == 'created':\n return {'status': 'SUCCESS'}\n return {'status': 'ERROR'}\n\n def verify_registered_text_media(self, media_id):\n # This function verifies if a media id hash has a correspondent text id hash\n # [ENHANCEMENT] Must be optimized to only bring the linked_text_id\n try:\n response = self.es.get(index='veritasmedia', id=media_id)['_source']\n # If there's a linked text id to the media, return it\n return {\n 'status': 'SUCCESS',\n 'data': response['linked_text_id']}\n\n except exceptions.NotFoundError:\n # There's no registered text id for the media\n return {\n 'status': 'NOT_REGISTERED',\n 'data': None}\n\n def register_text_to_media(self, data_type, media_hash, text_hash, b64audio=''):\n\n response = self.es.index(index='veritasmedia', id=media_hash,\n body={'type': data_type,\n 'linked_text_id': text_hash,\n 'audiob64ogg': b64audio\n })\n\n if response['result'] == 'created':\n return {'status': 'SUCCESS'}\n return {'status': 'ERROR'}\n\n def get_answer_by_exact_text(self, text):\n try:\n response = self.es.get(index='veritasdata', id=hashtext(text))['_source']\n if 'answer' not in response.keys():\n return {\n 'status': 'NOT_ANSWERED',\n 'data': None}\n else:\n return {\n 'status': 'SUCCESS',\n 'data': response['answer']}\n\n except exceptions.NotFoundError:\n return {\n 'status': 'NOT_REGISTERED',\n 'data': None}\n\n def get_by_id(self, id):\n response = self.es.get(index='veritasdata', id=id)\n\n if response['found']:\n return {'status': 'SUCCESS', 'data': response['_source']}\n return {'status': 'ERROR', 'data': None}\n\n def add_answer_by_id(self, text_id, answer):\n response = self.es.update(index='veritasdata', id=text_id, body={\"doc\": {'answer': answer}})\n if response['result'] == 'updated':\n return {'status': 'SUCCESS'}\n return {'status': 'ERROR'}\n\n def get_random_unanswered_texts(self):\n response = self.es.search(index='veritasdata',\n body={\n \"size\": 10,\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {\n \"boost\": 1.0\n }\n }\n ],\n \"filter\": [\n {\n \"bool\": {\n \"must_not\": {\n \"exists\": {\n \"field\": \"answer\"\n }\n }\n }\n }\n ]\n }\n },\n \"random_score\": {},\n \"score_mode\": \"sum\"\n }\n }\n })\n\n if response['_shards']['failed'] == 0:\n return {'status': 'SUCCESS', 'data': response['hits']['hits']}\n\n return {'status': 'ERROR', 'data': None}\n" }, { "alpha_fraction": 0.6736842393875122, "alphanum_fraction": 0.7052631378173828, "avg_line_length": 18, "blob_id": "6c52002fa2b1f86303a749fc2efb220045aebfd2", "content_id": "cdec26f24a4b9bed811333c6c31d4f198b84c69d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 57, "num_lines": 5, "path": "/flaskapp/support/hasher.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "import hashlib\n\n\ndef hashtext(text):\n return hashlib.sha224(f\"{text}\".encode()).hexdigest()\n" }, { "alpha_fraction": 0.5522875785827637, "alphanum_fraction": 0.5718954205513, "avg_line_length": 28.190475463867188, 
"blob_id": "043965fc66c176a76d310e9380e566d7b1cd8177", "content_id": "2e6d5ff20d53b1947ac3e3869286b0751ccd936a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/flaskapp/routes/get_texts.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, request\n\n\nclass GetTexts(Resource):\n\n def __init__(self, es_client):\n self.es_client = es_client\n\n def get(self):\n response_get = self.es_client.get_random_unanswered_texts()\n\n if response_get['status'] == 'ERROR':\n return {'code': 500, \"message\": \"ERROR_GET_TEXTS\"}, 500\n\n output_results = []\n for result in response_get['data']:\n row = result['_source']\n row['id'] = result['_id']\n output_results.append(row)\n\n return {'code': 200, 'message': 'SUCCESS', 'data': output_results}, 200" }, { "alpha_fraction": 0.60550457239151, "alphanum_fraction": 0.6697247624397278, "avg_line_length": 35.5, "blob_id": "c184447cdadbafe307c3ac9544a3fa948c77f027", "content_id": "a38d234b0a63ecce2666dff7d15e3b8f55f81f13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 218, "license_type": "no_license", "max_line_length": 82, "num_lines": 6, "path": "/Dockerfile", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "FROM python:3.7-alpine\nWORKDIR /flaskapp\nADD ./flaskapp/ /flaskapp\nADD ./requirements.txt /flaskapp\nRUN pip install -r requirements.txt\nCMD [ \"gunicorn\", \"-w\", \"4\", \"--bind\", \"0.0.0.0:5000\", \"wsgi\", \"--timeout\", \"600\"]" }, { "alpha_fraction": 0.551203727722168, "alphanum_fraction": 0.5673733353614807, "avg_line_length": 42.484375, "blob_id": "a2ce603b369bccc7f70ffc9f801a73f5d501fdd3", "content_id": "3f35333d5b87ec078d6936d9efba8916987d95e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5566, "license_type": "no_license", "max_line_length": 137, "num_lines": 128, "path": "/flaskapp/routes/verify_media.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "import base64\nimport os\nimport requests\nfrom flask_restful import Resource, request\n\nfrom flaskapp.support.hasher import hashtext\nfrom flaskapp.support.clean_video_data import clean_video_data\n\ntikaserver = os.getenv('TIKA_SERVER')\nif tikaserver is None:\n tikaserver = 'http://localhost:9998'\ntranscribeserver = os.getenv('TRANSCRIBE_SERVER')\nif transcribeserver is None:\n transcribeserver = 'http://localhost:3800'\nvideotranscriptionocrserver = os.getenv('VIDEOTRANSCRIPTIONOCR_SERVER')\nif videotranscriptionocrserver is None:\n videotranscriptionocrserver = 'http://localhost:3680'\n\n\nclass VerifyMedia(Resource):\n # Receives\n # {\n # \"data\": 'base64image'\n # \"type\": 'image/jpeg' or 'audio/ogg; codecs=opus' or 'video/mp4'\n # }\n\n def __init__(self, es_client):\n self.es_client = es_client\n\n def post(self):\n args = request.get_json()\n\n # Get media encoded as base64\n mediabase64 = args['data']\n\n media_id = hashtext(mediabase64)\n\n # Verify if image data was already registered\n response_id_text_get = self.es_client.verify_registered_text_media(media_id)\n\n # Media is not connected to a text\n if response_id_text_get['status'] == 'NOT_REGISTERED':\n # If not registered, proceed to get text\n\n if args['type'] == 'image/jpeg':\n # Decode media\n mediadata = 
base64.b64decode(mediabase64)\n\n # Get text from tika\n response = requests.put(f'{tikaserver}/tika', data=mediadata,\n headers={'Content-type': 'image/jpeg', 'X-Tika-OCRLanguage': 'por'})\n\n text = response.content.decode('utf8')\n\n if args['type'] == 'audio/ogg; codecs=opus':\n # Get audio transcription\n response = requests.post(f'{transcribeserver}/transcribe', json={'data': mediabase64},\n headers={'Content-Type': 'application/json'})\n # Decode text\n text = eval(response.content.decode('utf8'))['data']\n\n if args['type'] == 'video/mp4':\n # Get multiple data from video\n response = requests.post(f'{videotranscriptionocrserver}/extract_video', json={'data': mediabase64},\n headers={'Content-Type': 'application/json'})\n\n video_data = eval(response.content.decode('utf8'))['data']\n text = f'###Video_OCR### \\n ' \\\n f'{clean_video_data(video_data[\"video_ocr\"])} \\n ' \\\n f'###Audio_Transcription### \\n ' \\\n f'{clean_video_data(video_data[\"audio_transcription\"])}'\n\n # Get text hash\n text_id = hashtext(text)\n\n # Make link between media id (hash) and text id (hash) and audio, if audio or video\n if args['type'] == 'image/jpeg':\n response_media_text_link = self.es_client.register_text_to_media(args['type'], media_id, text_id)\n if args['type'] == 'audio/ogg; codecs=opus':\n response_media_text_link = self.es_client.register_text_to_media(args['type'], media_id, text_id, mediabase64)\n if args['type'] == 'video/mp4':\n response_media_text_link = self.es_client.register_text_to_media(args['type'], media_id, text_id, video_data['audiob64'])\n\n # If linking was successful\n if response_media_text_link['status'] == 'SUCCESS':\n # If the link between the media and text was made, try to find if text was already answered\n\n # Here it finds if the text was already answered\n response_get = self.es_client.get_answer_by_exact_text(text)\n # Text not registered\n if response_get['status'] == 'NOT_REGISTERED':\n response_register = self.es_client.register(text)\n # Success in registration\n if response_register['status'] == 'SUCCESS':\n return {'code': 200, 'message': 'SUCCESS_NOT_REGISTERED'}, 200\n # Error in registration\n if response_register['status'] == 'ERROR':\n return {'code': 500, 'message': 'ERROR_NOT_REGISTERED'}, 500\n\n # Text not answered yet\n if response_get['status'] == 'NOT_ANSWERED':\n return {'code': 200, 'message': 'SUCCESS_NOT_ANSWERED'}, 200\n\n # Text answered, return response\n return {'code': 200, 'message': 'ANSWERED', 'data': response_get}, 200\n\n # Error in link registration\n if response_media_text_link['status'] == 'ERROR':\n return {'code': 500, 'message': 'ERROR_NOT_REGISTERED'}, 500\n\n # Media is connected to a text\n # Get id of linked text\n id_text = response_id_text_get['data']\n\n answer_get_by_id = self.es_client.get_by_id(id_text)\n\n # If there's error when getting\n if answer_get_by_id['status'] == 'ERROR':\n return {'code': 500, 'message': 'ERROR_GET_BY_ID'}, 500\n\n # If there's an entry but it's not answered\n if 'answer' not in answer_get_by_id['data'].keys():\n return {'code': 200, 'message': 'SUCCESS_NOT_ANSWERED'}, 200\n\n # If there's an entry and it was already answered\n response_get = answer_get_by_id['data']['answer']\n\n return {'code': 200, 'message': 'ANSWERED', 'data': response_get}, 200\n" }, { "alpha_fraction": 0.5488958954811096, "alphanum_fraction": 0.5678233504295349, "avg_line_length": 22.481481552124023, "blob_id": "96c4c3ff20a983eed962186c2a0dc2a008554a5a", "content_id": 
"9d4ab55df9ba5952a935fb6a76c5ba781a966015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 75, "num_lines": 27, "path": "/flaskapp/routes/register_answer.py", "repo_name": "ProjetoVeritas/VerifyVeritas", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, request\n\n\nclass RegisterAnswer(Resource):\n # Receives\n # {\n # text: 'TextToVerify',\n # answer: 'AnswerToText'\n # }\n\n def __init__(self, es_client):\n self.es_client = es_client\n\n def post(self):\n args = request.get_json()\n\n print(args)\n\n text_id = args['id']\n answer = args['answer']\n\n response = self.es_client.add_answer_by_id(text_id, answer)\n\n if response['status'] == 'ERROR':\n return {'code': 500, \"message\": \"ERROR_ANSWER_REGISTERED\"}, 500\n\n return {'code': 200, \"message\": \"ANSWER_REGISTERED\"}, 200\n" } ]
12
flaskoski/ec2-stop
https://github.com/flaskoski/ec2-stop
fceaa5178aa10ba341802505c0b26af81b72ed79
c278b6ad3710764355c3fefa359c876fd3fb5136
5e23f6e06d34e0384907a7e9739aec4e3aec66f2
refs/heads/master
2023-02-07T14:50:52.421746
2021-01-05T03:10:52
2021-01-05T03:10:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.673057496547699, "alphanum_fraction": 0.6851664781570435, "avg_line_length": 23.774999618530273, "blob_id": "42100e2adc2abf6e67b0422486b8b1dd94857e1e", "content_id": "405458ee2728b26f2c980a710c2fd5e7a2fa4eb1", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 993, "license_type": "permissive", "max_line_length": 87, "num_lines": 40, "path": "/EC2_start_function.py", "repo_name": "flaskoski/ec2-stop", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport sys\nimport boto3\n\nLOGGER = logging.getLogger()\nfor h in LOGGER.handlers:\n LOGGER.removeHandler(h)\n\nHANDLER = logging.StreamHandler(sys.stdout)\nFORMAT = '%(levelname)s %(asctime)s [%(filename)s:%(funcName)s:%(lineno)d] %(message)s'\nHANDLER.setFormatter(logging.Formatter(FORMAT))\nLOGGER.addHandler(HANDLER)\nLOGGER.setLevel(logging.INFO)\n\n# get environment variable\nINSTANCE_ID = os.environ['INSTANCE_ID']\n\nEC2 = boto3.resource('ec2')\nEC2INSTANCE = EC2.Instance(INSTANCE_ID)\nEC2CLIENT = EC2.meta.client\n\n\ndef main(event, context):\n \"\"\"\n main function\n \"\"\"\n try:\n startEC2()\n except Exception as error:\n LOGGER.exception(error)\n\ndef startEC2():\n #check if instance is currently in the stopped state\n if EC2INSTANCE.state['Name'] == 'stopped':\n EC2INSTANCE.start()\n LOGGER.info('Start InstanceID: ' + INSTANCE_ID)\n return\n else:\n LOGGER.info('InstanceID \"' + INSTANCE_ID + '\" is not stopped!')\n" }, { "alpha_fraction": 0.7679814100265503, "alphanum_fraction": 0.7726218104362488, "avg_line_length": 19.571428298950195, "blob_id": "ab5e25a1fa93df2f32840f9a54aa270bec3fb42f", "content_id": "da9f11ec30741e7eb6012a0812b2b7bf2e10f78e", "detected_licenses": [ "MIT", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 433, "license_type": "permissive", "max_line_length": 77, "num_lines": 21, "path": "/README.md", "repo_name": "flaskoski/ec2-stop", "src_encoding": "UTF-8", "text": "# EC2 Instance Stop Function\n\nThis function is a function to stop the EC2 instance.\n\nAfter deployment, CloudWatch Events is set.\nFunction is executed in a single interval day by default.\nThis setting can be changed.\n\n\n# How to use it?\n\nPlease set the Instance ID that you want to stop in the environment variable.\n\nex) i-XXXXXXXXXXXX\n\n\n# License\n\nMIT License (MIT)\n\nThis software is released under the MIT License, see LICENSE.txt." } ]
2
R-Tatara/RaspberryPi
https://github.com/R-Tatara/RaspberryPi
3e51f8ef78224a3c0fa837a92969574babfd79ce
f5b491d974953892eb217078edcc02bff4895e1b
9a54f9cb42b4b8bc7a05a40d82af50c25c4ddf6e
refs/heads/master
2020-04-24T20:19:07.428415
2020-03-01T13:09:54
2020-03-01T13:09:54
172,239,532
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5907990336418152, "alphanum_fraction": 0.5956416726112366, "avg_line_length": 16.20833396911621, "blob_id": "12a7bd117ce0383c653dea75aaf8bbc221fa4873", "content_id": "08dad020b46d67340645d32617587cd2d3ce2206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 48, "num_lines": 24, "path": "/Time/time.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\nfrom datetime import datetime\nfrom time import sleep\n\ndef now_time():\n d = datetime.now()\n year = d.year\n month = d.month\n day = d.day\n hour = d.hour\n minute = d.minute\n second = d.second\n datetime_str = d.strftime('%Y/%m/%d %H:%M:%S')\n print(datetime_str)\n return\n\ndef main():\n while True:\n now_time()\n sleep(1)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5626311898231506, "alphanum_fraction": 0.5962211489677429, "avg_line_length": 38.69444274902344, "blob_id": "6168fa16c52841ee8604d0bd00ccd73fd3fb25d2", "content_id": "d5b73f9debd5e83bf9346494c76d9d8831b97fd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 90, "num_lines": 36, "path": "/Wunderground/wunderground.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\nimport requests\n\ndef main():\n ApiUrl = 'http://api.wunderground.com/api/7129c16bae9ee521/forecast/q/JP/Susenji.json'\n r = requests.get(ApiUrl)\n forecast = r.json\n list = ['forecast','simpleforecast','forecastday','0']\n\n year = forecast()[list[0]][list[1]][list[2]][int(list[3])]['date']['year']\n year = int(year)\n month = forecast()[list[0]][list[1]][list[2]][int(list[3])]['date']['month']\n month = int(month)\n day = forecast()[list[0]][list[1]][list[2]][int(list[3])]['date']['day']\n day = int(day)\n day_week = forecast()[list[0]][list[1]][list[2]][int(list[3])]['date']['weekday_short']\n weather = forecast()[list[0]][list[1]][list[2]][int(list[3])]['conditions']\n high_temp = forecast()[list[0]][list[1]][list[2]][int(list[3])]['high']['celsius']\n high_temp = int(high_temp)\n low_temp = forecast()[list[0]][list[1]][list[2]][int(list[3])]['low']['celsius']\n low_temp = int(low_temp)\n humidity = forecast()[list[0]][list[1]][list[2]][int(list[3])]['avehumidity']\n humidity = int(humidity)\n rain_prob = forecast()[list[0]][list[1]][list[2]][int(list[3])]['pop']\n rain_prob = int(rain_prob)\n\n print (year, '/', month, '/',day, '(', day_week, ')')\n print ('Weather : ', weather)\n print ('High temp: ', high_temp)\n print ('Low temp : ', low_temp)\n print ('Humidity : ', humidity)\n print ('Rain prob: ', rain_prob)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5768262147903442, "alphanum_fraction": 0.6146095991134644, "avg_line_length": 19.894737243652344, "blob_id": "e154ea3f14e2b10df35ab7a123e792f295c71cbc", "content_id": "ea3db90b680e07c02bc3b5fd2c7358273df41f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 79, "num_lines": 19, "path": "/Sound/test_wav.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\nimport pygame\nfrom pygame.locals import *\n\n#Play sound.wav\ndef main():\n 
pygame.mixer.init(frequency = 44100, size = -16, channels = 2, buffer = 1024)\n sound = pygame.mixer.Sound(\"../../sound/8bit_2.wav\")\n sound.play()\n\n try:\n while True:\n pass\n except KeyboardInterrupt:\n pygame.mixer.quit()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.38367345929145813, "alphanum_fraction": 0.4693877696990967, "avg_line_length": 19.41666603088379, "blob_id": "1f91e828550756f47d2788c5cda0ed198d069c8a", "content_id": "419c6ae3bd97cbf9344d3d61a4e47b5b07815e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 253, "license_type": "no_license", "max_line_length": 29, "num_lines": 12, "path": "/AccSensor/Readme.txt", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "$ python3 acc_sensor.py\n\nacc Pi\n 1 VDD 1 3.3V\n 2 GND 6 GND\n 3 SPC 23 SPI_CSLK\n 4 SDI 19 SPI_MOSI\n 5 SDO 21 SPI_MISO\n 6 CS 24 SPI_CE0\n 7\n\nRefference:Raspberry Pi 電子工作\n" }, { "alpha_fraction": 0.5703125, "alphanum_fraction": 0.5807291865348816, "avg_line_length": 16.454545974731445, "blob_id": "7f47661011f79abda6559cb0e7f00cd58e5da5c6", "content_id": "179dff21b366c07d8a1dd115dd584802d264d1f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 52, "num_lines": 22, "path": "/FileInput/file_read.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\nimport sys\n\n#Read file\ndef main():\n if (len(sys.argv) != 2):\n print(\"Usage : $ python file_input.py filename\")\n sys.exit\n\n scriptname = sys.argv[0]\n filename = sys.argv[1]\n\n file = open(filename, \"r\")\n lines = file.readlines()\n file.close()\n\n for line in lines:\n print(line, end = \"\")\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6198083162307739, "alphanum_fraction": 0.6405750513076782, "avg_line_length": 24.040000915527344, "blob_id": "571081c3d4765ad3f5188aa944eeea5e8bbf2d99", "content_id": "a5616ea37345a1ddd5a479951f028991cbb05956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 123, "num_lines": 25, "path": "/Talk/jtalk.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: UTF-8\nimport subprocess\n\n#Speak with female voice of jtalk\ndef jtalk(text):\n word = '\"' + text + '\"'\n command = \"echo \\\"%s\\\"\" % word\n command += \" | open_jtalk -x /var/lib/mecab/dic/open-jtalk/naist-jdic -m ./Voice_mei/mei_normal.htsvoice -ow /dev/stdout\"\n command += \" | aplay --quiet\"\n\n proc= subprocess.Popen(\n command,\n shell = True,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n print (word)\n\ndef main():\n text = 'おはようございます。ただいま、12月24日日曜日22時23分、今日の天気はくもり、気温は11度、降水確率は10パーセントです。'\n jtalk(text)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.556034505367279, "avg_line_length": 16.769229888916016, "blob_id": "834c10c12665f359509a35ad848b8967cedbdb40", "content_id": "04f39696ff2dbe8feb877bc36ed02c11b983d7dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 232, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/Arduino/Serial_sample.ino", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": 
"#Sample program for Arduino\nvoid setup() {\n Serial.begin(9600);\n}\n\nvoid loop() {\n for (int n = 0; n <1024; n++) {\n //Serial.write(n); //up to 8bit\n //int x = analogRead(A0);\n Serial.println(n, DEC);\n delay(10);\n }\n}\n\n" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5479876399040222, "avg_line_length": 15.149999618530273, "blob_id": "38af08bb301c03de36aa86241eab531387d57f8c", "content_id": "555db986acd7db02ac66fe4d09a5506eb546fd61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 27, "num_lines": 20, "path": "/ClassSample/class_sample.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\n\nclass MyClass:\n def __init__(self):\n self.data1 = 1\n self.data2 = []\n\n def func(self, data):\n self.data2.append(data)\n return 'Hi!'\n\ndef main():\n c = MyClass()\n print (c.data1)\n print (c.func('Python!'))\n print (c.data2)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4956970810890198, "alphanum_fraction": 0.5215146541595459, "avg_line_length": 22.04166603088379, "blob_id": "7acfa03794e74025defa1f45e5247c78e600a499", "content_id": "0d2ed9d84456724ee21ae5be5a80a6e3f16f7860", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 72, "num_lines": 24, "path": "/AccSensor/acc_sensor.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nimport wiringpi as pi\r\nimport time, math\r\nimport lis3dh\r\n\r\ndef main():\r\n count = 0\r\n SPI_CS = 0\r\n SPI_SPEED = 100000\r\n pi.wiringPiSPISetup (SPI_CS, SPI_SPEED)\r\n accel = lis3dh.lis3dh( SPI_CS )\r\n\r\n while True:\r\n ( acc_x, acc_y, acc_z ) = accel.get_accel()\r\n ( x_angle, y_angle ) = accel.get_angle()\r\n print (count, \" A_X : \", acc_x, \" A_Y : \", acc_y, \"A_Z : \", acc_z )\r\n print (\"X_angle : \", x_angle, \" Y_angle : \", y_angle, \"\\n\" )\r\n #time.sleep(0.5)\r\n count += 1\r\n\r\nif __name__== \"__main__\":\r\n main()\r\n\r\n\r\n" }, { "alpha_fraction": 0.552971601486206, "alphanum_fraction": 0.5762273669242859, "avg_line_length": 18.350000381469727, "blob_id": "215149726b56227e62873e7f090da0ff4ea020b8", "content_id": "800cb8a445a78035c05b80a7c2aa3a2db6a0b6fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 43, "num_lines": 20, "path": "/FileOutput/time_write.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\nfrom datetime import datetime\nfrom time import sleep\nimport random\n\ndef main():\n log = open(\"time_output.txt\", \"w\")\n\n for i in range(10):\n now = str(datetime.now())\n data = random.randint(0, 1024)\n log.write(now + \" \" + str(data) + \"\\n\")\n print(\".\")\n sleep(.9)\n log.flush()\n log.close\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5939716100692749, "alphanum_fraction": 0.6046099066734314, "avg_line_length": 26.200000762939453, "blob_id": "5bcdac1d66837af2b7b9973659eb6bc0e61d0b7e", "content_id": "0d5a87f355fc7b4dbcf1ab5ae7ac745e74555d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1128, "license_type": "no_license", 
"max_line_length": 72, "num_lines": 40, "path": "/SocketIO/server.js", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "var http = require('http');\r\nvar socketio = require('socket.io');\r\nvar fs = require('fs'); //Filestream module\r\nvar exec = require('child_process').exec; //For linux command\r\nvar port = 8080;\r\nvar pi_data;\r\nvar pi_state = '1';\r\n\r\nvar server = http.createServer(serveron);\r\nserver.listen(port);\r\nvar io = socketio.listen(server);\r\nconsole.log('http websocket server started.');\r\n\r\n//Callbak function for http request\r\nfunction serveron(req,res) {\r\n if(req.url == '/favicon.ico'){\r\n return;\r\n }\r\n\r\n //console.log(req.method); //GET\r\n res.writeHead(200, {'Content-Type' : 'text/html'});\r\n res.end(fs.readFileSync(__dirname + '/index.html', 'utf-8'));\r\n}\r\n\r\n//Callback function for connection\r\nio.sockets.on('connection', function(socket){\r\n console.log('socket connected.');\r\n setInterval(function(){\r\n //Send data\r\n socket.emit('Pi_data', {value : pi_data});\r\n socket.emit('Pi_state', {value : pi_state});\r\n }, 1);\r\n});\r\n\r\n//Get data\r\nsetInterval(function(){\r\n exec('date +\"%Y/%m/%d %H:%M:%S.%3N\"', function(err, stdout, stderr){\r\n pi_data = stdout;\r\n });\r\n}, 1);\r\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.6580459475517273, "avg_line_length": 20.75, "blob_id": "e47370dbef46fa19dffa8c9622ba2c117e7f783e", "content_id": "dcbd14147444dcc317eadc62634daac02c5614c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/Arduino/SerialEcho.py", "repo_name": "R-Tatara/RaspberryPi", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- cording: utf-8 -*-\nimport serial\n\n#Read 1 byte from Arduino\ndef main():\n port = \"/dev/ttyACM0\"\n serialFromArduino = serial.Serial(port, 9600)\n serialFromArduino.flushInput()\n while True:\n input = serialFromArduino.readline()\n input_value = int(input)\n print(input_value)\n\nif __name__ == \"__main__\":\n main()\n" } ]
12
JackWei8/sz_security_housing
https://github.com/JackWei8/sz_security_housing
c884083b442108a1ec7e982125766165b83a6c87
f067485a494be8d00e615adfad50e1643e957af3
eaddf849a14a37b52073e8bd84bfb115eaf9875b
refs/heads/master
2023-03-18T15:52:44.341344
2021-02-06T14:54:50
2021-02-06T14:54:50
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.519280195236206, "alphanum_fraction": 0.7390745282173157, "avg_line_length": 21.882352828979492, "blob_id": "4d4c1bbb723126ead385ad75592714d2b6328dba", "content_id": "6d15b56bb9305c9362c2a2887285a4085124166d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1032, "license_type": "no_license", "max_line_length": 101, "num_lines": 34, "path": "/README.md", "repo_name": "JackWei8/sz_security_housing", "src_encoding": "UTF-8", "text": "# 深圳公租房爬虫\n按家庭人数分析区级排名\n\n## 目录\n\n1.使用python的scrapy框架爬取深圳公租房轮候库,数据保存在文件里;\n\n2.将文件数据导入mysql;\n\n3.使用python将文本数据导入mysql;\n\n4.使用ELK,将数据导入elasticsearch,通过kibana展示;\n\n5.kibana数据分析;\n\n## 爬取说明\n爬取网址:[http://www.szjs.gov.cn/bsfw/zdyw_1/zfbz/gxfgs/](http://www.szjs.gov.cn/bsfw/zdyw_1/zfbz/gxfgs/)\n\n![page1](https://github.com/tianduo4/sz_security_housing/blob/master/imgs/page_1.png)\n\n![page2](https://github.com/tianduo4/sz_security_housing/blob/master/imgs/page_2.png)\n\n2018年9月30日爬取结果data.txt\n\n 1\tBHJ005840\t3955877\t1\t南山区\n 2\tBHJ005866\t3955878\t1\t南山区\n 3\tBHJ021327\t3955879\t2\t南山区\n 4\tBHJ005848\t3955880\t1\t南山区\n 5\tBHJ006961\t3955881\t4\t南山区\n 6\tBHJ016656\t3955882\t1\t南山区\n 7\tBHJ002199\t3955883\t1\t南山区\n 8\tBHJ029628\t3955884\t3\t罗湖区\n 9\tBHJ016179\t3955885\t3\t盐田区\n 10\tBHJ022242\t3955886\t1\t罗湖区\n" }, { "alpha_fraction": 0.6505110263824463, "alphanum_fraction": 0.6983184814453125, "avg_line_length": 35.10714340209961, "blob_id": "f5686cee1807193040551aeb988da74522c213bc", "content_id": "dab0632da21443bd5b3424c376033b59b151fa78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3057, "license_type": "no_license", "max_line_length": 322, "num_lines": 84, "path": "/sz_security_housing/spiders/sz_security_housing.py", "repo_name": "JackWei8/sz_security_housing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom sz_security_housing.items import SzSecurityHousingItem\nfrom scrapy.http import FormRequest\nimport json\nimport time\n\nclass SzSecurityHousingSpider(scrapy.Spider):\n\t#爬虫名\n\tname = 'szsh'\n\n #爬虫域\n\tallowed_domains = ['zjj.sz.gov.cn']\n\n\tdefault_pageSize=100 #接口最大支持100\n\n\tstart_pageNum=1\n\n\tfetch_maxcount=26202\n\n\tdef start_requests(self):\n\t\turl='http://zjj.sz.gov.cn/bzflh/lhmcAction.do?method=queryYgbLhmcList&waittype=2'\n\n\t\theaders = {\n\t\t\t'Accept': 'application/json, text/javascript, */*; q=0.01',\n\t\t\t'Accept-Encoding': 'gzip, deflate',\n\t\t\t'Accept-Language': 'zh-CN,zh;q=0.9',\n\t\t\t'Connection': 'keep-alive',\n\t\t\t'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',\n\t\t\t'Cookie':'_trs_uv=jw3pizal_1323_iylm; pgv_pvi=6386012160; ftzjjszgovcn=0; Hm_lvt_ddaf92bcdd865fd907acdaba0285f9b1=1612591183; swfUrl=%2Fvideos%2Fcnill_polyfill.swf; session-cookie=59436614; JSESSIONID=FP517QIoSGPw-rxUZoeKY_kKonPBUSvoMYgHAMpaAK-lsOa6VpqD!-522403324; Hm_lpvt_ddaf92bcdd865fd907acdaba0285f9b1=1612592534',\n\t\t\t'Host': 'zjj.sz.gov.cn',\n\t\t\t'Origin': 'http://zjj.sz.gov.cn',\n\t\t\t'Referer': 'http://zjj.sz.gov.cn/bzflh/lhmcAction.do?method=queryYgbLhmcInfo&waittype=2',\n\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'\n\t\t}\n\n\t\tyield scrapy.FormRequest(\n\t\t\turl = url,\n\t\t\theaders = headers,\n\t\t\tformdata = {\"pageNumber\" : \"1\", \"pageSize\" : 
str(self.default_pageSize),\"waittype\":\"2\",\"num\":\"0\",\"shoulbahzh\":\"\",\"xingm\":\"\",\"idcard\":\"\",\"start_paix\":\"\",\"end_paix\":\"\"},\n\t\t\tmeta={'pageNum':self.start_pageNum,'pageSize':self.default_pageSize,\"headers\":headers},\n\t\t\tcallback = self.parse\n\t\t)\n\n\n\tdef parse(self,response):\n\t\tdata = json.loads(response.body_as_unicode())\n\t\t# print(data)\n\t\ttotal = data[\"total\"]\n\t\t# print(total)\n\t\tlist = data[\"rows\"]\n\t\titem=SzSecurityHousingItem()\n\t\tfor value in list:\n\t\t\titem['userid']=value['LHMC_ID']\n\t\t\titem['seqno']=value['PAIX']\n\t\t\titem['applyNo']=value['SHOULHZH']\n\t\t\titem['username']=value['XINGM']\n\t\t\t# print(value)\n\t\t\tyield item\n\t\turl = 'http://zjj.sz.gov.cn/bzflh/lhmcAction.do?method=queryYgbLhmcList&waittype=2'\n\t\tmeta=response.meta\n\t\tprepageNumber=meta[\"pageNum\"]\n\t\tpageSize=meta[\"pageSize\"]\n\t\theaders=meta[\"headers\"]\n\t\tcurrent_total=prepageNumber*pageSize\n\t\tprint('finish scrapy pageNumber:%s'%prepageNumber)\n\t\tprint('finish current total:%s'%current_total)\n\t\tprint(self.fetch_maxcount)\n\t\tprint(len(list))\n\t\tprint('pageSize:%s'%pageSize)\n\t\tpageNumber=prepageNumber+1\n\t\tif len(list) == pageSize and current_total<self.fetch_maxcount:\n\t\t\trequestdata={\"pageNumber\" : \"1\", \"pageSize\" : str(self.default_pageSize),\"waittype\":\"2\",\"num\":\"0\",\"shoulbahzh\":\"\",\"xingm\":\"\",\"idcard\":\"\",\"start_paix\":\"\",\"end_paix\":\"\"}\n\t\t\trequestdata['pageNumber']=str(pageNumber)\n\t\t\trequestdata['pageSize']=str(pageSize)\n\t\t\tmeta['pageNum']=pageNumber\n\t\t\t# print(requestdata)\n\t\t\tyield scrapy.FormRequest(\n\t\t\t\turl = url,\n\t\t\t\theaders = headers,\n\t\t\t\tformdata =requestdata,\n\t\t\t\tmeta=meta,\n\t\t\t\tcallback = self.parse\n\t\t\t)\n" }, { "alpha_fraction": 0.6333333253860474, "alphanum_fraction": 0.6888889074325562, "avg_line_length": 44, "blob_id": "1b2ce6c5ca03253233fcba6772117d115e58b30f", "content_id": "9fed77fbe03dd070af3ab796ec9a09f010eebdc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 186, "license_type": "no_license", "max_line_length": 85, "num_lines": 4, "path": "/db/analyse.sql", "repo_name": "JackWei8/sz_security_housing", "src_encoding": "GB18030", "text": "SELECT T.PLACE,T.NUM,COUNT(1) FROM T_PRH_DATA T \nWHERE T.SEQ_NO <=(SELECT D.SEQ_NO FROM T_PRH_DATA D WHERE D.APPLY_NO='BHR00058341')\nAND T.PLACE='宝安区' \nGROUP BY T.PLACE,T.NUM " }, { "alpha_fraction": 0.5929487347602844, "alphanum_fraction": 0.5961538553237915, "avg_line_length": 18.352941513061523, "blob_id": "573771f2770a801a1a3fddd1a5a659bead5fc6a5", "content_id": "dcd7cf8a0dec94228dc11c67a39b84e21e6231b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/sz_security_housing/items.py", "repo_name": "JackWei8/sz_security_housing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport scrapy\n\nclass SzSecurityHousingItem(scrapy.Item):\n #用户唯一id\n userid = scrapy.Field()\n #申请人名\n username = scrapy.Field()\n #轮候排位\n seqno = scrapy.Field()\n #备案回执号\n applyNo = scrapy.Field()\n #申请人数\n num = scrapy.Field()\n #户籍所在地\n place = scrapy.Field()\n" }, { "alpha_fraction": 0.6254295706748962, "alphanum_fraction": 0.6323024034500122, "avg_line_length": 31.36111068725586, "blob_id": "89337daec882b36c83e58e83e2c612f6dc390e75", "content_id": 
"c700a3f7798dafa19dd2a297b2df87e3f768cb43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 148, "num_lines": 36, "path": "/sz_security_housing/pipelines.py", "repo_name": "JackWei8/sz_security_housing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport time\nfrom urllib import request\nfrom lxml import etree\nimport re\n\nclass SzSecurityHousingPipeline(object):\n\n\n\tdef process_item(self, item, spider):\n\t\t# print(item)\n\t\turl='http://zjj.sz.gov.cn/bzflh/lhmcAction.do?method=queryDetailLhc&lhmcId=%s&waittype=2'%(item['userid'])\n\t\t# print(url)\n\t\ttry:\n\t\t\tresponse = request.urlopen(url,timeout=5)\n\t\t\tpage = response.read()\n\t\t\tpage = page.decode('utf-8')\n\t\t\tselector = etree.HTML(page)\n\t\t\tcontent=selector.xpath('//div[@class=\"leader_intro1\"]')[1].xpath('string(.)')\n\t\t\tplace = re.search('户籍所在区.*区',content).group().replace('户籍所在区:','')\n\t\t\titem['place']=place\n\t\t\tnum=len(selector.xpath('//div[@class=\"leader_intro1\"]'))-1\n\t\t\titem['num']=num\n\t\texcept Exception:\n\t\t\tprint (\"Error:%s\"%(item['seqno']))\n\t\t\tprint(repr(e))\n\n\t\t# else:\t\n\t\t# \tprint (\"Success:%s\"%(item['seqno']))\n\t\tret=str(item['userid'])+','+str(item['username'])+','+str(item['seqno'])+\",\"+str(item['applyNo'])+\",\"+str(item['num'])+\",\"+str(item['place'])+\"\\n\"\n\t\ttoday = time.strftime(\"%Y-%m-%d\",time.localtime(time.time()))\n\t\tsaveFile = open('data_'+today+'.txt','a') \n\t\tsaveFile.write(ret) \n\t\tsaveFile.close() \n\t\t# print(item)" }, { "alpha_fraction": 0.651442289352417, "alphanum_fraction": 0.6802884340286255, "avg_line_length": 45.33333206176758, "blob_id": "52a1aea42cbd05fe51d9ba1a01aaf12a73e26ecf", "content_id": "936f042fd779059a79a248b7a623a1cb51196fd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 460, "license_type": "no_license", "max_line_length": 59, "num_lines": 9, "path": "/db/scheme.sql", "repo_name": "JackWei8/sz_security_housing", "src_encoding": "GB18030", "text": "CREATE TABLE `T_PRH_DATA` (\n `USER_ID` int(20) unsigned NOT NULL COMMENT '用户ID',\n `SEQ_NO` int(20) NOT NULL COMMENT '轮候排位',\n `APPLY_NO` varchar(20) NOT NULL DEFAULT '' COMMENT '备案号',\n `NUM` tinyint(4) NOT NULL DEFAULT 0 COMMENT '申请人数',\n `PLACE` varchar(20) NOT NULL DEFAULT '' COMMENT '户籍所在区',\n PRIMARY KEY (`USER_ID`),\n KEY `INDEX_APPLY_NO` (`APPLY_NO`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='轮候信息'" } ]
6
Extult/Reply-Collection
https://github.com/Extult/Reply-Collection
bd0556e3bc02e263536274dd607dccd96ec37573
7f554975e02d26d51b05693d475879234fc24578
dbd7edd4da6b4ae636363339c73c7d5ebbf8af26
refs/heads/master
2020-05-22T22:07:11.281442
2019-05-14T04:28:53
2019-05-14T04:34:04
186,540,994
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8198970556259155, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 19.821428298950195, "blob_id": "c2880bff657e8e64398435a0557557983585dbbe", "content_id": "ef7efa9b72ac41086853e9f293b8d0ab7204b3d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1303, "license_type": "no_license", "max_line_length": 71, "num_lines": 28, "path": "/README.md", "repo_name": "Extult/Reply-Collection", "src_encoding": "UTF-8", "text": "# Reply-Collection\n\n## 概要\n近頃クオリティフィルターやらShadow BanやらでTwitterの仕様が複雑になり、必要なリプライを見落としてしまうことが増えてきました。\n\n- A: リプライ欄にも検索結果にも表示されるツイート\n- B: リプライ欄には表示されるが検索結果には表示されないツイート\n- C: リプライ欄には表示されないが検索結果には表示されるツイート\n- D: リプライ欄にも検索結果にも表示されないツイート\n\nDはどうしようもありませんが、BとCの取りこぼしだけでも避けられるようシンプルなスクリプトを作成してみました。\n\n## 動作環境\nPython 3.6以上 \nconfigparser \nrequests \nBeautifulSoup4\n\n## 使い方\n1. settings.ini.sample を settings.ini にリネームします。\n2. 必要項目を埋めます。\n3. main.py を実行します。\n\nこれで当該ツイートへのリプライ(A〜C)がCSV形式で出力されます。\n\n## 注意\n本スクリプトはTwitter APIを使用していないため、乱用すると利用制限や凍結等の処置が行われる可能性があります。 \nそれらの損害に対しExtultは一切の責任を負いかねますので予めご了承ください。\n" }, { "alpha_fraction": 0.5777909755706787, "alphanum_fraction": 0.5799683332443237, "avg_line_length": 25.3125, "blob_id": "e70385a6c272496684e0061c9fdc7c13283f1d3d", "content_id": "0c240264a29bcb7648040e2bce1257c1d253eb51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5588, "license_type": "no_license", "max_line_length": 93, "num_lines": 192, "path": "/main.py", "repo_name": "Extult/Reply-Collection", "src_encoding": "UTF-8", "text": "import csv\nfrom datetime import datetime, timedelta\nfrom time import sleep\n\nimport configparser\nimport requests\nfrom bs4 import BeautifulSoup\n\nini = configparser.ConfigParser()\nini.read('settings.ini', 'UTF-8')\n\nWAIT_SEC = int(ini['browser']['wait_sec'])\nTARGET_TWEET_URL = ini['twitter']['target']\n\n\nclass Tweet:\n def __init__(self, div):\n self.id = div.attrs['data-tweet-id']\n self.username = div.attrs['data-screen-name']\n self.fullname = div.attrs['data-name']\n self.unixtime = int(div.find('span', '_timestamp').attrs['data-time'])\n self.text = div.find('p', 'tweet-text').text\n\n @staticmethod\n def get_header():\n \"\"\"\n :return: CSV出力用のヘッダー行を返す\n \"\"\"\n return ['datetime',\n 'username',\n 'fullname',\n 'text',\n 'href']\n\n def as_list(self):\n \"\"\"\n :return: CSV出力用のリストを返す\n \"\"\"\n return [self.strftime(),\n self.username,\n self.fullname,\n self.text,\n f'https://twitter.com/{self.username}/status/{self.id}']\n\n def strftime(self):\n return self.datetime().strftime('%Y年%m月%d日%H:%M%z')\n\n def datetime(self):\n return datetime.fromtimestamp(self.unixtime)\n\n\ndef login():\n \"\"\"\n twitterへログイン\n \"\"\"\n r = session.get('https://twitter.com/')\n soup = BeautifulSoup(r.text, 'lxml')\n login_form = soup.find('form', 'LoginForm')\n authenticity_token = login_form.find(attrs={'name': 'authenticity_token'}).attrs['value']\n data = {\n 'session[username_or_email]': ini['twitter']['username_or_email'],\n 'session[password]': ini['twitter']['password'],\n 'authenticity_token': authenticity_token,\n }\n session.post('https://twitter.com/sessions', data=data)\n\n\ndef get_target(url):\n \"\"\"\n 対象ツイートを取得\n :param url: 対象ツイートのURL\n :return: Tweetオブジェクト\n \"\"\"\n r = session.get(url)\n soup = BeautifulSoup(r.text, 'lxml')\n div = soup.find('div', 'permalink-tweet')\n return Tweet(div)\n\n\ndef get_from_target(tweet):\n \"\"\"\n 対象ツイートのリプライ欄を取得\n :param tweet: 
対象のTweetオブジェクト\n :return: Tweetオブジェクトのリスト\n \"\"\"\n params = {'max_position': 1}\n url = f'https://twitter.com/i/{tweet.username}/conversation/{tweet.id}'\n list_ = []\n\n while params['max_position']:\n r = session.get(url, params=params)\n json_ = r.json()['descendants']\n soup = BeautifulSoup(json_['items_html'], 'lxml')\n tweets = soup.find_all('div', 'tweet')\n\n for div in tweets:\n list_.append(Tweet(div))\n\n params['max_position'] = json_['min_position']\n sleep(WAIT_SEC)\n\n print(f'リプライ欄から{len(list_)}件のツイートを取得しました')\n return list_\n\n\ndef get_from_search(tweet, until):\n \"\"\"\n 検索結果からリプライを取得\n :param tweet: 対象のTweetオブジェクト\n :param until: 検索範囲(終了日)\n :return: Tweetオブジェクトのリスト\n \"\"\"\n since = (tweet.datetime() - timedelta(days=1)).strftime('%Y-%m-%d') # 念のため対象ツイートの1日前から検索\n params = {\n 'f': 'tweets',\n 'vertical': 'default',\n 'q': f'to:{tweet.username} since:{since} until:{until}',\n 'src': 'typd',\n 'qf': 'off',\n 'include_available_features': 1,\n 'include_entities': 1,\n 'max_position': 1,\n 'reset_error_state': False,\n }\n url = 'https://twitter.com/i/search/timeline'\n list_ = []\n items_html = True\n\n while items_html:\n r = session.get(url, params=params)\n json_ = r.json()\n items_html = json_['items_html'].strip()\n soup = BeautifulSoup(items_html, 'lxml')\n tweets = soup.find_all('div', attrs={'data-conversation-id': tweet.id})\n\n for div in tweets:\n list_.append(Tweet(div))\n\n params['max_position'] = json_['min_position']\n sleep(WAIT_SEC)\n\n print(f'検索結果から{len(list_)}件のツイートを取得しました')\n\n return list_\n\n\ndef make_unique(*lists):\n \"\"\"\n リストを連結後、Tweet.as_list()して重複を削除\n :param lists: Tweetオブジェクトのリスト(複数)\n :return: CSV出力用の2次元リスト\n \"\"\"\n joined_list = []\n for l in lists:\n joined_list += l\n\n list_ = list(map(list, set(map(tuple, [tweet.as_list() for tweet in joined_list]))))\n print(f'計{len(list_)}件のツイートを抽出しました')\n\n return list_\n\n\ndef output_CSV(list_, filename):\n \"\"\"\n リストをusername(2列目の値)で昇順にソートしてCSV形式で書き出し\n \"\"\"\n sorted_list = sorted(list_, key=lambda x: x[1])\n final_list = [Tweet.get_header()] + sorted_list\n with open(filename, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(final_list)\n print(f'{filename}を出力しました')\n\n\nif __name__ == '__main__':\n session = requests.Session()\n session.headers.update({\n 'referer': 'https://twitter.com/',\n 'User-Agent': ini['browser']['User-Agent'],\n })\n\n login()\n\n target = get_target(TARGET_TWEET_URL)\n\n rep_list = get_from_target(target)\n\n search_list = get_from_search(target, ini['twitter']['until'])\n\n unique_list = make_unique(rep_list, search_list)\n\n output_CSV(unique_list, ini['output']['filename'])\n" } ]
2
kumar-shubham/wisecells-library
https://github.com/kumar-shubham/wisecells-library
90036bd9458c58d7569d07fce9e9617c30bac35a
43fb7740b87b8f1e2ae170a2ed44c6e78cc09f60
ced4e95094c93e749dc7fae9ce3583cc35da1b57
refs/heads/master
2021-01-13T02:16:03.738899
2013-09-21T16:59:26
2013-09-21T16:59:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6430801153182983, "alphanum_fraction": 0.6430801153182983, "avg_line_length": 33.32143020629883, "blob_id": "a145fbea8ed9e8f4218a977d1a6e34ed922a187d", "content_id": "5a7a3a742d63758ffc44d5c400708493517960f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 107, "num_lines": 28, "path": "/library/urls.py", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n#from django.contrib import admin\n#admin.autodiscover()\n\nurlpatterns = patterns('library.views',\n url(r'^$', 'index'),\n url(r'^details/$', 'mydetails'),\n url(r'^addbook/$', 'addbook'),\n url(r'^addstudent', 'addstudent'),\n url(r'^studentdetail','studentdetail'),\n url(r'^(?P<bb_id>\\d+)/issue$', 'issuebook'),\n url(r'^(?P<b_id>\\d+)/confirm$', 'confirmation'),\n url(r'^details/returnbook$', 'returnbook'),\n #url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'library/login.html'}),\n #url(r'^return/$', 'returnbook'), \n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n \n)\n\n#urlpatterns += patterns('',\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n#)\n" }, { "alpha_fraction": 0.7202643156051636, "alphanum_fraction": 0.7224669456481934, "avg_line_length": 24.27777862548828, "blob_id": "7bd78427d32d7c5e0f0014a5b3255554aec20499", "content_id": "952ab0004537c1307660af70210214bf619b87c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 454, "license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/polls/admin.py", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "from polls.models import Poll\nfrom django.contrib import admin\nfrom polls.models import Choice\n\n#admin.site.register(Poll)\nclass ChoiceInLine(admin.TabularInline):\n\tmodel = Choice\n\textra = 3\n\t\n\nclass PollAdmin(admin.ModelAdmin):\n\tfieldsets = [\n\t\t('Question',\t\t\t{ 'fields': ['question']}),\n\t\t('Date information', {'fields': ['pub_date'],'classes':['collapse']})\n\t]\n\tinlines = [ChoiceInLine]\nadmin.site.register(Poll,PollAdmin)\n#admin.site.register(Choice)" }, { "alpha_fraction": 0.6805555820465088, "alphanum_fraction": 0.6805555820465088, "avg_line_length": 29.85714340209961, "blob_id": "daeaee00e5ad93354e98bd2106944d0184fc7eb8", "content_id": "c19a1d8aeeba7313f421cd739e813493ca5a4f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/mysite/urls.py", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^library/', include('library.urls')),\n url(r'^login/$', 'library.views.userlogin'),\n url(r'^auth/$', 'library.views.userauth'),\n url(r'^logout/$', 'library.views.userlogout'),\n url(r'^admin/', include(admin.site.urls)),\n\n)\n" }, { "alpha_fraction": 0.7744361162185669, "alphanum_fraction": 0.7744361162185669, "avg_line_length": 
23.272727966308594, "blob_id": "0d6c1d2e11ba989a540cd09e74107e78cece2788", "content_id": "dc43d00d07837b12e74c45dcf4a8a306e6cfb3df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/mysite/apache/django.wsgi", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport sys\n\nsys.path.append('~/DjangoTry/mysite/')\nsys.path.append('~/Djangotry/mysite/mysite')\n\nos.environ['DJANGO_SETTING_MODULE'] = 'mysite.settings'\n\nimport django.core.handlers.wsgi\napplication = django.core.handlers.wsgi.WSGIHandlers()" }, { "alpha_fraction": 0.7848699688911438, "alphanum_fraction": 0.7848699688911438, "avg_line_length": 23.941177368164062, "blob_id": "9989f577f1138bdfe7621a33f2ea2cdf8a4d0df6", "content_id": "2b7b55a4a29bcbfcca2f13fba855042c1f409292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 56, "num_lines": 17, "path": "/library/admin.py", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "from library.models import Books\nfrom library.models import Students\nfrom library.models import BookIssued\nfrom django.contrib import admin\n\nclass StudentAdmin(admin.ModelAdmin):\n\tlist_display = ('s_id', 'name')\n\nclass BooksAdmin(admin.ModelAdmin):\n\tlist_display = ('book_id','book_name','author','count')\n\t\n\n\n\nadmin.site.register(Books,BooksAdmin)\nadmin.site.register(Students,StudentAdmin)\nadmin.site.register(BookIssued)" }, { "alpha_fraction": 0.7063615322113037, "alphanum_fraction": 0.708947479724884, "avg_line_length": 40.12234115600586, "blob_id": "84931556dc30e69de5c8db8d72efed284d94fab3", "content_id": "43456fce77d1988d4dddb97aa68271f81354baba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7734, "license_type": "no_license", "max_line_length": 138, "num_lines": 188, "path": "/library/views.py", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom library.models import Books, Students, BookIssued\nfrom django.template import Context, loader, RequestContext\nfrom django.shortcuts import render_to_response, get_object_or_404, get_list_or_404, redirect\nfrom django.core.context_processors import csrf\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, logout, login\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom library.forms import AddBookForm, AddStudentForm\ndef index(request):\n\tif request.user.is_authenticated():\n\t\tlist_of_books = Books.objects.all()\n\t\treturn render_to_response('library/index.html', {'list_of_books' : list_of_books}, context_instance=RequestContext(request))\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef mydetails(request):\n\tif request.user.is_authenticated():\n\t\tuser = request.user\n\t\ttry:\n\t\t\tsid = request.POST.get('sid',0 )\n\t\t\ts = Students.objects.get(s_id=sid)\n\t\t\ti = BookIssued.objects.filter(s_id__s_id = sid)\n\t\t\treturn render_to_response('library/mydetails.html', {'student':s, 'books':i, 'user':user}, context_instance=RequestContext(request))\t\n\t\texcept 
ObjectDoesNotExist:\n\t\t\treturn render_to_response('library/mydetails.html', {'user':user} , context_instance=RequestContext(request))\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef returnbook(request):\n\tif request.user.is_authenticated():\n\t\tuser = request.user\n\t\ts_id = request.POST['s_id']\n\t\tsid = Students.objects.get(name=s_id)\n\t\tb_id = request.POST['b_id']\n\t\tb_id = Books.objects.get(book_name=b_id)\n\t\tb_id = BookIssued.objects.get(b_id=b_id, s_id=sid)\n\t\tbook = b_id.b_id\n\t\tbook.count += 1\n\t\tbook.save()\n\t\n\t\tb_id.delete()\n\t\t\n\t\treturn render_to_response('library/confirmreturn.html', {'book':book,'student':sid, 'user':user})\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\n\ndef issuebook(request, bb_id):\n\tif request.user.is_authenticated():\n\t\tb = Books.objects.get(book_id = bb_id)\n\t\treturn render_to_response('library/issue.html', {'issue': b}, context_instance=RequestContext(request))\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\n\ndef confirmation(request, b_id):\n\tif request.user.is_authenticated():\n\t\tuser = request.user\n\t\tb = Books.objects.get(book_id = b_id)\n\t\terror_msg =\"\"\n\t\terror_flag = 0\n\t\ttry:\n\t\t\tsid = request.POST['ssid']\n\t\t\ts = Students.objects.get(s_id=sid)\n\t\t\ttry:\n\t\t\t\talreadyIssue = BookIssued.objects.get(b_id=b, s_id=s)\n\t\t\t\terror_msg = \"Sorry !!! You already have this book.\"\n\t\t\t\terror_flag = 1\n\t\t\t\treturn render_to_response('library/confirm.html',{'book':b, 'student':s, 'msg':error_msg, 'flag':error_flag})\n\t\t\texcept ObjectDoesNotExist:\t\n\t\t\t\tb.count -= 1\n\t\t\t\tb.save()\n\t\t\t\tc = BookIssued(b_id = b, s_id = s )\n\t\t\t\tc.save()\n\t\texcept ObjectDoesNotExist:\n\t\t\terror_msg = \"Sorry!!! Incorrect Entry in ID field\"\n\t\t\terror_flag = 1\n\t\t\treturn render_to_response('library/confirm.html',{'book':b, 'msg':error_msg, 'flag':error_flag, 'user':user})\n\n\t\treturn render_to_response('library/confirm.html', {'book':b, 'student':s, 'msg':error_msg, 'flag':error_flag,'user':user})\n\telse:\n\t\treturn HttpResponseRedirect('/login/')\ndef userlogin(request):\n\tif request.user.is_authenticated():\n\t\treturn HttpResponseRedirect('/library/')\n\treturn render_to_response('library/login.html', context_instance=RequestContext(request))\n\ndef userauth(request):\n\tif request.method == 'POST':\n\t\tusername = request.POST['username']\n\t\tpassword = request.POST['password']\n\t\tuser = authenticate(username=username, password=password)\n\t\tif user is not None:\n\t\t\tif user.is_active:\n\t\t\t\tlogin(request,user)\n\t\t\t\tuser_id = User.objects.get(username= username).id\n\t\t\t\trequest.session['user_id'] = user_id\n\t\t\t\tuser = User.objects.get(username=username)\n\t\t\t\treturn redirect('/library/',user)\n\t\t\telse:\n\t\t\t\terror_msg = \"Sorry!!! Your Account is Currently Disabled\"\n\t\t\t\treturn render_to_response('library/error.html', {'error_msg':error_msg})\n\t\telse:\n\t\t\terror_msg = \"Sorry!!! 
Invalid username or password\"\n\t\t\treturn render_to_response('library/error.html', {'error_msg':error_msg})\n\n\ndef userlogout(request):\n\tlogout(request)\n\treturn render_to_response('library/login.html', context_instance=RequestContext(request))\n\ndef addbook(request):\n\tif request.user.is_authenticated():\n\t\tform = AddBookForm()\n\t\tif request.method == 'POST':\n\t\t\tform = AddBookForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tbook_id = form.cleaned_data.get('book_id',0)\n\t\t\t\tbook_name = form.cleaned_data['book_name']\n\t\t\t\tcount = form.cleaned_data['count']\n\t\t\t\tauthor = form.cleaned_data['author']\n\t\t\t\ttry:\n\t\t\t\t\tbook_object = Books.objects.get(book_id=book_id)\n\t\t\t\t\terror_msg = \"That ID is Already Taken\"\n\t\t\t\t\treturn render_to_response('library/addbook.html',{'form':form, 'error_msg':error_msg}, context_instance=RequestContext(request))\n\t\t\t\texcept ObjectDoesNotExist:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tbook_object = Books.objects.get(book_name=book_name)\n\t\t\t\t\t\terror_msg = \"A Entry Already Exists With same Name\"\n\t\t\t\t\t\treturn render_to_response('library/addbook.html', {'form':form, 'error_msg':error_msg}, context_instance=RequestContext(request))\n\t\t\t\t\texcept ObjectDoesNotExist:\n\t\t\t\t\t\tnew_book = Books(book_id=book_id, book_name=book_name, count=count, author=author)\n\t\t\t\t\t\tnew_book.save()\n\t\t\t\t\t\tflag = 1\n\t\t\t\t\t\tmsg = \"Book successsfully Added\"\n\t\t\t\t\t\treturn render_to_response('library/addbook.html', {'flag':flag, 'form':form, 'msg':msg}, context_instance=RequestContext(request))\n\t\t\telse:\n\t\t\t\treturn render_to_response('library/addbook.html', {'form':form}, context_instance=RequestContext(request))\n\t\t\t\n\t\telse:\n\t\t\tform = AddBookForm()\n\t\t\treturn render_to_response('library/addbook.html', {'form':form}, context_instance=RequestContext(request))\t\n\telse:\n\t\tHttpResponseRedirect('/login/')\n\n\t\t\ndef addstudent(request):\n\tif request.user.is_authenticated():\n\t\tform = AddStudentForm()\n\t\tif request.method == 'POST':\n\t\t\tform = AddStudentForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\ts_id = form.cleaned_data.get('s_id', 0)\n\t\t\t\tname = form.cleaned_data['name']\n\t\t\t\temail = form.cleaned_data['email']\n\t\t\t\ttry:\n\t\t\t\t\tstudent_object = Students.objects.get(s_id=s_id)\n\t\t\t\t\terror_msg = \"That Student ID is Already Taken\"\n\t\t\t\t\treturn render_to_response('library/addstudent.html', {'form':form, 'error_msg':error_msg}, context_instance=RequestContext(request))\n\t\t\t\texcept ObjectDoesNotExist:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tstudent_object = Students.objects.get(email=email)\n\t\t\t\t\t\terror_msg = \"That email is already associated with another account\"\n\t\t\t\t\t\treturn render_to_response('library/addstudent.html', {'form':form, 'error_msg':error_msg}, context_instance=RequestContext(request))\n\t\t\t\t\texcept ObjectDoesNotExist:\n\t\t\t\t\t\tnew_student = Students(s_id=s_id, name=name, email=email)\n\t\t\t\t\t\tnew_student.save()\n\t\t\t\t\t\tflag = 1\n\t\t\t\t\t\tmsg = \"New Student is successsfully added\"\n\t\t\t\t\t\treturn render_to_response('library/addstudent.html', {'form':form, 'msg':msg}, context_instance=RequestContext(request))\n\t\t\telse:\n\t\t\t\treturn render_to_response('library/addstudent.html', {'form':form}, context_instance=RequestContext(request))\n\t\telse:\n\t\t\treturn render_to_response('library/addstudent.html', {'form':form}, 
context_instance=RequestContext(request))\n\telse:\n\t\tHttpResponseRedirect('/login/')\n\ndef studentdetail(request):\n\ttry:\n\t\tsid = request.POST.get('s_id',0 )\n\t\ts = Students.objects.get(s_id=sid)\n\t\ti = BookIssued.objects.filter(s_id__s_id = sid)\n\t\treturn render_to_response('library/studentdetail.html', {'student':s, 'books':i}, context_instance=RequestContext(request))\n\texcept ObjectDoesNotExist:\n\t\terror_msg = \"Incorrect Student ID\"\n\t\treturn render_to_response('library/login.html', {'error_msg':error_msg} , context_instance=RequestContext(request))\n\t\n\n" }, { "alpha_fraction": 0.5569487810134888, "alphanum_fraction": 0.5585162043571472, "avg_line_length": 28.461538314819336, "blob_id": "e02fcf130f8b8e676c2d0112c6d704ec6159515d", "content_id": "79d8de9c0a5588f0f35ae6a7bb6a68ca17928db2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1914, "license_type": "no_license", "max_line_length": 98, "num_lines": 65, "path": "/templates/library/index.html", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<head>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.min.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.responsive.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static//bootstrap/css/bootstrap.responsive.min.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.css\">\n{% load bootstrap_toolkit %}\n</head>\n<body background=\"background.jpg\">\n\t<div id=\"wrap\" class=\"container clearfix\" data-role=\"page\">\n\t<div class=\"navbar \">\n <div class=\"navbar-inner\">\n <div class=\"container-fluid\">\n \n <div class=\"nav-collapse\">\n <ul class=\"nav\">\n \t<li class=\"active\"><a href=\"#\">Home</a></li>\n <li><a href=\"/library/details/\">Student Details</a></li>\n <li><a href=\"/library/addbook/\">Add Books</a></li>\n <li><a href=\"/library/addstudent/\">Add Student</a></li>\n </ul>\n <p class=\"navbar-text pull-right\">\n \t\tLogged In as <font color=brown>{{user}}</font> (<a href=\"/logout/\">Logout?</a>)\n \t</p>\n </div><!--/.nav-collapse -->\n </div>\n </div>\n </div>\n{% if list_of_books %}\n<table class=\"table table-striped\">\n\t<thead>\n\t\t<tr>\n\t\t\t<th><h4>Books Detail</h4></th>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<th>Books Name</th>\n\t\t\t<th>Copies Available</th>\n\t\t\t<th>Issue Book</th>\n\t\t</tr>\n\t</thead>\n\t<tbody>\n\t\t{% for books in list_of_books %}\n\t\t\n\t\t\t<tr>\n\t\t\t\t<td>{{ books }}</td>\n\t\t\t\t<td>{{ books.count}}</td>\n\t\t\t\t<td>{% if books.count < 1 %}\n\t\t\t\t\t<button class=\"btn btn-primary\">Not Available</button>\n\t\t\t\t\t{% else %}\n\t\t\t\t\t<a href=\"{{books.book_id}}/issue\">\n\t\t\t\t\t<button class=\"btn btn-primary\">Issue Book</button></a>\n\t\t\t\t\t{% endif %}\n\t\t\t\t</td>\n\t\t\t</tr>\n\t\t{% empty %}\n\t</tbody>\n</table>\n</body>\n</html>\n\t\t{% endfor %}\n\t</ul>\n{% else %}\n\t<p> No Books are available.</p>\n{% endif %}" }, { "alpha_fraction": 0.7160243391990662, "alphanum_fraction": 0.727180540561676, "avg_line_length": 24.842105865478516, "blob_id": "35af26db2f15bb57c7944525054a5408a1effc99", "content_id": "4eafc7ff207883d16027e44762969acc5260482e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 986, "license_type": "no_license", "max_line_length": 62, "num_lines": 38, "path": "/library/forms.py", 
"repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "from django import forms\nfrom library.models import Books\nfrom django.core.validators import validate_email\n\n\nclass AddBookForm(forms.Form):\n\tbook_id = forms.IntegerField()\n\tbook_name = forms.CharField(max_length=50)\n\tcount = forms.IntegerField()\n\tauthor = forms.CharField(max_length=50)\n\n\tdef clean_book_name(self):\n\t\tbook_name = self.cleaned_data['book_name']\n\t\tif len(book_name) < 3:\n\t\t\traise forms.ValidateError(\"Atleast 3 characters required!\")\n\t\telse:\n\t\t\treturn book_name\n\n\t\nclass AddStudentForm(forms.Form):\n\ts_id = forms.CharField(max_length=4)\n\tname = forms.CharField(max_length=50)\n\temail = forms.EmailField()\n\n\tdef clean_name(self):\n\t\tname = self.cleaned_data['name']\n\t\tif len(name) < 3:\n\t\t\traise forms.ValidateError(\"Atleast 3 characters required\")\n\t\telse:\n\t\t\treturn name\n\n\tdef clean_email(self):\n\t\temail = self.cleaned_data['email']\n\t\ttry:\n\t\t\tvalidate_email(email)\n\t\t\treturn email\n\t\texcept ValidationError:\n\t\t\traise forms.ValidationError(\"Enter a valid email address\")\n\n\t\t\n" }, { "alpha_fraction": 0.7135922312736511, "alphanum_fraction": 0.7249190807342529, "avg_line_length": 24.70833396911621, "blob_id": "ac54ec3836212fe5acac25b3bd1511f10035cc37", "content_id": "845cb8b2082191132c6cd7646ed610e3e0d77e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 44, "num_lines": 24, "path": "/library/models.py", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Books(models.Model):\n\tbook_id = models.IntegerField()\n\tbook_name = models.CharField(max_length=50)\n\tcount = models.IntegerField()\n\tauthor = models.CharField(max_length=50)\n\tdef __unicode__(self):\n\t\treturn self.book_name\n\t\n\nclass Students(models.Model):\n\ts_id = models.CharField(max_length=4)\n\tname = models.CharField(max_length=50)\n\temail = models.EmailField()\n\tdef __unicode__(self):\n\t\treturn self.name\n\nclass BookIssued(models.Model):\n\tb_id = models.ForeignKey(Books)\n\ts_id = models.ForeignKey(Students)\n\tdef __unicode__(self):\n\t\treturn str(self.b_id)\t\n" }, { "alpha_fraction": 0.6011080145835876, "alphanum_fraction": 0.6038781404495239, "avg_line_length": 33.39682388305664, "blob_id": "d4e155dbe03a9f013ed516a2cc67de5866d341ea", "content_id": "98ad5116764882b2634309a891e3ef97a9b41567", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2166, "license_type": "no_license", "max_line_length": 110, "num_lines": 63, "path": "/templates/library/addbook.html", "repo_name": "kumar-shubham/wisecells-library", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n<head>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.min.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.responsive.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.responsive.min.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"/static/bootstrap/css/bootstrap.css\">\n{% load bootstrap_toolkit %}\n</head>\n<body background=\"background.jpg\">\n\t<div id=\"wrap\" class=\"container clearfix\" data-role=\"page\">\n\t<div class=\"navbar \">\n <div class=\"navbar-inner\">\n <div 
class=\"container-fluid\">\n \n <div class=\"nav-collapse\">\n <ul class=\"nav\">\n \t<li><a href=\"/library/\">Home</a></li>\n <li><a href=\"/library/details/\">Student Details</a></li>\n <li class=\"active\"><a href=\"/library/addbook/\">Add Books</a></li>\n <li><a href=\"/library/addstudent/\">Add Student</a></li>\n </ul>\n <p class=\"navbar-text pull-right\">\n \t\tLogged In as <font color=brown>{{user}}</font> (<a href=\"/logout/\">Logout?</a>)\n \t</p>\n </div><!--/.nav-collapse -->\n </div>\n </div>\n </div>\n\n\n <div class=\"container\">\n <h3><font color=green>{{ msg }}</font></h3>\n <h2 class=\"form-signin-heading\"> Add Book </h2>\n <h3><font color=red>{{ error_msg }}</font></h3>\n<form method=\"POST\" action=\"/library/addbook/\" class=\"form-registration\">\n\n{% csrf_token %}\n\n<div class=\"fieldWrapper\">\n {{ form.book_id.errors }}\n <label for=\"id_book_id\">Enter Book ID: </label>\n {{ form.book_id }}\n</div>\n<div class=\"fieldWrapper\">\n {{ form.book_name.errors }}\n <label for=\"id_book_name\">Enter Book Name: </label>\n {{ form.book_name }}\n</div>\n<div class=\"fieldWrapper\">\n {{ form.count.errors }}\n <label for=\"id_count\">Enter Number of Copies: </label>\n {{ form.count }}\n</div>\n<div class=\"fieldWrapper\">\n {{ form.author.errors }}\n <label for=\"id_author\">Enter Author's Name: </label>\n {{ form.author }}\n</div>\n<p>\n<button type=\"submit\" class=\"btn btn-primary\" name=\"submit\" value=\"sumbit-value\"> Add Book Detail</button></p>\n</form>\n</div>" } ]
10
glifchits/promise-pool-testing
https://github.com/glifchits/promise-pool-testing
2ac01e6b0c09f97a5ad097d5ce43097cd35e68fa
e97b028b5f7722fb8b220c143aed3c34adafb8ef
a3f9d30f1de8be8f2834e652de9136c5b8522252
refs/heads/master
2016-09-13T13:44:35.596855
2016-05-18T18:42:58
2016-05-18T18:42:58
59,141,543
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5860214829444885, "alphanum_fraction": 0.6034946441650391, "avg_line_length": 25.571428298950195, "blob_id": "07d3b302c90ff96253622524b3a07f94916e6083", "content_id": "11b0d1f230c21a321faa53a5b12b15b5e5ee53d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 744, "license_type": "no_license", "max_line_length": 84, "num_lines": 28, "path": "/main.js", "repo_name": "glifchits/promise-pool-testing", "src_encoding": "UTF-8", "text": "import agent from 'superagent';\nimport defaults from 'superagent-defaults';\nimport Promise from 'bluebird';\nimport Queue from 'promise-queue';\n\nPromise.promisifyAll(agent);\nconst request = defaults(agent);\n\nvar url = 'http://localhost:8888/';\n\nconst start = new Date();\nconsole.log('started', start);\n\nQueue.configure(Promise); // use bluebird promise\n\nlet q = new Queue(2 /* concurrent */, Infinity /* max in queue */);\n\nfor (let i = 0; i < 10; i++) {\n let qAdd = q.add(() => (\n request.get(url).endAsync().then(() => {\n const end = new Date();\n console.log(`${i} finished after ${((end-start)/1000).toFixed(2)} sec`);\n })\n ))\n qAdd.then(function() {\n console.log('qadd then\\'d');\n });\n}\n" }, { "alpha_fraction": 0.7587412595748901, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 22.83333396911621, "blob_id": "d617a981cd52b04937a05dab83953a8a258b87f3", "content_id": "e72411b477edc0a5bef3f68aa8b27ed8e79b0aa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 572, "license_type": "no_license", "max_line_length": 111, "num_lines": 24, "path": "/README.md", "repo_name": "glifchits/promise-pool-testing", "src_encoding": "UTF-8", "text": "# promise-pool-testing\n\nInvestigation into how to use AJAX requests and promises on front-end in a pool/queue with limited concurrency.\n\nIncludes a dummy local non-blocking Python/Tornado server to test concurrent requests.\n\nThe great [promise-queue](https://github.com/azproduction/promise-queue) library is used.\n\n### Installing\n\n```bash\npip install -r requirements.txt\nnpm install\n```\n\n### Running\n```bash\npython tornadoserver.py\nnpm start # and point browser to localhost:8080\n```\n\n### Example\nConcurrency: 2\n![network profile with 2 concurrent requests](profile.png)\n" }, { "alpha_fraction": 0.630630612373352, "alphanum_fraction": 0.6381381154060364, "avg_line_length": 23.66666603088379, "blob_id": "8a41480e105c42f9e347b1d6df0e8dcff47a1f1d", "content_id": "a899e2ea68854983365d274b88d2d385e97c8ca5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/tornadoserver.py", "repo_name": "glifchits/promise-pool-testing", "src_encoding": "UTF-8", "text": "import time\nimport tornado.ioloop\nfrom tornado.web import RequestHandler, asynchronous\nfrom tornado import gen\nfrom tornado.httputil import HTTPHeaders\n\nclass MainHandler(RequestHandler):\n\n @asynchronous\n @gen.engine\n def get(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.write('sleeping')\n self.flush()\n yield gen.sleep(2)\n self.write(\"Hello, world {}\".format(time.time()))\n self.finish()\n\ndef make_app():\n return tornado.web.Application([\n (r\"/*\", MainHandler),\n ])\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n" } ]
3
DevLoL/devbot
https://github.com/DevLoL/devbot
8fa4fdcf8fd4116b0dd38915855c76c7c8ed500a
08fb41d7de29ce9a80fbb3b9ccfaed0fcae3b447
99a31afe6aa0cb411dde10ce81a2943288847ca6
refs/heads/master
2021-01-23T08:56:38.450928
2015-12-20T15:40:41
2015-12-20T15:40:41
28,942,415
0
1
null
2015-01-08T01:18:28
2015-01-30T21:35:37
2015-07-27T13:37:56
Python
[ { "alpha_fraction": 0.6431095600128174, "alphanum_fraction": 0.6593639850616455, "avg_line_length": 29.106382369995117, "blob_id": "1e70d18d8a2098bf479f3c292b99ba10d0142646", "content_id": "13871d6668856d25a2f76c8638e6dd9d01817420", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1416, "license_type": "no_license", "max_line_length": 107, "num_lines": 47, "path": "/modules/youtube2.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\n\"\"\"\nyoutube.py - Willie YouTube Module\nCopyright 2014, doebi\nCopyright 2012, Dimitri Molenaars, Tyrope.nl.\nCopyright © 2012-2013, Elad Alfassa, <[email protected]>\nCopyright 2012, Edward Powell, embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dfbta.net\n\nThis module will respond to .yt and .youtube commands and searches the youtubes.\n\"\"\"\n\nfrom willie import web, tools\nfrom willie.module import rule, commands, example\nimport json\nimport re\nimport requests\nfrom HTMLParser import HTMLParser\n\n\ndef setup(bot):\n regex = re.compile('(youtube.com/watch\\S*v=|youtu.be/)([\\w-]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.WillieMemory()\n bot.memory['url_callbacks'][regex] = ytinfo\n\n\n@rule('.*(youtube.com/watch\\S*v=|youtu.be/)([\\w-]+).*')\ndef ytinfo(bot, trigger, found_match=None):\n \"\"\"\n Get information about the latest video uploaded by the channel provided.\n \"\"\"\n match = found_match or trigger\n #Grab info from YT API\n uri = 'http://www.youtube.com/oembed?url=http://youtube.com/watch?v=' + match.group(2) + '&format=json'\n r = requests.get(uri)\n if not r.status_code == requests.codes['\\o/']:\n return\n \n ytoembed = r.json()\n #combine variables and print\n message = '[YouTube] ' + ytoembed['title'] + \\\n ' [' + ytoembed['author_name'] + ']'\n\n bot.say(HTMLParser().unescape(message))\n" }, { "alpha_fraction": 0.7627302408218384, "alphanum_fraction": 0.7627302408218384, "avg_line_length": 28.774192810058594, "blob_id": "5511b2e7494b9387494ea2767bf0b42d51f168cd", "content_id": "f668d22e74e5525e060360e1122db9df5ee6bf0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 923, "license_type": "no_license", "max_line_length": 164, "num_lines": 31, "path": "/README.md", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "# devbot\nOur irc bot based on willie (https://github.com/embolalia/willie), which tells us lots of interesting things.\n\n## commands\n* help\n* devmon\n* door\n* temp\n* weather\n\n### help\nPrints this manual.\n\n### devmon\nReplies statistics of collected meta-data about our network-infrastructure.\n\n### door\nPrints the current status of the door at /dev/lol. If locked or unlocked. Which is also displayed as prefix [OPEN] or [CLOSED] in the channels topic\n\n### temp\nPrints current temperature and how it feels like.\n\n### weather\nPrints a human-readable summary of the hourly weather-data provided by forecast.io for Linz.\n\n## additional features\nthe bot periodically checks the status of devlol in the hackerspace api. 
If the status has changed it reports it to the channel and also keeps the topic up to date.\n\n## legacy\n\n* status: used to handle the manual status flag, is now replaced by more precise and automatic door_locked\n" }, { "alpha_fraction": 0.6491803526878357, "alphanum_fraction": 0.6524590253829956, "avg_line_length": 24.41666603088379, "blob_id": "184521473a01996e7e6138d89bc8fc1662f7b161", "content_id": "db0dd7de35e995bf24dc5b907ef127eefc9f1b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/modules/door.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "import willie\nimport urllib\nimport json\nimport status\n\[email protected]('door')\ndef door(bot, trigger):\n data = json.loads(status.query_api(mode=''))\n if data['sensors']['door_locked'][0]['value']:\n bot.reply(\"The door is locked!\")\n else:\n bot.reply(\"The door is unlocked!\")\n" }, { "alpha_fraction": 0.5658953785896301, "alphanum_fraction": 0.5870221257209778, "avg_line_length": 25.50666618347168, "blob_id": "559d9bf313b276a188a9a707c46320a7b4d6ee22", "content_id": "58be1322a52f2ee308c40ccbb2b9fbfe76eada20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1988, "license_type": "no_license", "max_line_length": 136, "num_lines": 75, "path": "/modules/devmon.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "import willie\nimport urllib\nimport status\n\nalert = True\n\n#print info of devmon: Bandwidth usage of last 24h, and lease information\[email protected]('devmon')\ndef devmon(bot, trigger):\n url = \"https://devlol.org/devmon/data/day.tsv\"\n data = urllib.urlopen(url)\n rx = 0\n tx = 0\n peak = 0\n last = 0\n last_date = ''\n next(data)\n for l in data:\n args = l.split('\\t')\n rx += float(args[1])\n tx += float(args[2])\n lease = int(args[3])\n if lease > peak:\n peak = lease\n if lease > 0:\n last = lease\n last_date = args[0]\n rx = (rx * 15 * 60) / 1024\n tx = (tx * 15 * 60) / 1024\n if lease > 0:\n people = '%d Devices active now' %lease\n else:\n people = 'No activity since: %s' %last_date\n dump = 'Total Bandwidth Usage in last 24h: download: %.2fMB, upload: %.2fMB, Peak of active Devices: %d, %s' %(rx, tx, peak, people)\n bot.reply(dump)\n\n\ndef get_leases():\n url = \"https://devlol.org/devmon/now.php\"\n return int(urllib.urlopen(url).read())\n\n\n#periodically check if status and devmon fit each other\[email protected](15*60)\ndef check_status_match(bot):\n global alert\n if alert:\n state = status.query_api()\n leases = get_leases()\n if ('OPEN' in state) and (leases == 0):\n bot.msg('#devlol', 'Warning: status is set to \\'OPEN\\', but no activity detected!')\n bot.msg('#devlol', '\\'.status close\\' or \\'.alert off\\'')\n\n\n#this command lets you manually turn off the status alert\[email protected]('alert')\ndef alert(bot, trigger):\n global alert\n cmd = trigger.group(2)\n if cmd == 'on':\n alert = True\n elif cmd == 'off':\n alert = False\n\n if alert:\n bot.say('alert is on')\n else:\n bot.say('alert is off')\n\n\n#because we tend to forget toggles, alert resets every hour\[email protected](60*60)\ndef reset_alert(bot):\n global alert\n alert = True\n" }, { "alpha_fraction": 0.5246548056602478, "alphanum_fraction": 0.5259697437286377, "avg_line_length": 29.420000076293945, "blob_id": "2053ab58782d1efcf50fe40dff6423dd5ddfa970", "content_id": 
"3c32e3bf5a68f4b786dbf94088ad9e2c45e68f80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1521, "license_type": "no_license", "max_line_length": 118, "num_lines": 50, "path": "/modules/help.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport willie\n\ncommands = {\n 'help': {\n 'synopsis': 'help [ COMMAND ]',\n 'description': 'Prints this manual.',\n 'alias': ['h', 'man'],\n },\n 'devmon': {\n 'synopsis': 'devmon',\n 'description': 'Replies statistics of collected meta-data about our network-infrastructure.',\n 'alias': ['dm', 'mon'],\n },\n 'door': {\n 'synopsis': 'door',\n 'description': 'Prints the current status of the door at /dev/lol. If locked or unlocked.',\n 'alias': ['dm', 'mon'],\n },\n 'temp': {\n 'synopsis': 'temp',\n 'description': 'Prints current Temperature of various sensors',\n 'alias': ['t'],\n },\n 'humidity': {\n 'synopsis': 'humidity',\n 'description': 'Prints current Humidity of various sensors',\n 'alias': ['h'],\n },\n 'weather': {\n 'synopsis': 'weather',\n 'description': 'Prints a human-readable summary of the hourly weather-data provided by forecast.io for Linz.',\n 'alias': ['w'],\n },\n}\n\[email protected]('h', 'help', 'man')\ndef help(bot, trigger):\n cmd = trigger.group(2)\n if cmd:\n if cmd in commands.keys():\n bot.say('.' + commands[cmd]['synopsis'])\n bot.say(commands[cmd]['description'])\n else:\n bot.say(\"command not found!\")\n else:\n bot.say(\"=== /dev/bot manual ===\")\n for k, v in sorted(commands.iteritems()):\n bot.say('.' + v['synopsis'])\n" }, { "alpha_fraction": 0.6071628332138062, "alphanum_fraction": 0.6138780117034912, "avg_line_length": 26.492307662963867, "blob_id": "a6829c35201afe8f5233b8968ba95b77ac09636f", "content_id": "081bb13b9d40020b6ebe98712d3e6248d5b9c875", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1787, "license_type": "no_license", "max_line_length": 120, "num_lines": 65, "path": "/modules/status.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "import willie\nimport urllib\nimport datetime\nimport json\n\nlaststate = True\n\ndef query_api(mode='viewstatus'):\n url = \"https://devlol.org/status/hackerspaceapi/\"\n return urllib.urlopen(url + mode).read()\n\ndef isLocked():\n data = json.loads(query_api(mode=''))\n b = data['sensors']['door_locked'][0]['value']\n return b\n\ndef isOpen():\n return not isLocked()\n\[email protected]('isitChristmas')\ndef christmas(bot, trigger):\n days_to_go = (datetime.date(datetime.date.today().year, 12, 24) - datetime.date.today()).days\n if days_to_go == 0:\n boy.say(\"Happy Birthday Brian!\")\n else:\n bot.say(\"No. 
But it's only %i days to go.\" % days_to_go)\n\[email protected](60)\ndef check_status(bot):\n global laststate\n state = isOpen()\n if laststate is not state:\n # trigger the topic broadcast\n bot.write(('TOPIC', '#devlol'))\n laststate = state\n if state:\n bot.msg('#devlol', 'the space is now OPEN')\n else:\n bot.msg('#devlol', 'the space is now CLOSED')\n\[email protected]('.*')\[email protected]('332')\ndef topic_set(bot, trigger):\n if isOpen():\n prefix = '[OPEN]'\n else:\n prefix = '[CLOSED]'\n if not trigger.startswith(prefix) or trigger.count(prefix) > 1:\n bot.write(('TOPIC', '#devlol'), prefix + \" \" + trigger.replace('[OPEN]', '').replace('[CLOSED]', '').strip(\" \"))\n\[email protected]('.*')\[email protected]('TOPIC')\ndef topic_trigger(bot, trigger):\n bot.write(('TOPIC', '#devlol'))\n\[email protected]('door')\ndef door(bot, trigger):\n bot.write(('TOPIC', '#devlol'))\n if isLocked():\n bot.reply(\"The door is locked!\")\n else:\n bot.reply(\"The door is unlocked!\")\n\n#init state on startup\nlaststate = isOpen()\n" }, { "alpha_fraction": 0.6193029284477234, "alphanum_fraction": 0.6193029284477234, "avg_line_length": 30.08333396911621, "blob_id": "c50ad6acd0e6e1f221a7b32bf75ddbc0e3675adf", "content_id": "eaabe24db05ac3f3ed1e71c98bac245756678f27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 100, "num_lines": 12, "path": "/modules/events.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "import willie\nimport urllib\nimport datetime\nimport json\n\[email protected]('events')\ndef events(bot, trigger):\n dump = urllib.urlopen(\"http://devlol.at/api/events\").read()\n data = json.loads(dump)\n bot.reply(\"upcoming events:\")\n for e in data['events']:\n bot.say(e['start_date'] + \" \" + e['start_time'] + \", \" + e['title'] + \" - \" + e['subtitle'])\n" }, { "alpha_fraction": 0.6175345778465271, "alphanum_fraction": 0.6357752084732056, "avg_line_length": 32.9900016784668, "blob_id": "573a1375c45603302f941ad91939ac1c8b6856ed", "content_id": "6c12adfe45c4ab4e8b8a700ecf0980b6f81eb7a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3402, "license_type": "no_license", "max_line_length": 118, "num_lines": 100, "path": "/modules/weather.py", "repo_name": "DevLoL/devbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport urllib\nimport willie\nimport forecastio\nimport mosquitto\n\nAPI_KEY = \"41cbdafc2dc5571e85712340b993a278\"\nlat = \"48.3058255\"\nlng = \"14.2838517\"\n\ndungeon = {\"temp\": 0, \"hum\": 0}\nmainroom = {\"temp\": 0, \"hum\": 0}\n\nresponses = {\n \"clear-day\": \"It's fucking alright.\",\n \"clear-night\": \"It's fucking dark.\",\n \"rain\": \"It's fucking raining.\",\n \"snow\": \"It's fucking snowing.\",\n \"sleet\": \"Fucking snow with rain.\",\n \"wind\": \"It's fucking windy.\",\n \"fog\": \"Fucking fifty shades of grey.\",\n \"cloudy\": \"Fucking clouds everywhere.\",\n \"partly-cloudy-day\": \"Some fucking clouds.\",\n \"partly-cloudy-night\": \"Fucking dark with clouds.\",\n}\n\ndef metafridge(bot, msg):\n urllib.urlopen(\"http://metafridge.metalab.at/cgi-bin/post_text.cgi?killmockingbird=0&effect=0&scroll_text=\" + msg)\n\ndef on_message(client, bot, msg):\n try:\n if(msg.topic == \"devlol/h19/dungeon/DHT21/temp\"):\n dungeon[\"temp\"] = float(msg.payload.replace('\\t', ''))\n if(msg.topic == 
\"devlol/h19/dungeon/DHT21/hum\"):\n dungeon[\"hum\"] = float(msg.payload.replace('\\t', ''))\n if(msg.topic == \"devlol/h19/mainroom/DHT21/temp\"):\n mainroom[\"temp\"] = float(msg.payload)\n if(msg.topic == \"devlol/h19/mainroom/DHT21/hum\"):\n mainroom[\"hum\"] = float(msg.payload)\n if((msg.topic == \"devlol/h19/mainroom/craftui/button/buttonHi5\") and (msg.payload == \"DOWN\")):\n bot.msg('#devlol', 'Hi5!')\n metafridge(bot, \"Hi5! Hi5! Hi5!\")\n if((msg.topic == \"devlol/h19/mainroom/craftui/button/button_black\") and (msg.payload == \"DOWN\")):\n metafridge(bot, \"Gruesze aus Linz!\")\n except Exception, e:\n print e\n print \"Error in MQTT Message.\"\n pass\n\nclient = mosquitto.Mosquitto()\nclient.connect(\"mqtt.devlol.org\")\nclient.on_message = on_message\nclient.subscribe(\"devlol/#\")\n\[email protected]('weather')\ndef weather(bot, trigger):\n forecast = forecastio.load_forecast(API_KEY, lat, lng)\n today = forecast.hourly()\n bot.reply(today.summary)\n\[email protected]('fuckingweather')\ndef fuckingweather(bot, trigger):\n forecast = forecastio.load_forecast(API_KEY, lat, lng)\n now = forecast.currently()\n if now.icon in responses:\n bot.reply(responses[now.icon])\n else:\n bot.reply(\"I have no fucking clue!\")\n\[email protected]('temp')\ndef temp(bot, trigger):\n forecast = forecastio.load_forecast(API_KEY, lat, lng)\n now = forecast.currently()\n dump = \"Mainroom: %.2f°C, Dungeon: %.2f°C, Outside: %.2f°C\" %(mainroom[\"temp\"], dungeon[\"temp\"], now.temperature)\n bot.reply(dump)\n\[email protected]('humidity')\ndef humidity(bot, trigger):\n forecast = forecastio.load_forecast(API_KEY, lat, lng)\n now = forecast.currently()\n dump = \"Mainroom: %.2f, Dungeon: %.2f, Outside: %.2f\" %(mainroom[\"hum\"], dungeon[\"hum\"], now.humidity*100)\n bot.reply(dump)\n\[email protected](1)\ndef mqtt_update(bot):\n global client\n if mqtt_update.init_userdata:\n mqtt_update.init_userdata = False\n client.user_data_set(bot)\n client.loop()\nmqtt_update.init_userdata = True\n\[email protected]('metafridge')\ndef metatrigger(bot, trigger):\n if(trigger.group(2) is not None):\n metafridge(bot, trigger.group(2))\n bot.say(\"Posted to Metafridge!\")\n else:\n bot.reply(\"Add a Message.\")\n" } ]
8
ZihaoZhai/Recommendation_System
https://github.com/ZihaoZhai/Recommendation_System
7720f3e058b8614dfdcaf5a200d6a1258252f6b1
2de837459193540b553efe42b926efc9dfbd595d
ff93a3225865b24ce468136fa90dad82b9aaf6da
refs/heads/master
2020-04-07T17:54:04.248074
2018-12-28T23:33:17
2018-12-28T23:33:17
158,588,412
0
0
null
2018-11-21T18:09:20
2018-11-28T18:21:48
2018-12-05T01:14:44
Python
[ { "alpha_fraction": 0.6540521383285522, "alphanum_fraction": 0.6822563409805298, "avg_line_length": 29.791208267211914, "blob_id": "2bd15970fcfc9b1e281f9ca9e5caf32fb1776c18", "content_id": "beaf8f89f251148bba9e4a31a9f96b98f6bbc910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2801, "license_type": "no_license", "max_line_length": 79, "num_lines": 91, "path": "/collaborative_filtering.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import collections\nimport bisect\nfrom datetime import datetime\nimport json\n\ndef collaborativeFiltering(data,env):\n\tdef jaccardSimilarity(s1,s2):\n\t\treturn len(s1&s2)/float(len(s1|s2))\n\n\tdef reverseMapping(data):\n\t\tnewData=collections.defaultdict(list)\n\t\tfor user in data:\n\t\t\tfor pro in data[user]:\n\t\t\t\tnewData[pro].append(user)\n\t\treturn newData\n\n\tdef userBase(data):\n\t\tsimilarUser=collections.defaultdict(dict)\n\t\tcandidateProduct=collections.defaultdict(set)\n\t\trecommendProduct=collections.defaultdict(list)\n\t\tkey=data.keys()\n\t\tstart=datetime.now()\n\t\tprint ( 'Running user-base approach...')\n\t\tprint ( 'total user:',len(key))\n\t\tprint ( 'finding similar user and predicting...')\n\t\tcount=0\n\t\tfor i in xrange(len(key)):\n\t\t\tfor j in xrange(i+1,len(key)):\n\t\t\t\tk1,k2=key[i],key[j]\n\t\t\t\ts1,s2=set(data[k1]),set(data[k2])\n\t\t\t\tif env['similarity']==\"Jaccard\":\n\t\t\t\t\tsimi=jaccardSimilarity(s1,s2)\n\t\t\t\tif simi>=env['userSimilarityThreshold']:\n\t\t\t\t\tif k1[0]=='C':\n\t\t\t\t\t\tsimilarUser[k1][k2]=simi\n\t\t\t\t\t\tcandidateProduct[k1]=candidateProduct[k1]|(s2-s1)\n\t\t\t\t\tif k2[0]=='C':\n\t\t\t\t\t\tsimilarUser[k2][k1]=simi\n\t\t\t\t\t\tcandidateProduct[k2]=candidateProduct[k2]|(s1-s2)\n\t\t\tif len(similarUser[k1].keys())>0 and k1[0]=='C':\n\t\t\t\ttotal=sum([similarUser[k1][u]**2 for u in similarUser[k1]])\n\t\t\t\tfor pro in candidateProduct[k1]:\n\t\t\t\t\tsimi=0\n\t\t\t\t\tfor u in productUserMapping[pro]:\n\t\t\t\t\t\tsimi+=similarUser[k1].get(u,0)**2\n\t\t\t\t\tsimi=simi/float(total)\n\t\t\t\t\tif simi>=env['recommendProductThreshold']:\n\t\t\t\t\t\tbisect.insort(recommendProduct[k1],(simi,pro))\n\t\t\t\tdel similarUser[k1]\n\t\t\t\tif k1 in candidateProduct:\n\t\t\t\t\tdel candidateProduct[k1]\n\t\t\tcount+=1\n\t\t\tif count%1000==0:\n\t\t\t\tprint ( int(count/float(len(key))*100),'%','finished',datetime.now()-start)\n\t\treturn recommendProduct\n\n\tdef itemBase(data):\n\t\tkey=data.keys()\n\t\tcount=0\n\t\tstart=datetime.now()\n\t\tprint ( 'Running items-base approach...' 
)\n\t\tprint ( 'total product:',len(key))\n\t\tprint ( 'finding similar product...')\n\t\trelatedData=collections.defaultdict(dict)\n\t\tfor i in xrange(len(key)):\n\t\t\tfor j in xrange(i+1,len(key)):\n\t\t\t\tk1,k2=key[i],key[j]\n\t\t\t\ts1,s2=set(data[k1]),set(data[k2])\n\t\t\t\tsimi=jaccardSimilarity(s1,s2)\n\t\t\t\tif simi>env['itemSimilarityThreshold']:\n\t\t\t\t\trelatedData[k1][k2]=simi\n\t\t\t\t\trelatedData[k2][k1]=simi\n\t\t\tcount+=1\n\t\t\tif count%500==0:\n\t\t\t\tprint ( int(count/float(len(key))*100),'%','finished',datetime.now()-start)\n\t\treturn relatedData\n\n\n\tproductUserMapping=reverseMapping(data)\n\tif env[\"method\"]=='UserBase':\n\t\tresult=userBase(data)\n\t\toutput=open('../Data/recommendProduct_userBase.json','w')\n\t\toutput.write(json.dumps(result))\n\t\toutput.close()\n\telif env[\"method\"]=='ItemBase':\n\t\tresult=itemBase(productUserMapping)\n\t\tprint ( result )\n\t\toutput=open('../Data/recommendProduct_itemBase.json','w')\n\t\toutput.write(json.dumps(result))\n\t\toutput.close()\n\treturn result" }, { "alpha_fraction": 0.5615264773368835, "alphanum_fraction": 0.5693146586418152, "avg_line_length": 32.81578826904297, "blob_id": "8539ec39730cc94a9c26230927c9e23170088fe3", "content_id": "e8e430e91cd8f626114932efdde5c01888f9b7c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1284, "license_type": "no_license", "max_line_length": 121, "num_lines": 38, "path": "/featureExtractor/filterStyle_features.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom connection import connect\n#Add filter Style\ndef fetch_filterStyle(env):\n conn = connect(env['PostgreSqlConnectParameter'])\n cur = conn.cursor()\n filter_style = {}\n uniqId = 0\n sql = \"select distinct filterstyle from product where category_path <> 'category/materials' or category_path is null\"\n cur.execute(sql)\n row = cur.fetchone()\n while row:\n if row[0] is None or len(row[0]) == 0:\n if 'None' not in filter_style.keys():\n filter_style['None'] = uniqId\n uniqId += 1\n else:\n styles = row[0].split(',')\n styles = [x for s in styles for x in s.split('&')]\n for style in styles:\n if style.strip() not in filter_style.keys():\n filter_style[style.strip()] = uniqId\n uniqId += 1\n row = cur.fetchone()\n return filter_style\n\n\ndef add_filterStyle(filterStyle, style_dict):\n res = [0 for _ in range(len(style_dict))]\n if not filterStyle:\n return res\n styles = filterStyle.split(',')\n styles = [x for s in styles for x in s.split('&')]\n for style in styles:\n if style.strip() in style_dict.keys():\n res[style_dict[style.strip()]] = 1\n \n return res" }, { "alpha_fraction": 0.8019169569015503, "alphanum_fraction": 0.8019169569015503, "avg_line_length": 103.33333587646484, "blob_id": "f5a5484b5e2b99cf388fcd9b1a6fbaa3b5655de3", "content_id": "d5d372f45f19d0c85776cdf3ac5f8bbe2f654185", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 313, "license_type": "no_license", "max_line_length": 287, "num_lines": 3, "path": "/README.md", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "# Recommendation_System\n\n##### This projact aims to find related products. After data cleaning and integration, we use Association Rules, Collaborative Filtering and other related algorithm or rules to find the result. We use python with Spark and design well use interface to modify parameter. 
Hopes us do well.\n" }, { "alpha_fraction": 0.5955356955528259, "alphanum_fraction": 0.6098214387893677, "avg_line_length": 27.743589401245117, "blob_id": "78260e6b01ead868d0562468fa01238da9b3eaef", "content_id": "700b3644cb3167025eb7e8efed898141a5188e48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 81, "num_lines": 39, "path": "/featureExtractor/price_features.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom connection import connect\nimport numpy as np\n\n\n\ndef price_range(env):\n conn = connect(env['PostgreSqlConnectParameter'])\n cur = conn.cursor()\n sql = \"select price from product where category_path <> 'category/materials'\"\n prices = []\n cur.execute(sql)\n row = cur.fetchone()\n while row:\n prices.append(float(row[0]))\n row = cur.fetchone()\n interval = env[\"similarityRulesParameter\"][\"price_intervals\"]\n ratio = env[\"similarityRulesParameter\"][\"price_filterRatio\"]\n filter_prices = [ele for ele in prices if ele >= 1 and ele < 20000]\n l = len(filter_prices)\n filter_prices = sorted(filter_prices)\n rangePrice = (filter_prices[int(ratio * l)] - filter_prices[0]) / interval\n res = []\n for i in range(1, interval):\n res.append(np.floor(i * rangePrice))\n return res\n\ndef add_price(price, priceInterval):\n L = len(priceInterval)\n res = [0 for i in range(L)]\n i = 0\n while i < L:\n if priceInterval[i] > price:\n res[i] = 1\n break\n i += 1\n if i == L:\n res[L - 1] = 1\n return res" }, { "alpha_fraction": 0.598342776298523, "alphanum_fraction": 0.607937216758728, "avg_line_length": 27.649999618530273, "blob_id": "19df8a9eb7bdf0523c6b17be8a8c2327443d4246", "content_id": "6e88b06a6c208c00b33ed646017dda98701077c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2293, "license_type": "no_license", "max_line_length": 104, "num_lines": 80, "path": "/featureExtractor/color_features.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom connection import connect\nfrom collections import defaultdict\n\n#Add simple color\ndef fetch_simpleColors(env):\n conn = connect(env['PostgreSqlConnectParameter'])\n cur = conn.cursor()\n colors = []\n sql = \"select color_code from product where category_path <> 'category/materials'\"\n cur.execute(sql)\n row = cur.fetchone()\n while row:\n colors.append(row[0])\n row = cur.fetchone()\n return colors\n \n \ndef simpleColor_dict(env):\n colors = fetch_simpleColors(env)\n simple_dict = {}\n uniqId = 0\n for color in colors:\n if color not in simple_dict.keys() and color != None:\n simple_dict[color] = uniqId\n uniqId += 1\n #print(simple_dict)\n return simple_dict\n \n \ndef simpleColor_vectors(simpleColors, color_dict):\n '''\n input: \n color : The color to transform\n color_dict : color-number dictionary\n \n return:\n encoding vector for given color.\n '''\n\n uniqKeys = list(color_dict.keys())\n encoding_vec = [0 for _ in range(len(uniqKeys))]\n for simpleColor in simpleColors:\n if simpleColor:\n encoding_vec[color_dict[simpleColor]] = 1\n return encoding_vec\n\n\ndef configsku_color_dict(env):\n '''\n input:\n env : parameter environment.\n \n return:\n {configsku1 : [1, 1, 0, 0, 0, 1], configsku2 : [0, 1, 0, 1, 0, 0], ...}\n\n usage:\n sku_color_features = configsku_color_dict(env)\n ''' \n color_dict = simpleColor_dict(env)\n conn = 
connect(env['PostgreSqlConnectParameter'])\n cur = conn.cursor()\n configsku_color_list = []\n sql = \"select configurable_sku, color_code from product where category_path <> 'category/materials'\"\n cur.execute(sql)\n row = cur.fetchone()\n while row:\n configsku_color_list.append((row[0], row[1]))\n row = cur.fetchone()\n \n sku_color_dict = defaultdict(set)\n for (configsku, color) in configsku_color_list:\n sku_color_dict[configsku].add(color)\n\n sku_color_features = {}\n for sku in sku_color_dict.keys():\n color_vector = simpleColor_vectors(sku_color_dict[sku], color_dict)\n sku_color_features[sku] = color_vector\n\n return sku_color_features\n\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 15, "blob_id": "011365c15c5e7a05be94a1f7733bb9f2272654f3", "content_id": "7a6bdf143e7dc1e38c19fe7bd6da97d38060fd41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/evaluator.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "def RMSE(test):\n\t" }, { "alpha_fraction": 0.6464815139770508, "alphanum_fraction": 0.655042827129364, "avg_line_length": 29.90322494506836, "blob_id": "68ba4ef7c38597d2674eba59fb2d1fd0e97a7441", "content_id": "a8bca1076842ee85635010671823126e1483ee9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4789, "license_type": "no_license", "max_line_length": 125, "num_lines": 155, "path": "/data_cleaner.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import collections\nfrom datetime import datetime\nfrom random import random\nimport os\nimport json\nimport time\n\ndef getDataInfor(dataSet):\n\tprint ('tarinning set size:',len(dataSet['train'].keys()))\n\tfor i in xrange(len(dataSet['test'])):\n\t\tprint ('testing set '+str(i+1)+' :',len(dataSet['test'][i].keys()))\n\ndef readCleanData(env):\n\n\tdef phoneNumberCleaner(s):\n\t\tdata=[]\n\t\tfor c in s:\n\t\t\tif c.isdigit():\n\t\t\t\tdata.append(c)\n\t\tdata=''.join(data)\n\t\tif 10<=len(data)<=12:\n\t\t\treturn data[-10:]\n\t\tif len(data)==13:\n\t\t\treturn data[-11:]\n\t\treturn None\n\n\tdef aggregateByOrder(data_obj_list):\n\t\tdata=collections.defaultdict(set)\n\t\tfor obj in data_obj_list:\n\t\t\torder=obj['order_id']\n\t\t\titem=obj[env['aggregateFocus']]\n\t\t\tdata[order].add(item)\n\t\tfor k in data.keys():\n\t\t\tif len(data[k])>1:\n\t\t\t\tdata[k]=sorted(data[k])\n\t\t\telse:\n\t\t\t\tdel data[k]\n\t\treturn data\n\n\tdef aggregateByCustomer(data_obj_list):\n\n\t\tdef customerResolution(data):\n\t\t\tnum=0\n\t\t\tstart=datetime.now()\n\t\t\tuserNum=-1\n\t\t\twhile userNum!=len(data.keys()):\n\t\t\t\tuserNum=len(data.keys())\n\t\t\t\tkeys=data.keys()\n\t\t\t\ttotal=len(keys)\n\t\t\t\tfor c in keys:\n\t\t\t\t\tfor k in data:\n\t\t\t\t\t\tif c!=k and (data[c]['email']&data[k]['email'] or data[c]['telephone']&data[k]['telephone']):\n\t\t\t\t\t\t\tdata[k]['email']=data[k]['email']|data[c]['email']\n\t\t\t\t\t\t\tdata[k]['telephone']=data[k]['telephone']|data[c]['telephone']\n\t\t\t\t\t\t\tdata[k]['customer_id']=data[k]['customer_id']|data[c]['customer_id']\n\t\t\t\t\t\t\tdata[k]['category']=data[k]['category']|data[c]['category']\n\t\t\t\t\t\t\tdata[k][env['aggregateFocus']]=data[k][env['aggregateFocus']]|data[c][env['aggregateFocus']]\n\t\t\t\t\t\t\tdel 
data[c]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tnum+=1\n\t\t\t\t\tif num%2000==0:\n\t\t\t\t\t\tprint ('customer resolution:',str(int(num/float(total)*100))+'%',datetime.now()-start)\n\t\t\t\tbreak\n\t\t\treturn data\n\n\t\tdata={}\n\t\tincremntalId=0\n\t\tfor obj in data_obj_list:\n\t\t\tuser=obj['customer_id']\n\t\t\tif not user:\n\t\t\t\tuser='empty_'+str(incremntalId)\n\t\t\t\tincremntalId+=1\n\t\t\tif user not in data:\n\t\t\t\tdata[user]={}\n\t\t\t\tfor k in obj:\n\t\t\t\t\tdata[user][k]=set([obj[k]]) if obj[k] else set()\n\t\t\telse:\n\t\t\t\tfor k in obj:\n\t\t\t\t\tif obj[k]:\n\t\t\t\t\t\tdata[user][k].add(obj[k])\n\t\tsingleUser={}\n\t\tfor k in data.keys():\n\t\t\tif not data[k]['telephone'] and not data[k]['email']:\n\t\t\t\tsingleUser[k]=data[k]\n\t\t\t\tdel data[k]\n\t\tresolution=customerResolution(data)\n\t\tdata=dict(singleUser.items()+resolution.items())\n\t\tnewData={}\n\t\tfor k in data:\n\t\t\tif len(data[k][env['aggregateFocus']])>1:\n\t\t\t\tnewData[min(data[k]['customer_id'])]=sorted(data[k][env['aggregateFocus']])\n\t\treturn newData\n\n\n\n\n\tprint ('data aggregated by',env['aggregateDimension'])\n\tsource_data=open(env['dataFilesPath']+env['soureInputData'],'r').read().decode(\"utf-16\").split('\\n')\n\theader=source_data.pop(0).split('\\t')\n\tsource_data.pop()\n\n\t# source_data=source_data[:50000]\n\n\tkey_mapping={}\n\tfor i in xrange(len(header)):\n\t\tkey_mapping[i]=header[i]\n\tdataSet={\n\t\t\"train\":[],\n\t\t\"test\":[[] for i in xrange(env['testSetNum'])]\n\t}\n\tfor d in source_data:\n\t\td=d.split('\\t')\n\t\tobj={}\n\t\tif len(d)>len(header):\n\t\t\td.pop(-3)\n\t\tfor i in xrange(len(d)):\n\t\t\tobj[key_mapping[i]]=d[i]\n\t\tobj['telephone']=phoneNumberCleaner(obj['telephone'])\n\t\tobj['email']=None if len(obj['email'])<=10 else obj['email']\n\t\tobj['email']=obj['registered_email'] if len(obj['registered_email'])>10 else obj['email']\n\t\tsingleTestRate=env[\"testSetRate\"]/float(env[\"testSetNum\"])\n\t\tti=int(random()/singleTestRate)\n\t\tif ti<env[\"testSetNum\"]:\n\t\t\tdataSet['test'][ti].append(obj)\n\t\telse:\n\t\t\tdataSet['train'].append(obj)\n\tdel source_data\n\tprint ('start aggregating testing set')\n\tprint ('total number of testing set:',len(dataSet['test']))\n\tif env['aggregateDimension']=='cus':\n\t\tfiles=os.listdir(env['dataFilesPath'])\n\t\tfilePath=env['dataFilesPath']+env['intermediateResult']\n\t\tif env['intermediateResult'] in files and env['aggregateFocus']=='configurable_sku':\n\t\t\tprint ('reading existing data, created at',time.strftime(\"%Y/%m/%d %H:%M:%S\", time.localtime(os.path.getctime(filePath))))\n\t\t\tdataSet=json.loads(open(filePath).read())\n\t\t\tgetDataInfor(dataSet)\n\t\telse:\n\t\t\tfilePath=env['dataFilesPath']+env['intermediateResult']\n\t\t\tfor i in xrange(len(dataSet['test'])):\n\t\t\t\tprint ('start aggregating testing set '+str(i+1))\n\t\t\t\tdataSet['test'][i]=aggregateByCustomer(dataSet['test'][i])\n\t\t\tprint ('start aggregating training set')\n\t\t\tdataSet['train']=aggregateByCustomer(dataSet['train'])\n\t\t\tgetDataInfor(dataSet)\n\t\t\tuserProduct=open(filePath,'w')\n\t\t\tuserProduct.write(json.dumps(dataSet))\n\t\t\tuserProduct.close()\n\telif env['aggregateDimension']=='ord':\n\t\tfor i in xrange(len(dataSet['test'])):\n\t\t\tprint ('start aggregating testing set '+str(i+1))\n\t\t\tdataSet['test'][i]=aggregateByOrder(dataSet['test'][i])\n\t\tprint ('start aggregating training set')\n\t\tdataSet['train']=aggregateByOrder(dataSet['train'])\n\t\tgetDataInfor(dataSet)\n\treturn dataSet" }, { 
"alpha_fraction": 0.6746640801429749, "alphanum_fraction": 0.6804222464561462, "avg_line_length": 30.57575798034668, "blob_id": "e9672400cdb28d6b886f1b3f5cd3aad97a5bf99d", "content_id": "0287efcd3a7ade3667f1050d4fa9d9fda0a662f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1042, "license_type": "no_license", "max_line_length": 73, "num_lines": 33, "path": "/association_rule.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import collections\nimport json\n\ndef findFrequentPairs(data,paremeter):\n\tprint ('Using association rules to find frequent pairs...')\n\tdata=[data[k] for k in data]\n\thashSingle={}\n\thashPair={}\n\thashResult=collections.defaultdict(dict)\n\tfor d in data:\n\t\tfor i in xrange(len(d)):\n\t\t\thashSingle[d[i]]=hashSingle.get(d[i],0)+1\n\tfor k in hashSingle.keys():\n\t\tif hashSingle[k]<paremeter['support']:\n\t\t\tdel hashSingle[k]\n\tfor d in data:\n\n\t\tfor i in xrange(len(d)):\n\t\t\tfor j in xrange(i+1,len(d)):\n\t\t\t\tif d[i] in hashSingle and d[j] in hashSingle:\n\t\t\t\t\thashPair[(d[i],d[j])]=hashPair.get((d[i],d[j]),0)+1\n\tfor p in hashPair:\n\t\tfor i in xrange(len(p)): # trigger\n\t\t\to=abs(len(p)-1-i)\n\t\t\tconf=hashPair[p]/float(hashSingle[p[i]]) \n\t\t\tinte=conf-hashSingle[p[o]]/float(len(data))\n\t\t\tif conf>=paremeter['confidence'] and abs(inte)>=paremeter['interest']:\n\t\t\t\thashResult[p[i]][p[o]]=conf\n\toutput=open(paremeter['outPutFile'],'w')\n\toutput.write(json.dumps(hashResult))\n\toutput.close()\n\tprint ('Frequent Pairs Number:',len(hashResult.keys()))\n\treturn hashResult\n" }, { "alpha_fraction": 0.6399345397949219, "alphanum_fraction": 0.6481178402900696, "avg_line_length": 28.80487823486328, "blob_id": "2e3cbb02079fbde6518643d00d7282bf5a8abe37", "content_id": "a253a68208e531cc18cac7383a5f8a875df83291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1222, "license_type": "no_license", "max_line_length": 132, "num_lines": 41, "path": "/featureExtractor/category_features.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom connection import connect\n\ndef fetch_category(env):\n conn = connect(env['PostgreSqlConnectParameter'])\n cur = conn.cursor()\n category = {}\n uniqId = 0\n sql = \"select distinct category_path from product where category_path <> 'category/materials' or category_path is null\"\n cur.execute(sql)\n row = cur.fetchone()\n while row:\n if row[0] and row[0] not in category:\n category[row[0]] = uniqId\n uniqId += 1\n row = cur.fetchone()\n return category\n\ndef encode_category(category, category_dict):\n\n res = [0 for _ in range(len(category_dict))]\n if not category:\n return res\n \n res[category_dict[category]] = 1 \n return res\n\n\ndef configsku_category_dict(env):\n conn = connect(env['PostgreSqlConnectParameter'])\n cur = conn.cursor()\n sql = \"select configurable_sku, category_path from product where category_path <> 'category/materials' or category_path is null\"\n cur.execute(sql)\n row = cur.fetchone()\n category_dict = fetch_category(env)\n sku_category = dict()\n while row:\n sku_category[row[0]] = encode_category(row[1], category_dict)\n row = cur.fetchone()\n\n return sku_category\n" }, { "alpha_fraction": 0.751483678817749, "alphanum_fraction": 0.751483678817749, "avg_line_length": 43.83333206176758, "blob_id": "b4b5d33df1b119a3c69b3daeede0381276b5dcda", "content_id": 
"e358249d3091c44df91650f8ff98aa96fc35ae28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "no_license", "max_line_length": 119, "num_lines": 30, "path": "/recommendation_system.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import json\n\nimport data_cleaner\nimport association_rule\nimport basic_rules\nimport collaborative_filtering\nfrom similarityRule import get_similarity_dict\n\nenv=json.loads(open('env.json').read())\nfor k in env:\n\tif str(env[k]).isdigit():\n\t\tenv[k]=int(env[k]) if '.' not in str(env[k]) else float(env[k])\nprint 'running', env['algorithmMethod']\nif env['algorithmMethod'] == 'BasicRules':\n\tdata=basic_rules.findBasicRulesProductSet(env['basicRulesParameter'],env['PostgreSqlConnectParameter'])\n\toutput=open(env['dataAggregateParameter']['dataFilesPath']+env['basicRulesParameter']['rule']+'.json','w')\n\toutput.write(json.dumps(data))\n\toutput.close()\nelif env['algorithmMethod'] == 'ContentBaseSimilarity':\n\titem_similarity_dict = get_similarity_dict(env)\n\tif env['similarityRulesParameter']['save_file_path']:\n with open(env['similarityRulesParameter']['save_file_path'], 'w') as f:\n json.dump(item_similarity_dict, f)\nelse:\n\tdata=data_cleaner.readCleanData(env['dataAggregateParameter'])\n\tprint '\\n'\n\tif env['algorithmMethod']=='AssociationRule':\n\t\tfrequentPairs=association_rule.findFrequentPairs(data['train'],env['associationRulesParemeter'])\n\telif env['algorithmMethod']=='CollaborativeFiltering':\n\t\trecommendProduct=collaborative_filtering.collaborativeFiltering(data['train'],env['collaborativeFilteringParameter'])\n\t\t\n" }, { "alpha_fraction": 0.6233545541763306, "alphanum_fraction": 0.6339702606201172, "avg_line_length": 28.450000762939453, "blob_id": "63c2cfc0975ae0cfbd49c889015cc6609e836021", "content_id": "d012d3f1aa99ebd6e21512fed48a290d61153683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2355, "license_type": "no_license", "max_line_length": 127, "num_lines": 80, "path": "/basic_rules.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\nimport collections\n\ndef findBasicRulesProductSet(env,con):\n\n\tdef connect(params):\n\t conn = None\n\t try:\n\t print('Connecting to the PostgreSQL database...')\n\t conn = psycopg2.connect(**params)\n\t print('Connected...\\n')\n\t cur = conn.cursor()\n\t return cur\n\t except (Exception, psycopg2.DatabaseError) as error:\n\t print(error)\n\n\tdef hash(display_id):\n\t\tif display_id:\n\t\t\texist=False\n\t\t\tfor e in env['hashRule']:\n\t\t\t\tif display_id[0] in e['case']:\n\t\t\t\t\texist=True\n\t\t\t\t\tparams=e[env['rule']]\n\t\t\t\t\tif params[-1]=='e':\n\t\t\t\t\t\tparams[1]=params[1] if params[1]!='e' else len(display_id)\n\t\t\t\t\t\tkey=display_id[params[0]:params[1]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tkey=display_id[0]\n\t\t\t\t\thashMap[key].add(display_id)\n\t\t\t\t\tbreak\n\t\t\tif not exist:\n\t\t\t\texception.add(display_id)\n\tdef pick(display_id):\n\t\texist=False\n\t\tfor e in env['hashRule']:\n\t\t\tif display_id[0] in e['case']:\n\t\t\t\texist=True\n\t\t\t\tparams=e[env['rule']]\n\t\t\t\tif params[-1]=='e':\n\t\t\t\t\tkey=display_id[params[0]:params[1]]\n\t\t\t\t\tif env['prePick'] and e[env['rule']+'PrePick']!=-1:\n\t\t\t\t\t\tresult[display_id]=list(hashMap[key])[:e[env['rule']+'PrePick']] # pick product from start or end can make them 
different\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult[display_id]=list(hashMap[key]) # pay attention! low copy here!\n\t\t\t\telif params[-1]=='n':\n\t\t\t\t\tkey=display_id[0]\n\t\t\t\t\tcandidate=set()\n\t\t\t\t\tfor d in hashMap[key]:\n\t\t\t\t\t\tif display_id[params[0]:params[1]]!=d[params[0]:params[1]]:\n\t\t\t\t\t\t\tcandidate.add(d)\n\t\t\t\t\tif env['prePick'] and e[env['rule']+'PrePick']!=-1:\n\t\t\t\t\t\tresult[display_id]=list(candidate)[:e[env['rule']+'PrePick']]\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult[display_id]=list(candidate)\n\t\t\t\tbreak\n\t\tif not exist:\n\t\t\tresult[display_id]=[]\n\t\t\tunMatched.add(display_id)\n\t\t\t\n\n\tcur=connect(con)\n\tcur.execute('select configurable_sku from product')\n\tprint ('Calculating',env['rule'])\n\trow = cur.fetchone()\n\thashMap,exception,result,unMatched=collections.defaultdict(set),set(),collections.defaultdict(set),set()\n\tprint ('hashing all product...')\n\twhile row:\n\t\tdisplay_id = row[0]\n\t\thash(display_id)\n\t\trow = cur.fetchone()\n\tprint (len(exception),'exception\\n')\n\tcur.execute('select display_id from display_unit')\n\tprint ('picking related product...')\n\trow = cur.fetchone()\n\twhile row:\n\t\tdisplay_id=row[0]\n\t\tpick(display_id)\n\t\trow=cur.fetchone()\n\tprint (len(unMatched),'unmatched')\n\treturn result" }, { "alpha_fraction": 0.605381190776825, "alphanum_fraction": 0.6188340783119202, "avg_line_length": 21.399999618530273, "blob_id": "da46f2d8f4af0bd87c5cdcca224ab0e976630b8e", "content_id": "faebda55cbf9cd165c031ea60c83b69e0a5cc1e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/featureExtractor/connection.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\n\ndef connect(params):\n conn = None\n try:\n conn = psycopg2.connect(**params)\n return conn\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n conn.close()" }, { "alpha_fraction": 0.6011112928390503, "alphanum_fraction": 0.6151484251022339, "avg_line_length": 40.180721282958984, "blob_id": "3a37f984b31a3ec9c752862f2b8375ae6058d96f", "content_id": "3c2e7354930a8e3012f21d20a035fb6b15ff8f2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6839, "license_type": "no_license", "max_line_length": 109, "num_lines": 166, "path": "/similarityRule.py", "repo_name": "ZihaoZhai/Recommendation_System", "src_encoding": "UTF-8", "text": "import psycopg2\nimport json\nimport numpy as np\nfrom collections import defaultdict\nfrom datetime import datetime\nimport sys\n\nsys.path.append('./featureExtractor/')\nfrom connection import connect\nfrom color_features import configsku_color_dict\nfrom price_features import price_range, add_price\nfrom filterStyle_features import fetch_filterStyle, add_filterStyle\nfrom category_features import configsku_category_dict\n\nclass Item :\n def __init__(self, env, row,\\\n priceInterval, style_dict, sku_color_dict, sku_category_dict):\n self.env = env\n self.row = row\n self.price = float(row[0])\n self.filterStyle = row[1]\n self.category_path = row[2]\n self.configsku = row[3]\n self.sku_color_dict = sku_color_dict\n self.sku_category_dict = sku_category_dict\n if env['similarityRulesParameter']['usePrice']:\n self.price_vector = add_price(self.price, priceInterval)\n \n if env['similarityRulesParameter']['useSimpleColor']:\n self.color_vector = 
self.sku_color_dict[self.configsku]\n\n        if env['similarityRulesParameter']['useFilterStyle']:\n            self.style_vector = add_filterStyle(self.filterStyle, style_dict)\n\n        if env['similarityRulesParameter']['useCategory']:\n            self.category_vector = self.sku_category_dict[self.configsku]\n\n    def Jacard_similarity(self, vec1, vec2):\n        if vec1 is None or vec2 is None or len(vec1) != len(vec2):\n            return 0\n        denominator = 0\n        for i in range(len(vec1)):\n            if vec1[i] == 1 and vec2[i] == 1:\n                denominator += 2\n            elif vec1[i] == 1 or vec2[i] == 1:\n                denominator += 1\n        if denominator == 0:\n            return 0\n        return float(sum([1 for i in range(len(vec1)) if vec1[i] == vec2[i] and vec1[i] == 1])) / denominator\n\n    def Cosine_similarity(self, vec1, vec2):\n        vec1 = np.array(vec1)\n        vec2 = np.array(vec2)\n        # normalize by the vector norms, not by the element sums\n        if sum(vec1) == 0 or sum(vec2) == 0:\n            return 0\n        return float(vec1.dot(vec2)) / (np.sqrt(vec1.dot(vec1)) * np.sqrt(vec2.dot(vec2)))\n\n    def calculate_similarity(self, vec1, vec2):\n        if self.env['similarityRulesParameter']['similarity_metric'] == 'cosine':\n            return self.Cosine_similarity(vec1, vec2)\n        else:\n            return self.Jacard_similarity(vec1, vec2)\n\n    def price_similarity(self, item):\n        return self.calculate_similarity(self.price_vector, item.price_vector)\n\n    def color_similarity(self, item):\n        return self.calculate_similarity(self.color_vector, item.color_vector)\n\n    def style_similarity(self, item):\n        return self.calculate_similarity(self.style_vector, item.style_vector)\n\n    def category_similarity(self, item):\n        return self.calculate_similarity(self.category_vector, item.category_vector)\n\n    def subfield_similarity(self, item, start, end):\n        intersection = 0\n        union = 0\n\n        for i in range(start, end):\n            if self.row[i] and self.row[i] == item.row[i]:\n                intersection += 1\n                union += 1\n            elif self.row[i] and item.row[i]:\n                union += 2\n            elif self.row[i] or item.row[i]:\n                union += 1\n\n        if union == 0:\n            return 0\n        return float(intersection) / union\n\n    def bra_similarity(self, item):\n        return self.subfield_similarity(item, 4, 17)\n\n    def panty_similarity(self, item):\n        return self.subfield_similarity(item, 17, 20)\n\n    def lingerie_similarity(self, item):\n        return self.subfield_similarity(item, 20, 27)\n\n    def beauty_similarity(self, item):\n        return self.subfield_similarity(item, 27, 30)\n\n\n    def similarity(self, item,env):\n        return env['priceWeight'] * self.price_similarity(item) + \\\n            env['colorWeight'] * self.color_similarity(item) + \\\n            env['styleWeight'] * self.style_similarity(item) + \\\n            env['categoryWeight'] * self.category_similarity(item) + \\\n            env['braTypeSimilarity'] * self.bra_similarity(item) + \\\n            env['pantyTypeWeight'] * self.panty_similarity(item) + \\\n            env['lingerieTypeWeight'] * self.lingerie_similarity(item) + \\\n            env['beautyTypeSimilarity'] * self.beauty_similarity(item)\n\ndef get_similarity_dict(env):\n    print('Connecting to the PostgreSQL database...')\n    print('Connected...\\n')\n    conn = connect(env['PostgreSqlConnectParameter'])\n    cur = conn.cursor()\n    sql = \" select price, filterstyle, category_path, configurable_sku, \\\n        bra_type, bra_by_function, bra_padding_level, bra_padding_style, \\\n        bra_wire_style, bra_strap, bra_wear_style, bra_neck_style, \\\n        bra_closure, bra_shape, bra_seam, bra_back_style, \\\n        bra_smooth_level, \\\n        panty_style, panty_cut, panty_smooth_level, \\\n        lingerie_product_type, clothing_by_function, sleeve_length, \\\n        pant_length, dress_length, dress_knee_length, collar_shape, \\\n        beauty_type, makeup_product_type, skincare_product_type \\\n        from product\\\n        where category_path <> 
'category/materials' or category_path is null\"\n cur.execute(sql)\n item_list = []\n visited = set()\n row = cur.fetchone()\n priceInterval = price_range(env) # transform price into ranges feature\n style_dict = fetch_filterStyle(env) # get {filterStyle : uniqId} dictionary\n sku_color_dict = configsku_color_dict(env) # get {configsku : color_set} dctionary\n sku_category_dict = configsku_category_dict(env) # get {configsku : category} dctionary\n while row:\n configsku = row[3]\n if configsku in visited:\n row = cur.fetchone()\n continue\n item_list.append(Item(env, row,\\\n priceInterval, style_dict, sku_color_dict, sku_category_dict))\n visited.add(configsku)\n row = cur.fetchone()\n \n\n print 'Calculating similarity...'\n start=datetime.now()\n count=0\n item_similarity_dict = defaultdict(dict)\n for i in range(len(item_list)):\n item1 = item_list[i]\n for j in range(i + 1, len(item_list)):\n item2 = item_list[j]\n similarity = item1.similarity(item2,env['similarityRulesParameter'])\n if similarity >= env['similarityRulesParameter']['similarity_threshold']:\n item_similarity_dict[item1.configsku][item2.configsku] = similarity\n item_similarity_dict[item2.configsku][item1.configsku] = similarity\n count+=1\n if count%100==0:\n print int(count/float(len(item_list))*100),'%','finished',datetime.now()-start\n item_similarity_dict = dict(item_similarity_dict)\n conn.close()\n return item_similarity_dict\n\n\n\n" } ]
13
hwang106/flask-retirement
https://github.com/hwang106/flask-retirement
e03ec0f5603f726afa0298ca9840b8b0cb946440
c128a4861c308d1b523d94f29e764cce472b379d
cc506cb1ce24d0febb8e87cb89e2ec557278122b
refs/heads/main
2023-06-26T08:09:05.568857
2021-07-28T16:35:27
2021-07-28T16:35:27
390,395,492
0
0
null
2021-07-28T15:10:51
2021-07-28T13:41:55
2021-07-28T13:41:51
null
[ { "alpha_fraction": 0.4677419364452362, "alphanum_fraction": 0.6935483813285828, "avg_line_length": 14.75, "blob_id": "c51accbf26c3240f00b64a91152aa26375b8ad1f", "content_id": "bec03b5d364ded516dd6aa75c6281bd0cabc278a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 62, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/requirements.txt", "repo_name": "hwang106/flask-retirement", "src_encoding": "UTF-8", "text": "Flask==1.1.2\ngunicorn==20.0.4\npymongo==3.12.0\ndnspython==2.1.0" }, { "alpha_fraction": 0.6800878643989563, "alphanum_fraction": 0.6830161213874817, "avg_line_length": 28.717391967773438, "blob_id": "5b67d519a74e7a49f2f7a201e4a4fc4794f4a070", "content_id": "c35a6ecf8aee168a34d9524353534e389508a652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 116, "num_lines": 46, "path": "/app.py", "repo_name": "hwang106/flask-retirement", "src_encoding": "UTF-8", "text": "# ---- adding test comment3 YOUR APP STARTS HERE ----\n# -- Import section --\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask_pymongo import PyMongo\nfrom flask import redirect\nfrom flask import session, url_for\n# -- Initialization section --\napp = Flask(__name__)\napp.secret_key = '_5#y2L\"F4Q8z\\n\\xec]/'\n# name of database\napp.config['MONGO_DBNAME'] = 'goals'\n# URI of database\napp.config['MONGO_URI'] = 'mongodb+srv://admin:[email protected]/myFirstDatabase?retryWrites=true&w=majority'\nmongo = PyMongo(app)\n# -- Routes section --\[email protected]('/')\[email protected]('/index.html')\ndef index():\n return render_template('index.html')\[email protected]('/smart.html')\ndef smart():\n return render_template('smart.html')\[email protected]('/timeline.html')\ndef timeline():\n return render_template('timeline.html')\[email protected]('/info.html')\ndef info():\n return render_template('info.html')\[email protected]('/about.html')\ndef about():\n return render_template('about.html')\[email protected]('/signlog.html', methods = ['GET', 'POST'])\ndef signlog():\n return render_template('signlog.html')\n# @app.route('/profile.html')\n# def profile():\n\[email protected]('/lists/new', methods = ['GET', 'POST'])\ndef smartgoal():\n if request.method == 'POST':\n new_goal = request.form['new_goal']\n print(new_goal)\n \n return render_template('smart.html')" } ]
2
Allen-Bayern/algo-questions
https://github.com/Allen-Bayern/algo-questions
1dfb2e1ee79617fdd43668b6646b2fce55ba89d4
71ec1685d849b245ab91ef8f34fd23287c75077f
3d680e95be43b2fe1794ea610a8a27e0686ebff5
refs/heads/master
2023-07-12T12:52:21.081499
2021-08-31T13:59:35
2021-08-31T13:59:35
393,436,650
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4535379409790039, "alphanum_fraction": 0.46121057868003845, "avg_line_length": 22.479999542236328, "blob_id": "26c2f835c45655e2ee1d8c75e8fde3f3502466df", "content_id": "8863bd9ee5de33330a791b8980f12a0703485d5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1173, "license_type": "permissive", "max_line_length": 127, "num_lines": 50, "path": "/src/localMax/localMax.cpp", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#include<cstdio>\n#include<vector>\n#include<numeric>\n#include<algorithm>\n#include<cmath>\nusing namespace std;\n\nint solution(vector<int> arr){\n int res = *min_element(arr.begin(), arr.end()) + accumulate(arr.begin(), arr.end(), 0);\n\n for(int len = 1; len < arr.size(); len++){\n if(len == 1){\n for(auto elem : arr){\n int tmp = (int)pow(elem, 2);\n if(tmp > res)\n res = tmp;\n }\n }else{\n int start = 0;\n int terminal = len - 1;\n while(terminal < arr.size()){\n vector<int> localArea(&arr[start], &arr[terminal]);\n int tmp = *min_element(localArea.begin(), localArea.end()) + accumulate(localArea.begin(), localArea.end(), 0);\n if(tmp > res)\n res = tmp;\n ++start;\n ++terminal;\n }\n }\n }\n\n return res;\n}\n\nint main(){\n int len;\n scanf(\"%d\", &len);\n\n vector<int> arr;\n\n for(int i = 0; i < len; ++i){\n int elem;\n scanf(\"%d\", &elem);\n arr.push_back(elem);\n }\n\n printf(\"%d\", solution(arr));\n\n return 0;\n}" }, { "alpha_fraction": 0.4480000138282776, "alphanum_fraction": 0.46933332085609436, "avg_line_length": 22.957447052001953, "blob_id": "65e0ed4d3fa7af1c8d102c2ab61ef6bd09217da8", "content_id": "420e7f3c0922ccea945bd9479e1795c4b3458a7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1391, "license_type": "permissive", "max_line_length": 55, "num_lines": 47, "path": "/src/voucher/voucher_dp1.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding : utf-8 -*-\n\nfrom sys import maxsize as MAX\n\ndef solution(n, amount, vouchers):\n # n为价钱数\n # vouchers即为代金券面值\n # amount为种类数\n\n # 判空是好习惯。如果是空数组,那么直接返回-1\n # 不过在原题没有太大必要判空\n # if not vouchers:\n # return -1\n \n # 对原数组进行排序,让最小的排前面\n vouchers = sorted(vouchers)\n\n # 申请额外空间,用于存放重复的计算结果\n dp = [MAX for i in range(n + 1)]\n\n # 代码优化\n for voucher in vouchers:\n if voucher <= len(vouchers):\n dp[voucher] = 1\n\n # for i in range(1, n + 1):\n for i in range(n + 1):\n # if (i < vouchers[0]) or (i in vouchers):\n # # 如果商品价值比最小面值的优惠券都小\n # # 或者刚好等于一个面值\n # # 那么一张即可\n # dp[i] = 1\n # else:\n for voucher in vouchers:\n if (i - voucher) > 0:\n dp[i] = min(dp[i], 1 + dp[i - voucher])\n # 代码优化,底下的可以完全不要\n # tmp = 1 + dp[i - voucher]\n # if tmp < dp[i]:\n # dp[i] = tmp\n \n return dp[n]\n\nif __name__ == '__main__':\n vouchers = [50, 30, 20, 5]\n print(solution(65, 4, vouchers))" }, { "alpha_fraction": 0.4035087823867798, "alphanum_fraction": 0.45087718963623047, "avg_line_length": 18.03333282470703, "blob_id": "2e86a5d3fd99a0ffb197cfe0781119fbf93817ed", "content_id": "2fa840ce9dc63c34f6e9205394aac02a830f2c1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "permissive", "max_line_length": 86, "num_lines": 30, "path": "/src/maze/maze_recursion.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef 
solution(maze):\n # 如果是空迷宫,直接返回0\n if not maze:\n return 0\n \n # 递归停止条件1:只有1行的迷宫\n if len(maze) == 1:\n return sum(maze[0])\n \n # 递归停止条件2:只有1列的迷宫\n if len(maze[0]) == 1:\n res = 0\n for pane in maze:\n res += pane[0]\n \n return res\n \n return maze[0][0] + min(solution(maze[1::]), solution([row[1::] for row in maze]))\n\nif __name__ == '__main__':\n maze = [\n [1, 3, 1],\n [1, 5, 1],\n [4, 2, 1]\n ]\n\n print(solution(maze))" }, { "alpha_fraction": 0.6695652008056641, "alphanum_fraction": 0.695652186870575, "avg_line_length": 15.571428298950195, "blob_id": "d38122d3bc6d91413d09c47005f242f4737955f3", "content_id": "e40aa486d6595e7b1de459aefa407aac6925d305", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "permissive", "max_line_length": 34, "num_lines": 7, "path": "/README.md", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "# README\n\n随手刷题记录本,不定时更新\n\n1. [美团优惠券](src/voucher/voucher.md)\n2. [排序](src/methodsOfSorts)\n3. [走迷宫](src/maze/maze.md)" }, { "alpha_fraction": 0.5289906859397888, "alphanum_fraction": 0.5411596298217773, "avg_line_length": 27.53061294555664, "blob_id": "84cf04375fd508a5be81acd952ab01c3d8678487", "content_id": "dad510539481756f625303b97c44556851ebb11b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1801, "license_type": "permissive", "max_line_length": 85, "num_lines": 49, "path": "/src/localMax/local_max.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom sys import stdin, stdout\n\ndef solution(arr):\n '''\n 公式:result = min(localArea) * sum(localArea)\n\n 局部最大值的思路:\n 1. 局部至少包含1个,至多包含所有。\n 也就是说,假设数组arr的长度为length。那么局部区间localArea的长度localLength必然是1<= localLength <= length。\n 2. 设定两个指针start与terminal,满足start + localLength - 1 = terminal。\n 然后按上一步推导,求每个满足条件的localLength下的局部区间localArea的result。\n 3. 
不妨先给result设定一个初值:result = min(arr) * sum(arr),也就是全局情况下是多少。然后执行第2步。\n 如果局部的result一旦大于初值,就更新。如此一来便求得。\n '''\n\n # initialize result\n res = min(arr) * sum(arr)\n\n # 用for循环执行第二步\n for length in range(1, len(arr)):\n if length == 1:\n # 如果是仅有一个变量的区间,那么公式实质上变为该数自身的平方\n for elem in arr:\n # 用临时变量捕获计算结果\n temp = elem ** 2\n if temp > res:\n res = temp\n else:\n # 从第一个位置遍历起\n start = 0\n terminal = length - 1\n while terminal < len(arr):\n localArea = arr[start: terminal + 1 :]\n temp = min(localArea) * sum(localArea)\n if temp > res:\n res = temp\n start += 1\n terminal += 1\n \n return res\n\nif __name__ == '__main__':\n length = int(stdin.readline().strip())\n arr = map(int, stdin.readline().strip().split())\n\n stdout.write(\"%d\"%(solution(arr)))" }, { "alpha_fraction": 0.4815642535686493, "alphanum_fraction": 0.5240223407745361, "avg_line_length": 17.66666603088379, "blob_id": "dd367c4e0935305b7619d5e957d27d82fa1afb59", "content_id": "53776fa1bd950fb5c3bb94f8a8194ec6b6340939", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1171, "license_type": "permissive", "max_line_length": 52, "num_lines": 48, "path": "/src/voucher/voucher.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding : utf-8 -*-\n\nfrom sys import maxsize as MAX\n\ndef solution(n, l, vouchers):\n # n为价钱数\n # vouchers即为代金券面值\n # l为种类数\n\n # 判空是好习惯。如果是空数组,那么直接返回-1不好么\n if not vouchers:\n return -1\n \n # 对原数组进行排序,让最大的排前面\n vouchers = sorted(vouchers, reverse = True)\n\n # 边界条件1:如果商品价格比最小面值优惠券还要小的话,那就直接等于1张\n if n <= vouchers[-1]:\n return 1\n \n # 边界条件2:如果商品价格和面值刚好相等,那么直接一张\n for voucher in vouchers:\n if n == voucher:\n return 1\n \n # 递推公式\n '''\n 65的解决方式是这样的:\n * 50 + 15\n * 30 + 35\n * 20 + 45\n * 5 + 60\n\n 四个当中找最小\n '''\n res = MAX # 要足够大\n for voucher in vouchers:\n # 递归\n tmp = 1 + solution(n - voucher, l, vouchers)\n if tmp < res:\n res = tmp\n \n return res\n\nif __name__ == '__main__':\n vouchers = [50, 30, 20, 5]\n print(solution(65, 4, vouchers))" }, { "alpha_fraction": 0.40414509177207947, "alphanum_fraction": 0.4455958604812622, "avg_line_length": 19.342105865478516, "blob_id": "9ed2bb73a78b5a16cde955e02128861c868b6ebf", "content_id": "2f61ae437d97d55299401ee843bd512edb72548f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 772, "license_type": "permissive", "max_line_length": 71, "num_lines": 38, "path": "/src/maze/mazeDP.cpp", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#include<vector>\n#include<algorithm>\n#include<cstdio>\nusing namespace std;\n\nint solution(vector<vector<int>> &maze){\n const int m = maze.size();\n const int n = maze[0].size();\n\n vector<vector<int>> aux(m, vector<int>(n, 0));\n\n aux[0][0] = maze[0][0];\n\n for(int i = 1; i < m; i++)\n aux[i][0] = aux[i - 1][0] + maze[i][0];\n \n for(int j = 1; j < n; j++)\n aux[0][j] = aux[0][j - 1] + maze[0][j];\n \n for(int i = 1; i < m; i++){\n for(int j = 1; j < n; j++)\n aux[i][j] = maze[i][j] + min(aux[i - 1][j], aux[i][j - 1]);\n }\n\n return aux[m - 1][n - 1];\n}\n\nint main(){\n\n vector<vector<int>> maze = {\n {1, 3, 1},\n {1, 5, 1},\n {4, 2, 1}\n };\n\n printf(\"%d\\n\", solution(maze));\n return 0;\n}" }, { "alpha_fraction": 0.5073327422142029, "alphanum_fraction": 0.5549954175949097, "avg_line_length": 16.325397491455078, "blob_id": "78fd00616757c5e396d5196cb4a4e75fc3664080", "content_id": 
"6b697cb17d98bc8112d4ff1fec436d53ddea21bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3248, "license_type": "permissive", "max_line_length": 86, "num_lines": 126, "path": "/src/maze/maze.md", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "# 美团走迷宫题 \n\n> 链接:https://www.nowcoder.com/questionTerminal/2a38bd658ba04a1dbd006107c51ff14e\n> 来源:牛客网\n\n给定一个包含非负整数的 M x N 迷宫,请找出一条从左上角到右下角的路径,使得路径上的数字总和最小。每次只能向下或者向右移动一步。\n\n\n输入描述:\n第一行包含两个整数M和N,以空格隔开,1≤N≤10,1≤N≤10。接下来的M行中,每行包含N个数字 。\n\n输出描述:\n找出总和最小的路径,输出路径上的数字总和。\n\n示例1\n\n输入\n```Python\n3 3\n1 3 1\n1 5 1\n4 2 1\n```\n\n输出\n```Python\n7\n```\n\n## 思路解析\n\n仔细分析一番便可看出,此题可以**递归**求解。思路如下:\n\n* 只要能拆成子问题的题目就可以用递归。在本题中,子问题是如果迷宫只有一行或者只有一列,该怎么走?\n* 只有一列的情况:有n行,即数组长度为n。但这n行的子数组每个只有一个元素。只需要将每一行的元素相加即可;\n* 只有一行的情况:将数组内唯一一个子数组中所有元素相加即可。\n* 递归写法:只需要从`maze[0][0]`出发,看是走向右一步的子数组值更小,还是向下一步的值更小。\n\n代码如下:\n\n```Python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef solution(maze):\n # 如果是空迷宫,直接返回0\n if not maze:\n return 0\n \n # 递归停止条件1:只有1行的迷宫\n if len(maze) == 1:\n return sum(maze[0])\n \n # 递归停止条件2:只有1列的迷宫\n if len(maze[0]) == 1:\n res = 0\n for pane in maze:\n res += pane[0]\n \n return res\n \n return maze[0][0] + min(solution(maze[1::]), solution([row[1::] for row in maze]))\n\nif __name__ == '__main__':\n maze = [\n [1, 3, 1],\n [1, 5, 1],\n [4, 2, 1]\n ]\n\n # expected output: 7\n print(solution(maze))\n```\n\n只不过递归**一定会超时**。\n\n## 上述代码优化\n\n用一个额外空间,存储从`maze[0][0]`出发到每个格子的最小数值:\n* 设`maze`本身是一个M * N的数组。那么申请一个同样大小的`aux`数组。\n* `aux`第一行的第一列值的求法和上面递归法是一样的。\n* 将上面的递归式改为状态转移方程:即,从上面过来最小还是从左边过来最小。这样一来便保证每一格存的都是最优解。\n* 将`aux[m-1][n-1]`作为返回值。\n\n代码如下:\n\n```C++\n#include<vector>\n#include<algorithm>\n#include<cstdio>\nusing namespace std;\n\nint solution(vector<vector<int>> &maze){\n const int m = maze.size();\n const int n = maze[0].size();\n\n vector<vector<int>> aux(m, vector<int>(n, 0));\n\n aux[0][0] = maze[0][0];\n\n for(int i = 1; i < m; i++)\n aux[i][0] = aux[i - 1][0] + maze[i][0];\n \n for(int j = 1; j < n; j++)\n aux[0][j] = aux[0][j - 1] + maze[0][j];\n \n for(int i = 1; i < m; i++){\n for(int j = 1; j < n; j++)\n aux[i][j] = maze[i][j] + min(aux[i - 1][j], aux[i][j - 1]);\n }\n\n return aux[m - 1][n - 1];\n}\n\nint main(){\n\n vector<vector<int>> maze = {\n {1, 3, 1},\n {1, 5, 1},\n {4, 2, 1}\n };\n\n printf(\"%d\\n\", solution(maze));\n return 0;\n}\n```" }, { "alpha_fraction": 0.4569620192050934, "alphanum_fraction": 0.4949367046356201, "avg_line_length": 20.37837791442871, "blob_id": "24561eec45e7b150f9a4698a2f7d7c0b0861d30e", "content_id": "d650bbc532585452950f57d104ff1a2c45e37d7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "permissive", "max_line_length": 51, "num_lines": 37, "path": "/src/methodsOfSorts/heap_v1.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef heapify(arr, i):\n largest = i\n\n left = 2 * i + 1\n right = 2 * (i + 1)\n\n if left < arrL and arr[left] > arr[largest]:\n largest = left \n \n if right < arrL and arr[right] > arr[largest]:\n largest = right \n \n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n heapify(arr, largest)\n\ndef build(arr):\n for i in range(len(arr) // 2, -1, -1):\n heapify(arr, i)\n\ndef heapsort(arr):\n global arrL \n arrL = len(arr)\n build(arr)\n for i in range(len(arr) - 1, 0, -1):\n arr[i], arr[0] = arr[0], 
arr[i]\n        arrL -= 1\n        heapify(arr, 0)\n    \n    return arr \n\nif __name__ == '__main__':\n    arr = [5, 13, 2, 25, 7, -66, 9, 12, 15]\n    print(heapsort(arr))" }, { "alpha_fraction": 0.5232936143875122, "alphanum_fraction": 0.5276272892951965, "avg_line_length": 26.176469802856445, "blob_id": "781d33802751968803c77c3e6498e84f957c5099", "content_id": "b4534cf783b5e9456c1c82ffad69c1a16ba7109a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 981, "license_type": "permissive", "max_line_length": 49, "num_lines": 34, "path": "/src/methodsOfSorts/Tree.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# tree node class\nclass TreeNode:\n    def __init__(self, value):\n        self.value = value \n        self.left = None \n        self.right = None\n\n# tree class\nclass Tree:\n    def __init__(self):\n        self.root = None \n        self.nodes = list() # holds node references\n    \n    def add_single(self, data):\n        if self.root is None:\n            self.root = TreeNode(data)\n            self.nodes.append(self.root)\n        else:\n            rootNode = self.nodes[0]\n            if rootNode.left is None:\n                rootNode.left = TreeNode(data)\n                self.nodes.append(rootNode.left)\n            elif rootNode.right is None:\n                rootNode.right = TreeNode(data)\n                self.nodes.append(rootNode.right)\n                self.nodes.pop(0) # pop the first element of nodes\n    \n    # a somewhat jQuery-like wrapper style\n    def add_array(self, arr):\n        for elem in arr:\n            self.add_single(elem)" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.5436363816261292, "avg_line_length": 21.95833396911621, "blob_id": "e7d3138b066e6312e14e9e60807badb759bff7ca", "content_id": "49d9d1457ac1f631f6a2ba79c007e2b9bde428e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "permissive", "max_line_length": 57, "num_lines": 24, "path": "/src/methodsOfSorts/quick.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef quick_sort(arr):\n    if (not arr) or len(arr) == 1:\n        # if arr is empty or has a single element, just return it as-is\n        return arr\n\n    # pick the rightmost element as the pivot\n    pivot = arr[-1]\n    # create empty lists left and right\n    left, right = list(), list()\n\n    for elem in arr[:len(arr) - 1:]:\n        # slicing like this guarantees the last element is not visited\n        if elem <= pivot:\n            left.append(elem)\n        else:\n            right.append(elem)\n\n    return quick_sort(left) + [pivot] + quick_sort(right)\n\nif __name__ == '__main__':\n\tprint(quick_sort([2,1,5,3,8,4,9,5]))" }, { "alpha_fraction": 0.5416207313537598, "alphanum_fraction": 0.5865490436553955, "avg_line_length": 20.730539321899414, "blob_id": "2d2b5bf59eb7199d0e28d3e4f9cea5ef7723bf46", "content_id": "806a26948305e24b57678bcf9f61345644ef2568", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5716, "license_type": "permissive", "max_line_length": 263, "num_lines": 167, "path": "/src/voucher/voucher.md", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "> Link: https://www.nowcoder.com/questionTerminal/5d2405da8d364eafbaca1de9bc2a0d4e\n> \n> Source: Nowcoder\n> \n> For its anniversary sale, a mall is running a \"0-yuan purchase\" event: a customer can combine the vouchers in hand to buy a designated item for 0 yuan. Clever Xiaotuan wants an algorithm to compute this quickly: for an item of a given price, the vouchers used must add up to exactly that price, and their total face value may not exceed it. Since vouchers are limited, using fewer of them maximizes value, i.e. gives the best deal. For a 100-yuan item with voucher kinds of 50, 30, 20 and 5 yuan, the best deal is two 50-yuan vouchers; for a 65-yuan item, it is two 30-yuan vouchers plus one 5-yuan voucher. Help Xiaotuan write a piece of code that performs this voucher calculation.\n> \n> \n> Input:\n> \n> Multiple test cases; input ends when s=0 is read\nThere may be several test cases, each consisting of two lines.\nThe first line contains an integer P, the item price, 1≤P≤10000; P=0 marks the end of input.\n> 

The second line contains several integers separated by spaces. The first integer N (1≤N≤20) is the number of voucher kinds, followed by N integers giving the face values held (each between 1 and 1000); the quantity of each kind is unlimited.\n> \n> Output:\n> \n> Find the minimum number of vouchers whose face values sum to exactly the item price, and print that number;\n> If several optimal solutions exist, printing any one of them is fine;\n> If there is no solution, print \"Impossible\".\n\nAnalysis:\n\nThis is an **unbounded knapsack** problem.\n\nBrute-force it first, taking the 65-yuan case as the example.\n\n* Case 1: take the largest, 50, first, then 5+5+5: 4 vouchers\n* Case 2: take 30 first, 30+30+5: 3 vouchers.\n* Case 3: take 20 first, 20+20+20+5: 4 vouchers;\n* Case 4: only 5s: 13 vouchers.\n\nIn every case, after taking one voucher you ask how the remaining amount is best covered. So 65 decomposes as:\n* 65 = 50 + 15\n* 65 = 30 + 35\n* 65 = 20 + 45\n* 65 = 5 + 60\n\nThen compare which decomposition is the minimum. The Python code follows:\n\n(This version has a flaw and will need revising)\n\n```Python\nfrom sys import maxsize as MAX\n\ndef solution(n, amount, vouchers):\n    # n is the price\n    # vouchers holds the voucher face values\n    # amount is the number of voucher kinds\n\n    # checking for empty input is a good habit; for an empty list just return -1\n    if not vouchers:\n        return -1\n    \n    # sort the original list\n    vouchers = sorted(vouchers)\n\n    # edge case 1: if the price is below the smallest face value, one voucher suffices\n    if n <= vouchers[0]:\n        return 1\n    \n    # edge case 2: if the price exactly equals a face value, one voucher as well\n    for voucher in vouchers:\n        if n == voucher:\n            return 1\n    \n    # recurrence\n    '''\n    65 is solved like this:\n    * 50 + 15\n    * 30 + 35\n    * 20 + 45\n    * 5 + 60\n\n    take the minimum of the four\n    '''\n    res = MAX # must be large enough\n    for voucher in vouchers:\n        # recurse\n        if n >= voucher:\n            tmp = 1 + solution(n - voucher, amount, vouchers)\n            if tmp < res:\n                res = tmp\n    \n    return res\n```\n\nBut recursion brings massive recomputation; once the input size grows, all that awaits is a blown stack. So what if extra space could cache the results already computed?\n\nThat is step one of dynamic programming: **trade space for time**, called the **time-memory trade-off** in *Introduction to Algorithms*. In practice, low time complexity and low space complexity almost never come together; you convert one into the other.\n\nDP, first form:\n\n```Python\nfrom sys import maxsize as MAX\n\ndef solution(n, amount, vouchers):\n    # n is the price\n    # vouchers holds the voucher face values\n    # amount is the number of voucher kinds\n\n    # checking for empty input is a good habit; return -1 for an empty list\n    if not vouchers:\n        return -1\n    \n    # sort the original list so the smallest comes first\n    vouchers = sorted(vouchers)\n\n    # allocate extra space to cache repeated results\n    dp = [MAX for i in range(n + 1)]\n    dp[0] = -1\n    for i in range(1, n + 1):\n        if (i < vouchers[0]) or (i in vouchers):\n            # if the price is below the smallest face value\n            # or exactly equals one of them,\n            # a single voucher is enough\n            dp[i] = 1\n        else:\n            for voucher in vouchers:\n                if (i - voucher) > 0:\n                    tmp = 1 + dp[i - voucher]\n                    if tmp < dp[i]:\n                        dp[i] = tmp\n    \n    return dp[n]\n```\n\nFinal solution:\n\nBecause Nowcoder makes you write the I/O yourself, and an amount smaller than 5 cannot be covered by a 5, the submitted version is as follows (code optimized on 2021-08-07):\n\n```Python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom sys import stdin, stdout\nfrom sys import maxsize as MAX\n\ndef solution(price, vouchers):\n    # per the problem statement no empty-check is needed, so none is done\n    vouchers = sorted(vouchers)\n    \n    # dynamic-programming array\n    dp = [MAX for i in range(price + 1)]\n    for voucher in vouchers:\n        if voucher <= price:\n            dp[voucher] = 1\n    \n    for i in range(price + 1):\n        for voucher in vouchers:\n            if (i - voucher) > 0:\n                dp[i] = min(dp[i], 1 + dp[i - voucher])\n    \n    return dp[price]\n\nif __name__ == '__main__':\n    while price := int(stdin.readline().strip()): # available since Python 3.8 (the walrus operator :=)\n        if not price: # a 0 input breaks out\n            break\n        vouchers = list(map(int, stdin.readline().strip().split()))\n        \n        res = solution(price, vouchers[1::])\n        \n        if res == MAX:\n            stdout.write('Impossible\\n')\n        else:\n            stdout.write('%d\\n'%res)\n```" }, { "alpha_fraction": 0.26923078298568726, "alphanum_fraction": 0.29890111088752747, "avg_line_length": 21.219512939453125, "blob_id": "917bb8dbe5bef57c6450def46e84d5ac8b421180", "content_id": "5c6fdfdf4b22e18e33c1a21082209f782a1bf0fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 946, "license_type": "permissive", "max_line_length": 61, "num_lines": 41, "path": "/src/voucher/voucher.cpp", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#include <iostream>\nusing namespace std;\n\nint n, m, dp[100001], temp, a[20];\n\nint main(){\n    // First read the item price; cin >> n does exactly that.\n    while(cin >> n){\n        for(int i = 0; i < 100001; i++)\n            dp[i] = -1;\n        if(n == 0)\n            break;\n        \n        

cin >> m;\n\n for(int i = 0; i < m; i++){\n cin >> temp;\n a[i] = temp;\n if(temp <= n)\n dp[temp] = 1;\n }\n\n for(int i = 0; i <= n; i++){\n for(int j = 0; j < m; j++){\n if(a[j] < i && dp[i - a[j]] != -1){\n if(dp[i] == -1)\n dp[i] = dp[i - a[j]] + 1;\n else\n dp[i] = min(dp[i], dp[i - a[j]] + 1);\n }\n }\n }\n if(dp[n] == -1)\n cout << \"Impossible\" << endl;\n else\n cout << dp[n] << endl;\n\n }\n\n return 0;\n}" }, { "alpha_fraction": 0.4986376166343689, "alphanum_fraction": 0.5075904726982117, "avg_line_length": 25.224489212036133, "blob_id": "8293fd4ec8dbe97f454a2e335fa9ce0a650a8a41", "content_id": "c856d2b074c73930da7be7dbd32b6add22218fe7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3055, "license_type": "permissive", "max_line_length": 56, "num_lines": 98, "path": "/src/methodsOfSorts/heap_v2.py", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# 树节点类\nclass TreeNode:\n def __init__(self, value):\n self.value = value \n self.left = None \n self.right = None\n\n# 树类\nclass Tree:\n def __init__(self):\n self.root = None \n self.nodes = list() # 用于存放节点地址\n \n def add_single(self, data): # 添加单个节点\n if self.root is None:\n self.root = TreeNode(data)\n self.nodes.append(self.root)\n else:\n rootNode = self.nodes[0]\n if rootNode.left is None:\n rootNode.left = TreeNode(data)\n self.nodes.append(rootNode.left)\n elif rootNode.right is None:\n rootNode.right = TreeNode(data)\n self.nodes.append(rootNode.right)\n self.nodes.pop(0) # 弹出nodes第一个元素\n \n # 添加整个数组\n def add_array(self, arr):\n for elem in arr:\n self.add_single(elem)\n \n # 层序遍历,将二叉树还原为数组\n def level_order(self):\n if not self.root:\n return list()\n \n res = list()\n\n def DFS(node, dph = 0):\n if node:\n while len(res) <= dph:\n res.append(list())\n res[dph].append(node.value)\n DFS(node.left, dph + 1)\n DFS(node.right, dph + 1)\n \n DFS(self.root)\n res_val = list()\n for elem in res:\n res_val.append(elem[0].value)\n return res_val\n\n# 从上面的树类派生一个堆类\nclass Heap(Tree):\n def __init__(self, is_max_heap = True): # 不必改变父类构造方法\n super().__init__()\n # 增加is_max_heap形参,以判定是生成最大堆还是最小堆。默认生成最大堆\n self.is_max_heap = is_max_heap\n \n # 递归重写父类的add_array方法。默认生成最大堆\n def add_array(self, arr):\n # 停止条件:如果数组为空,结束方法,并返回是否为最大堆\n if not arr:\n return self.is_max_heap\n \n # 找到数组中的最大值,并将其转化为节点(TreeNode类)\n if self.is_max_heap:\n val = max(arr)\n else:\n val = min(arr)\n node_ = TreeNode(val)\n\n # 调用父类的add_single()方法将其插入树中\n super().add_single(node_)\n # 删除arr中的最大值\n arr.remove(val)\n # 代入新数组,递归执行add_array方法\n self.add_array(arr)\n \n # 堆排序\n def heapsort(self):\n # 根据初始化中指定的种类是否需要反转。默认为是\n # 最终输出从小到大的数组\n if self.is_max_heap:\n return super().level_order()[::-1]\n return super().level_order()\n\nif __name__ == '__main__':\n arr = [5, 13, 2, 25, 7, -66, 9, 12, 15]\n new_heap = Heap()\n new_heap.add_array(arr)\n\n # 通过层序遍历的方法将Heap类输出出来\n print(new_heap.heapsort())" }, { "alpha_fraction": 0.5032397508621216, "alphanum_fraction": 0.5280777812004089, "avg_line_length": 19.600000381469727, "blob_id": "127ee669a44165de6bb8726caa1c5afe3f0d0cd1", "content_id": "341deb88cd2ef7d6d15b9bc490c637f969d963b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 966, "license_type": "permissive", "max_line_length": 67, "num_lines": 45, "path": "/src/maze/mazeRecursion.cpp", "repo_name": "Allen-Bayern/algo-questions", "src_encoding": "UTF-8", "text": 
"#include <vector>\n#include <cstdio>\n#include <numeric>\n#include <algorithm>\nusing namespace std;\n\nint solution(vector<vector<int>> maze){\n if(maze.empty())\n return 0;\n\n if(maze.size() == 1)\n return accumulate(maze[0].begin(), maze[0].end(), 0);\n \n if(maze[0].size() == 1){\n auto res = 0;\n for(vector<int> pane : maze)\n res += pane[0];\n \n return res;\n }\n\n // 向右访问之后的子向量\n vector<vector<int>> rightSub;\n for(vector<int> row : maze){\n vector<int> everyRow {row.begin() + 1, row.end()};\n rightSub.push_back(everyRow);\n }\n\n // 向下访问之后的子向量\n vector<vector<int>> downSub {maze.begin() + 1, maze.end()};\n\n return maze[0][0] + min(solution(downSub), solution(rightSub));\n}\n\nint main(){\n\n vector<vector<int>> maze = {\n {1, 3, 1},\n {1, 5, 1},\n {4, 2, 1}\n };\n\n printf(\"%d\\n\", solution(maze));\n return 0;\n}" } ]
15
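The maze write-up in the record above stops at an O(M*N)-space DP table. A further step it does not take is rolling the table into a single row. Below is a minimal sketch of that idea; the function name `min_path_sum` and the final `assert` are additions for illustration, not code from the dataset:

```python
# Space-optimized variant of the maze DP: keep one row of the aux table,
# O(N) extra space instead of O(M*N). A sketch, not part of the original repo.
def min_path_sum(maze):
    if not maze or not maze[0]:
        return 0
    n = len(maze[0])
    row = [0] * n
    row[0] = maze[0][0]
    for j in range(1, n):                 # first row: can only come from the left
        row[j] = row[j - 1] + maze[0][j]
    for i in range(1, len(maze)):
        row[0] += maze[i][0]              # first column: can only come from above
        for j in range(1, n):
            row[j] = maze[i][j] + min(row[j], row[j - 1])
    return row[-1]

# same example as in maze.md; expected minimum path sum is 7
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7
```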
yl2526/email_download
https://github.com/yl2526/email_download
f311054db775f93cf8522096efa141658a2eff46
fe8b2ce088a17b27007952cd09cd464f562d11f6
82e1de0ffbb26c91c92232b39aa690d23e348333
refs/heads/master
2020-12-24T18:32:23.528832
2016-04-27T19:45:35
2016-04-27T19:45:35
57,073,851
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5366785526275635, "alphanum_fraction": 0.5415817499160767, "avg_line_length": 48.8248405456543, "blob_id": "af7bcb959ae24c02a137059ff5cff9aa08eff2d1", "content_id": "1fffbaa6d4468c3895143140aa79d0c03733287f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15704, "license_type": "no_license", "max_line_length": 146, "num_lines": 314, "path": "/downloader.py", "repo_name": "yl2526/email_download", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 03 13:33:49 2015\n\n@author: yliu\n\nThis scrit is to create a email downloader object\n\nThe object will create a imap connection. Then you can pass the list of attachmet \nor certaim type of string to download.\n\n\"\"\"\n\nimport os\nimport getpass\nimport imaplib\nimport email\nimport re\n\nclass downloader:\n '''\n class for email attachment downloading\n '''\n def __init__(self, userName = None, passWord = None, server = 'imap.gmail.com', folder = 'Inbox', search = 'All', target = 'All'):\n '''\n initialize downloader with following:\n server: default to imap server address \n username: default to None, no '@somethong.com' part\n passowrd: default to None\n login_now: default to True, is or not login immediately\n search: search rules default to 'ALL' if '' pass\n target: target attachment list if default to None, all attachment will be downloaded\n folder: folder of email to look\n \n returns a downloader with follosing attributes:\n _server: imap server address to connect\n _username: username of the individual email\n _imap: imap connection to the server\n _search: current search rules, default to 'All'\n _target: list of dicts, each have subject, attachment keys to store names, \n default string 'All' will download all attachments \n _folder: name of folder to select\n '''\n self._server = server\n self._imap = imaplib.IMAP4_SSL(server)\n if userName == None:\n self._username = raw_input('Enter your username:')\n else:\n self._username = userName \n if passWord == None:\n passWord = getpass.getpass('Enter your password: ')\n login_result = self._imap.login(self._username, passWord)\n assert login_result[0] == 'OK', 'unable to login for user {0}, at server {1}'.format(self._userName, self._server)\n self._search = search\n self._target = target\n self._folder = folder\n result, _ = self._imap.select(folder)\n assert (result == 'OK'), 'unable to select the {0} folder'.format(folder)\n \n def __repr__(self):\n '''\n the general representaiton\n '''\n return \"User: {0} Server: {1} Folder: {2}\".format(self._username, self._server, self._folder)\n\n def __str__(self):\n '''\n generate string description for downloader.\n '''\n description = 'This downloader connects to server: {0}\\n'.format(self._server)\n description += 'Currently loged in as {0}\\n'.format(self._username)\n description += 'Currently slecting the {0} folder\\n'.format(self._folder)\n description += 'Restricted by search rules: {0}\\n'.format(self._search)\n if self._target == 'All':\n description += 'Targeting all attachments'\n else:\n if self._target:\n description += 'With following specific target\\n {0}'.format([str(target) for target in self._target])\n else:\n description += 'With no target\\n'\n return description\n \n def close(self):\n '''\n logout and close the imap connection\n '''\n self._imap.close()\n self._imap.logout()\n self._imap = None\n \n \n def changeFolder(self, folder, reconnect = True):\n '''\n change the folder of connection\n in 
case failed to connect to the new folder, \n if reconnect is true, it will try to reselect the old folder\n '''\n result, detail = self._imap.select(folder)\n if result == 'OK':\n self._folder = folder\n else:\n print 'unable to select the {0} folder\\nError: {1}'.format(folder, detail)\n if reconnect:\n result, _ = self._imap.select(self._folder)\n assert result == 'OK', 'unable to reselect old {0} folder'.format(self._folder)\n print 'reconnected to {0} folder'.format(self._folder)\n \n def search(self, keyWord = '', gmail = None):\n '''\n If gmail is true, it will use a gmail_search instead of simple search.\n This gmail_search will behave like the search in the web side.\n If gmail is false, keyWord much be a search string.\n '(FROM \"Sender Name\")' '(Unseen)' 'CC \"CC Name\")' \n 'Body \"word in body\"' '(Since \"date only string\")'\n https://tools.ietf.org/html/rfc3501.html#section-6.4.4\n If gmail is None, it will try to check if server is imap.gmail.com.\n return a list of mail id, larger id is for newer email\n '''\n self._search = keyWord\n if gmail is None:\n if self._server == 'imap.gmail.com':\n gmail = True\n else:\n gmail = False\n if gmail:\n result, emailIndex = self._imap.gmail_search(None, self._search)\n else:\n result, emailIndex = self._imap.search(None, self._search)\n assert result == 'OK', 'unable to search for {0}'.format(self._search)\n return [id for id in reversed(emailIndex[0].split())]\n \n def addTarget(self, attachment = None, subject = None, target = None, renew = False):\n '''\n update target\n if renew is false the target will be added to the existings target or initilize a new one\n if renew is true thea target will alwasy initilize to a new one\n subject is name for the email subject\n '''\n if isinstance(target, list):\n target = target\n else:\n if isinstance(attachment, str):\n target = [ {'subject': subject, 'attachment': attachment} ]\n elif isinstance(attachment, list):\n if subject == None:\n target = [ {'subject': subject, 'attachment': att} for att in attachment]\n else:\n target = [ {'subject': sub, 'attachment': att} for sub, att in zip(subject, attachment)]\n assert isinstance(target, list), ' Target should be All, a string or list of attachment names and target_subject should be corresponding!'\n \n if renew | (self._target == 'All'):\n self._target = target\n else:\n \n self._target.extend(target)\n \n def isTarget(self, email_attachment, email_subject):\n '''\n To judge if certain email is target or not based on its attachment or subject name\n if attavhment or subject name is None, then those part will be ignored\n '''\n if self._target == 'All':\n return True\n for tar in self._target:\n sub = tar['subject']\n att = tar['attachment']\n if ((sub == None) | (sub == email_subject)) & ((att == None) | (att == email_attachment)):\n self.removeTarget(target = {'subject': sub, 'attachment': att})\n return True \n return False\n \n def removeTarget(self, target):\n '''\n remove a found attachment\n '''\n self._target.remove(target)\n \n def isEmptyTarget(self):\n '''\n check if target is empty or not\n '''\n return not bool(self._target) \n \n def download(self, emailIndex, target = '', fetchLimit = 500, appendSubject = False):\n '''\n emailIndex: iterable of all email id to be fetched\n target: target file name list including extension\n fetchLimit: maximum number of email to fetch\n '''\n print '******************************************\\nBegin\\n******************************************'\n if target:\n self._target = target\n print 
'Target updated to {0}'.format(target) \n # if attachments folder is not in the file directiory, a empty folder will be created\n baseDir = '.'\n if 'attachments' not in os.listdir(baseDir):\n os.mkdir('attachments')\n print 'made the new attachments folder'\n for fetched, index in enumerate(emailIndex):\n if fetched >= fetchLimit:\n print '******************************************\\nFetch Limit {0} Reached\\n'.format(fetchLimit)\n break\n if self.isEmptyTarget():\n print '******************************************\\nNo More Target\\n'\n break\n # the emailPackage contains email itself and some other meta data about it\n result, emailPackage = self._imap.fetch(index, '(RFC822)')\n assert result == 'OK', 'unable to fetch email with index {0}'.format(index)\n print '\\nemail {0} fetched'.format(index)\n # the emailAll contains sll and different type of elements for a email\n # itself is a huge string will some filed in it\n # email.walk() will be the way to look the element in emailAll\n # email.get_content_maintype() can get content type\n # first a coupl eis usually of multipart type, which contians further subject, body and attachment, \n # seemingly one multipart for each level of email, if the email has been forwarded or replyed,\n # it will has corresponding number of multipart\n # then the text element, which is usually the message body of the email\n # then the attachments\n # email.get('Content-Disposition') will get description for the part of email\n # description is always None fo rmultipart and text\n emailAll = email.message_from_string(emailPackage[0][1])\n for part in emailAll.walk():\n if (part.get_content_maintype() == 'multipart') | (part.get('Content-Disposition') == None):\n continue\n # don't be confused be the name of variable, there are possibility it is a None and not a attachment\n attachmentName = part.get_filename() # with extension\n if attachmentName:\n attachmentName = attachmentName.replace('\\r', '').replace('\\n', '')\n subjectName = emailAll['subject'].replace('\\r', '').replace('\\n', '')\n if self.isTarget(email_attachment = attachmentName, email_subject = subjectName):\n print '{0} found at email {1} ({2})\\n'.format(attachmentName, index, subjectName)\n if appendSubject:\n newName = subjectName.replace(':', '').replace('.', '') + ' ' + attachmentName\n print '{0} was renamed to {1}!!\\n'.format(attachmentName, newName)\n attachmentName = newName\n filePath = os.path.join(baseDir, 'attachments', attachmentName)\n if not os.path.isfile(filePath):\n fp = open(filePath, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n else:\n print '{0} already exist!!\\n'.format(attachmentName)\n os.remove(filePath)\n fp = open(filePath, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n print '{0} was replaced!!\\n'.format(attachmentName)\n\n print 'email {0} ({1}) processed\\n'.format(index, emailAll['subject'])\n print 'End\\n******************************************\\n'\n\n @staticmethod\n def extractPhrase(urlfile, pattern = 'url'):\n '''\n this functino will extract the phrase using the regex pattern,\n url and email could be two commonn pattern, \n otherwise, just pass the pattern string directly not the re compiled object\n '''\n assert isinstance(pattern, str), 'wrong pattern type, should be str'\n common_patterns = {'url': r'(https|http|ftp|www)(://|.)([\\w.,@?![\\]^=%&:/~+#-]*[\\w@?![\\]^=%&/~+#-])+', \n 'email': r'([\\w+_.]+@[\\w-]+\\.[\\w.-]+)'}\n if pattern in common_patterns:\n pattern = common_patterns[pattern]\n groups = 
re.findall(pattern, urlfile, flags = re.MULTILINE)\n phrases = [''.join(group) for group in groups]\n return phrases\n \n def downloadPhrase(self, emailIndex, pattern = 'url', fetchLimit = 500, appendSubject = False):\n '''\n emailIndex: iterable of all email id to be fetched\n target: target file name list including extension\n fetchLimit: maximum number of email to fetch\n '''\n print '******************************************\\nBegin\\n******************************************'\n phraseFileName = 'Phrases List'\n if os.path.isfile(os.path.join('.', phraseFileName + '.txt')):\n index = 1\n while os.path.isfile(os.path.join('.', phraseFileName + ' ' + str(index) + '.txt')):\n index += 1\n phraseFileName = phraseFileName + ' ' + str(index)\n fp = open(os.path.join('.', phraseFileName + '.txt'), 'wb')\n fp.write('*********************\\r\\n\\r\\nALL Phrases List ({0})\\r\\n*********************\\r\\n\\r\\n'.format(self._search))\n for fetched, index in enumerate(emailIndex):\n if fetched >= fetchLimit:\n print '******************************************\\nFetch Limit {0} Reached\\n'.format(fetchLimit)\n break\n # the emailPackage contains email itself and some other meta data about it\n result, emailPackage = self._imap.fetch(index, '(RFC822)')\n assert result == 'OK', 'unable to fetch email with index {0}'.format(index)\n print '\\nemail {0} fetched'.format(index)\n # the emailAll contains sll and different type of elements for a email\n # itself is a huge string will some filed in it\n # email.walk() will be the way to look the element in emailAll\n # email.get_content_maintype() can get content type\n # first a coupl eis usually of multipart type, which contians further subject, body and attachment, \n # seemingly one multipart for each level of email, if the email has been forwarded or replyed,\n # it will has corresponding number of multipart\n # then the text element, which is usually the message body of the email\n # then the attachments\n # email.get('Content-Disposition') will get description for the part of email\n # description is always None fo rmultipart and text\n emailAll = email.message_from_string(emailPackage[0][1])\n for part in emailAll.walk():\n if part.get_content_maintype() == 'text':\n phrase_list = self.extractPhrase(part.get_payload(decode = True), pattern) # with extension\n else:\n continue\n fp.write(\"Phrase From {0}\\r\\n----------\\r\\n\".format(emailAll['subject']))\n for phrase in phrase_list:\n fp.write(\"%s\\r\\n\" % phrase)\n fp.write(\"----------\\r\\n\\r\\n\")\n print 'email {0} ({1}) processed\\n'.format(index, emailAll['subject'])\n fp.close()\n print '******************************************\\nEnd\\n******************************************\\n'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n " }, { "alpha_fraction": 0.8118811845779419, "alphanum_fraction": 0.8118811845779419, "avg_line_length": 66.66666412353516, "blob_id": "89d787f976e80c3c6e337b090217b29391547015", "content_id": "89b669bdfc7ba9827b9fe7ca6de7be9a305528a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 202, "license_type": "no_license", "max_line_length": 184, "num_lines": 3, "path": "/README.md", "repo_name": "yl2526/email_download", "src_encoding": "UTF-8", "text": "# email download\n\nThis file contains a downloader object. The object is to setup a imap connection and download certain attachments or to extract some pharases such as email address or urls in the text." } ]
2
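The downloader.py record above defines the whole IMAP workflow (login, search, addTarget, download, close) but ships no usage example. Here is a minimal sketch against exactly the method signatures shown in that file; it targets Python 2 like the module itself, and the account name, sender address and attachment filename are placeholders, not values from the repo:

```python
# Hypothetical usage of the downloader class; credentials and names are placeholders.
from downloader import downloader

# passWord omitted: the constructor falls back to a getpass prompt
dl = downloader(userName='alice', server='imap.gmail.com', folder='Inbox')
ids = dl.search('(FROM "reports@example.com")', gmail=False)  # newest ids first
dl.addTarget(attachment='weekly_report.xlsx')   # only fetch this attachment
dl.download(ids, fetchLimit=50)                 # files land in ./attachments
dl.close()
```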
sjoerdk/sjoerdkerkstra
https://github.com/sjoerdk/sjoerdkerkstra
15fb9b31f7dee35b0a4df297badd48c053cbb4d4
9ca6733d152707f367cd308bb29baa85b6c6e33e
66d8586ece5dbd0da4b9402f40c64ce9d06386af
refs/heads/master
2021-01-13T01:59:47.049094
2014-04-06T22:18:32
2014-04-06T22:18:32
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5590550899505615, "alphanum_fraction": 0.5721784830093384, "avg_line_length": 37.099998474121094, "blob_id": "e66bce8aae7e6afe63735532b4a08f4d249f5d5f", "content_id": "d28b08361cd8a9fd946d316a754c78d2cbf1a45f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 116, "num_lines": 20, "path": "/sjoerdkerkstra/mixes_site/models.py", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib import admin\n# Create your models here.\n\n\nclass Mixclouditem(models.Model):\n user_name = models.CharField(max_length=200,default=\"\",\n help_text = \"Will be used to generate link url\") \n mix_name = models.CharField(max_length=200,default=\"\",\n help_text = \"Name of the mix, no-spaces-but-with-hyphens\") \n description = models.TextField(max_length = 1024, default=\"\",\n blank=True,help_text = \"Print this below widget\")\n \n \n def __unicode__(self):\n \"\"\" string representation for this object\"\"\"\n return self.mix_name\n\n\nadmin.site.register(Mixclouditem)\n" }, { "alpha_fraction": 0.7387944459915161, "alphanum_fraction": 0.7403400540351868, "avg_line_length": 34.88888931274414, "blob_id": "ba93da55e6c4cd1ebabbbe315b07cc623a02e6e1", "content_id": "866ec5db3f074058d1c80de91a71cda30d263125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 154, "num_lines": 18, "path": "/sjoerdkerkstra/sjoerdkerkstra/settings.py", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "import os\nimport glob \nfrom django.core.exceptions import ImproperlyConfigured\n\n# Scheme for using different settings wihout having everything in git:\n# Get settings/*.conf and execute these in alphabetic order. \n# later values overwrite previous settings. \n\npath = os.path.join(os.path.dirname(__file__), 'settings', '*.conf')\nconffiles = glob.glob(path)\n\nif(len(conffiles) == 0):\n msg = \"Could not find any files matching '\" + path + \"'. 
There should be at least one configuration file containing django settings at that location.\"\n raise ImproperlyConfigured(msg) \n\nconffiles.sort()\nfor f in conffiles:\n execfile(os.path.abspath(f))\n\n" }, { "alpha_fraction": 0.5892857313156128, "alphanum_fraction": 0.5892857313156128, "avg_line_length": 27, "blob_id": "cfda45d118e23507c4871b14dbbb9af854712bf6", "content_id": "1cd75a4458809e5b21b08de30c2f2b10b2d01e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 61, "num_lines": 6, "path": "/sjoerdkerkstra/mixes_site/urls.py", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('',\n \n url(r'^$', 'mixes_site.views.mainpage', name='mainpage'),\n)\n" }, { "alpha_fraction": 0.6805111765861511, "alphanum_fraction": 0.6805111765861511, "avg_line_length": 33.77777862548828, "blob_id": "adb9828a9db33a28bbbecbcfe572b8afef61f2a8", "content_id": "752dc64f96cdc4a860798fb0f531ee58be7067ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/sjoerdkerkstra/sjoerdkerkstra/urls.py", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'sjoerdkerkstra.views.home', name='home'),\n # url(r'^sjoerdkerkstra/', include('sjoerdkerkstra.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', include('sjoerdkerkstra_site.urls')),\n url(r'^mixes/$', include('mixes_site.urls')),\n)\n" }, { "alpha_fraction": 0.6397515535354614, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 10.5, "blob_id": "6fda8895a31efa7e6a728ead67b62f8d3b00aa89", "content_id": "27e98a84d30b0a8699c9b3c2f1a0237ab5cf40e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 19, "num_lines": 14, "path": "/README.md", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "sjoerdkerkstra\n==============\n\nMy personal website\n\n\n= Requirements =\n* Django 1.4+\n* South\n\n= Installation =\n* pip South\n* manage.py syncdb\n* manage.py migrate\n" }, { "alpha_fraction": 0.7408758997917175, "alphanum_fraction": 0.7664233446121216, "avg_line_length": 33.25, "blob_id": "715c92b967e5e17f97900e34e5efb95576c6a73d", "content_id": "1ef9fc33e0d5e0442e2158bcb3a1d1a467cb45b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 65, "num_lines": 8, "path": "/sjoerdkerkstra/sjoerdkerkstra_site/views.py", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse,Http404\nfrom django.shortcuts import render_to_response,get_object_or_404\n\n\ndef home(request):\n return render_to_response(\"sjoerdkerkstra_site/base.html\")\n #return render_to_response(\"base2.html\")\n #return HttpResponse(\"MIXES\")\n" }, { "alpha_fraction": 
0.7437810897827148, "alphanum_fraction": 0.7587064504623413, "avg_line_length": 27.714284896850586, "blob_id": "9b73106af18650c938c0c11b1879e7123f4da708", "content_id": "d6efdda1a3650316097d5413bc536ceb02180d9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/sjoerdkerkstra/mixes_site/views.py", "repo_name": "sjoerdk/sjoerdkerkstra", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse,Http404\nfrom django.shortcuts import render_to_response,get_object_or_404\n\nfrom mixes_site.models import Mixclouditem\n\n\ndef home(request):\n return render_to_response(\"mixes_site/base.html\")\n #return HttpResponse(\"MIXES\")\n\n\ndef mainpage(request):\n mixes = Mixclouditem.objects.all()\n return render_to_response(\"mixes_site/mainpage.html\",{\"mixes\":mixes})\n" } ]
7
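The settings.py in the record above explains its configuration scheme in a docstring: every `settings/*.conf` file is exec'd in alphabetical order, with later files overwriting earlier values. Since no `.conf` files are committed (that is the point of the scheme), here is a hypothetical pair showing how the ordering yields local overrides; the file names and values are invented for illustration:

```python
# settings/00-defaults.conf -- hypothetical; exec'd first
DEBUG = False
ALLOWED_HOSTS = []

# settings/50-local.conf -- hypothetical; exec'd later, so its values win
DEBUG = True
ALLOWED_HOSTS = ['localhost']
```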
voodoonofx/ROAutoLoot
https://github.com/voodoonofx/ROAutoLoot
e1526cc36cbb88432880fd0d18d9c27e046f8636
e53be521ba3d86f61fba7a78e34e8486bacf285b
cbce4a8b4cf5632fe1517b9b59fccfaac6c6e2fc
refs/heads/master
2021-01-18T20:39:28.167971
2017-04-04T19:31:15
2017-04-04T19:31:15
86,983,132
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.6837209463119507, "avg_line_length": 17, "blob_id": "b5a9904af23f199c23db643dfe510a3a199d17a6", "content_id": "a76d2b19d82022879622dce1395d0c4b80d069f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 65, "num_lines": 12, "path": "/__init__.py", "repo_name": "voodoonofx/ROAutoLoot", "src_encoding": "UTF-8", "text": "__version__ = '0.1'\n__author__ = 'VooDooNOFX'\n\nimport bot\n\n'''\nThis plugin is released for the exlcusive use by ROPlus users.\n\nDon't be a dick and steal our ideas. Fucking unoriginal bastards.\n'''\n\ng_Bot = bot.Bot()" }, { "alpha_fraction": 0.5736917853355408, "alphanum_fraction": 0.5789726376533508, "avg_line_length": 40.68000030517578, "blob_id": "5e78f58dc671545102eb0f76adc5af725dac43b8", "content_id": "bd127b5c3e31df5cdc142634a0b95fa3c5be55f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2083, "license_type": "no_license", "max_line_length": 124, "num_lines": 50, "path": "/bot.py", "repo_name": "voodoonofx/ROAutoLoot", "src_encoding": "UTF-8", "text": "import itertools\nfrom datetime import datetime\n\nimport roplus\nfrom gui import main_window\n\nimport BigWorld\n\nclass Bot(object):\n def __init__(self):\n roplus.log('AutoLoot Loading')\n\n # A few defaults in case onPulse hasn't triggered yet.\n self.player = self.p = BigWorld.player()\n self.entities = {}\n self.items = []\n\n # Some configuration options\n self.entity_range = 100\n\n # And some internals\n self.last_loot_attempt_time = datetime.now()\n self.key_func = lambda e: e.__module__\n\n self.mainWindow = main_window.MainWindow(self)\n self.mainWindow.show()\n roplus.registerCallback('ROPlus.OnPulse', self.onPulseCallback)\n roplus.log(' ... 
Done')\n\n def onPulseCallback(self, *args, **kw):\n # roplus.log('self: {0}'.format(self), 'args: {0}'.format(args), 'kw: {0}'.format(kw))\n grouper = itertools.groupby(sorted(self.p.entitiesInRange(self.entity_range), key=self.key_func), key=self.key_func)\n self.entities = dict((k, list(v)) for (k, v) in grouper)\n self.items = self.entities.get('DroppedItem', []) + self.entities.get('TreasureBox', [])\n # roplus.log('self.enabled_auto_loot:', self.enabled_auto_loot)\n for item in self.items:\n if not item:\n continue\n # Loot it, if we can\n item_dist = self.p.position.distTo(item.position)\n if item_dist < 4.0 and self.mainWindow.enabled_auto_loot:\n # Try looting it!\n if (datetime.now() - self.last_loot_attempt_time).total_seconds() > 1:\n if item.__module__ == 'TreasureBox':\n roplus.log('Opening Treasure Box: {0}'.format(item.roleName))\n item.use()\n elif item.__module__ == 'DroppedItem' and item._checkPickItem(self.p):\n roplus.log('Looting Nearby Item: {0}'.format(item.roleName))\n self.p.pickNearByItems(True)\n self.last_loot_attempt_time = datetime.now()" }, { "alpha_fraction": 0.4397488534450531, "alphanum_fraction": 0.4443371295928955, "avg_line_length": 45.51685333251953, "blob_id": "5dc83e3436c3a96a48d43418a0a9616c226ef54e", "content_id": "227dcc33fd15ddd2e997efd77e8aad642f9f489b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4141, "license_type": "no_license", "max_line_length": 140, "num_lines": 89, "path": "/gui/main_window.py", "repo_name": "voodoonofx/ROAutoLoot", "src_encoding": "UTF-8", "text": "\nfrom datetime import datetime\n\nimport BigWorld\n\nimport imgui\nimport roplus\nfrom roplus.helpers import nav\nimport ROAutoLoot as autoloot\n\n\nclass MainWindow(object):\n def __init__(self, botInstance):\n self.bot = botInstance\n self.window_visable = False\n self.enabled_auto_loot = False\n\n\n # Register functions\n roplus.registerCallback('ROPlus.OnDrawGUI', self.onDrawGuiCallback)\n\n def show(self):\n self.window_visable = True\n\n def onDrawGuiCallback(self, *args, **kw):\n if self.window_visable:\n try:\n if imgui.begin('AutoLoot {0} - {1}##Entity_mainwindow'.format(autoloot.__version__, autoloot.__author__), (600,350)):\n # button bar\n if imgui.checkbox('Enable Auto-Loot', self.enabled_auto_loot):\n self.enabled_auto_loot = not self.enabled_auto_loot\n \n if imgui.collapsingHeader('Available Loot ({0} items)'.format(len(self.bot.items))):\n imgui.columns(4)\n for item in self.bot.items:\n if not item:\n continue\n\n imgui.text(item.roleName)\n imgui.nextColumn()\n\n if item.__module__ == 'DroppedItem':\n try:\n imgui.text('Lootable' if item._checkPickItem(self.bot.p) else 'Not Lootable')\n except AttributeError:\n imgui.text('Not _checkPickItem')\n else:\n imgui.text('Openable?')\n imgui.nextColumn()\n\n imgui.text('{0}'.format(self.bot.p.position.distTo(item.position)))\n imgui.nextColumn()\n\n if imgui.button('Go To {0}##NavToEntity'.format(item.__module__)):\n nav.moveToEntityPathFind(item)\n imgui.nextColumn()\n imgui.separator()\n imgui.columns(1)\n\n if imgui.collapsingHeader('Debug All Entities'):\n for entity_name, entities in sorted(self.bot.entities.iteritems()):\n for entity in entities:\n imgui.columns(5)\n imgui.separator()\n imgui.text('{0}'.format(entity_name))\n imgui.nextColumn()\n imgui.text('{0}'.format(entity.id))\n imgui.nextColumn()\n if entity_name == 'DroppedItem' and hasattr(entity, '_checkPickItem') and entity._checkPickItem(self.bot.p):\n 
imgui.text('{0}'.format('Lootable'))\n elif not entity_name == 'DroppedItem':\n imgui.text('No Data Available')\n else:\n imgui.text('Not your Loot!')\n imgui.nextColumn()\n if entity and hasattr(entity, 'position') and self.bot.p and hasattr(self.bot.p, 'position'):\n imgui.text('{0}'.format(self.bot.p.position.distTo(entity.position)))\n else:\n imgui.text('No Position Information')\n imgui.nextColumn()\n if imgui.button('NavToEntity##NavToEntity'):\n nav.moveToEntityPathFind(entity)\n imgui.nextColumn()\n imgui.columns(1)\n imgui.end()\n except Exception:\n import traceback\n for line in traceback.format_exc().splitlines():\n roplus.log(line)\n self.window_visable = False\n" }, { "alpha_fraction": 0.7729257345199585, "alphanum_fraction": 0.7729257345199585, "avg_line_length": 18.08333396911621, "blob_id": "5e07e623b5c37be06f04989ffc67671e1c63553b", "content_id": "ecc4dff1b1d318c6493515813d5c020f61cdbb68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 229, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/README.md", "repo_name": "voodoonofx/ROAutoLoot", "src_encoding": "UTF-8", "text": "# ROAutoLoot\nAn automated vacuum cleaner, which autoloots everything it can.\n\n## Installation\nJust checkout ROAutoloot into your Scripts directory.\n\n## Usage:\nInside ROPlus's Python Editor, type:\n\n```python\nimport ROAutoLoot\n```\n" } ]
4
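In the ROAutoLoot record above, Bot.onPulseCallback buckets the results of entitiesInRange with itertools.groupby keyed on `__module__`. The non-obvious part is that groupby only merges *adjacent* items, which is why the list is sorted by the same key first. A standalone sketch of that sort-then-group idiom, using plain strings as stand-ins for the game entities (the real key is `lambda e: e.__module__`):

```python
# Sort-then-group idiom from Bot.onPulseCallback, shown with dummy data.
import itertools

entities = ['DroppedItem', 'TreasureBox', 'DroppedItem', 'Monster']
key_func = lambda e: e  # bot.py keys on e.__module__ instead

# groupby only merges adjacent equal keys, hence the sort by the same key
grouper = itertools.groupby(sorted(entities, key=key_func), key=key_func)
by_type = dict((k, list(v)) for k, v in grouper)
# -> {'DroppedItem': ['DroppedItem', 'DroppedItem'],
#     'Monster': ['Monster'], 'TreasureBox': ['TreasureBox']}
```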
zonyzhao/HRReference
https://github.com/zonyzhao/HRReference
0826680c046cec6ef5ee8759abcc88956ba7d090
8c0c0f20b0faa327c56075bc90a631ac91f41003
cdce025e2399b176a9df35b4495a12ab798d9942
refs/heads/master
2023-08-18T14:53:08.714691
2021-10-12T01:51:30
2021-10-12T01:51:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6596858501434326, "alphanum_fraction": 0.727748692035675, "avg_line_length": 20.22222137451172, "blob_id": "4f5824c18e4bdcd0814bbe8b538f22acaf7f902f", "content_id": "a5103128dcd10cd3df05dc41afbf4cd8c9c1a726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 68, "num_lines": 9, "path": "/model/pytorch_test/nn_test.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nnet=nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX=torch.rand(2, 20)\nnet(X)\n\nimport torch.nn as nn\n" }, { "alpha_fraction": 0.5922077894210815, "alphanum_fraction": 0.6051948070526123, "avg_line_length": 32.39130401611328, "blob_id": "e3886a301e162907239e198d0fd24ebe2dfbcac1", "content_id": "e1cbc8bb7ed302194c2683f9813d5b113e002db5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 68, "num_lines": 23, "path": "/controller/data_ref_controller.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import numpy as np\nimport lib.ecg_processing.ecgdetectors as ecgdetector\n\n\nclass RefDataController:\n def __init__(self, ref_data, fs, mtime):\n self.ref_data = ref_data\n self.fs = fs\n self.mtime = mtime\n\n def __find_r_peaks(self):\n detector = ecgdetector.Detectors(self.fs)\n r_peaks = detector.engzee_detector(self.ref_data)\n return r_peaks\n\n def create_window_arr(self, mid_area_size=100, window_size=360):\n window_arr = np.zeros(len(self.ref_data))\n r_peaks = self.__find_r_peaks()\n for index, value in enumerate(r_peaks):\n lower = int(value - mid_area_size / 2) + 1\n upper = int(value + mid_area_size / 2)\n window_arr[lower:upper] = 1\n return window_arr\n\n\n" }, { "alpha_fraction": 0.6615384817123413, "alphanum_fraction": 0.6615384817123413, "avg_line_length": 25, "blob_id": "8eacd4843426e871644a7c43b6a9574fd8626a79", "content_id": "715947dc61ade0ed89b73cab5c7c3084f8db0824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 58, "num_lines": 5, "path": "/test/read_csv.test.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import csv\nimport pandas as pd\nwith open(\"../data/recorded_data_gen/data.csv\", 'r') as f:\n data=pd.read_csv(f)\n print(data)\n" }, { "alpha_fraction": 0.6698406338691711, "alphanum_fraction": 0.7116206884384155, "avg_line_length": 42.982906341552734, "blob_id": "de2bc8965c667e4d4c46bba8308c7c9a3034c457", "content_id": "efa628b2292996b6d92dd5527f4d4dd0fc655856", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5146, "license_type": "permissive", "max_line_length": 210, "num_lines": 117, "path": "/lib/radarsimpy/README.md", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "[![Build Status](https://dev.azure.com/rookiepeng/radarsimc/_apis/build/status/rookiepeng.radarsimc?branchName=master)](https://dev.azure.com/rookiepeng/radarsimc/_build/latest?definitionId=3&branchName=master)\n<a href=\"#\" target=\"_blank\" rel=\"nofollow\"><img src=\"https://img.shields.io/badge/Python-3.7%7C3.8%7C3.9-blue?style=flat&logo=python\" height=\"20\" ></a>\n<a href=\"https://rookiepeng.github.io/radarsimpy/\" 
target=\"_blank\" rel=\"nofollow\"><img src=\"https://img.shields.io/badge/Documentation-latest-brightgree?style=flat&logo=read-the-docs\" height=\"20\" ></a>\n[![DOI](https://zenodo.org/badge/282958664.svg)](https://zenodo.org/badge/latestdoi/282958664)\n\n# RadarSimPy\n\n<img src=\"./assets/radarsimpy.svg\" alt=\"logo\" width=\"200\"/>\n\nA **Radar** **Sim**ulator for **Py**thon\n\n***This module needs to be built/used together with RadarSimC (the C++ engine for radar simulator)***\n\n## Key Features\n\n1. **Radar**: Classes to define a radar system\n - `radarsimpy.Transmitter`: Radar transmitter\n - `radarsimpy.Receiver`: Radar receiver\n - `radarsimpy.Radar`: Radar system\n2. **Simulator**: Radar baseband signal simulator\n - `radarsimpy.simulator.simpy`: Simulates and generates raw time domain baseband data (Python engine)\n - `radarsimpy.simulator.simc`: Simulates and generates raw time domain baseband data (C++ engine)\n3. **Raytracing**: Raytracing module for radar scene simulation\n - `radarsimpy.rt.lidar_scene`: Simulates LiDAR's point cloud based on a 3D environment model with ray tracing\n - `radarsimpy.rt.rcs_sbr`: Simulates target's radar cross section (RCS) based on the 3D model with ray tracing\n - `radarsimpy.rt.scene`: Simulates radar's response signal in a 3D environment model with ray tracing\n4. **Processing**: Basic radar signal processing module\n5. **Tools**: Receiver operating characteristic analysis\n\nThis module supports CPU/GPU parallelization.\nCPU parallelization is implemented through OpenMP.\nGPU parallelization (CUDA) has been added since v6.0.0, and it is still working in progress.\n\n## Dependence\n\n- numpy\n- scipy\n- meshio\n- [Visual C++ Runtime](https://aka.ms/vs/16/release/vc_redist.x64.exe/) (*Windows only*)\n\n## Installation\n\n[Contact me](https://zpeng.me/#contact) if you are interested in this module.\n\nTo use the module, please put the radarsimpy folder within your project folder as shown below.\n\n> Windows\n>\n> - your_project.py\n> - your_project.ipynb\n> - radarsimpy\n> - \\_\\_init__.py\n> - radarsimc.dll\n> - scene.xxx.pyd\n> - ...\n>\n\n> Linux\n>\n> - your_project.py\n> - your_project.ipynb\n> - radarsimpy\n> - \\_\\_init__.py\n> - libradarsimc.so\n> - scene.xxx.so\n> - ...\n>\n\n## Coordinate Systems\n\n### Scene Coordinate\n\n- axis (m): `[x, y, z]`\n- phi (deg): angle on x-y plane. Positive x-axis is 0 deg, positive y-axis is 90 deg\n- theta (deg): angle on z-x plane. Positive z-axis is 0 deg, x-y plane is 90 deg\n- azimuth (deg): azimuth -90 ~ 90 deg equal to phi -90 ~ 90 deg\n- elevation (deg): elevation -90 ~ 90 deg equal to theta 180 ~ 0 deg\n\n### Object's Local Coordinate\n\n- axis (m): `[x, y, z]`\n- yaw (deg): rotation along z-axis. Positive yaw rotates object from positive x-axis to positive y-axis\n- pitch (deg): rotation along y-axis. Positive pitch rotates object from positive x-axis to positive z-axis\n- roll (deg): rotation along x-axis. 
Positive roll rotates object from positive z-axis to negative y-axis\n- origin (m): `[x, y, z]`\n- rotation (deg): `[yaw, pitch, roll]`\n- rotation (deg/s): rate `[yaw rate, pitch rate, roll rate]`\n\n## Usage\n\n- Radar system simulation\n - [Doppler radar](https://zpeng.me/index.php/2019/05/16/doppler-radar/)\n - [FMCW radar](https://zpeng.me/index.php/2018/10/11/fmcw-radar/)\n - [TDM MIMO FMCW radar](https://zpeng.me/index.php/2019/04/07/tdm-mimo-fmcw-radar/)\n - [PMCW radar](https://zpeng.me/index.php/2019/05/24/pmcw-radar/)\n - [Arbitrary waveform](https://zpeng.me/index.php/2021/05/10/arbitrary-waveform/)\n - [Phase noise](https://zpeng.me/index.php/2021/01/13/phase-noise/)\n - [CFAR](https://zpeng.me/index.php/2021/01/10/cfar/)\n \n- RCS simulation\n - [Corner reflector RCS](https://zpeng.me/index.php/2021/05/10/corner-reflector-rcs/)\n - [Plate RCS](https://zpeng.me/index.php/2021/05/10/plate-rcs/)\n - [Car RCS](https://zpeng.me/index.php/2021/05/10/car-rcs/)\n\n- Radar system and scene simulation with ray tracing\n - [FMCW radar with a corner reflector](https://zpeng.me/index.php/2021/05/10/fmcw-radar-with-a-corner-reflector/)\n - [FMCW radar with a plate](https://zpeng.me/index.php/2021/05/10/fmcw-radar-with-a-plate/)\n - [FMCW radar with a car](https://zpeng.me/index.php/2021/05/10/fmcw-radar-with-a-car/)\n - [Doppler of a rotating object](https://zpeng.me/index.php/2021/05/10/doppler-of-a-rotating-object/)\n - [Micro-Doppler](https://zpeng.me/index.php/2021/05/10/micro-doppler/)\n - [Multi-path effect](https://zpeng.me/index.php/2021/05/10/multi-path-effect/)\n\n- LIDAR (Experimental)\n - [LIDAR point cloud](https://zpeng.me/index.php/2020/02/05/lidar-point-cloud/)\n\n- Characterization\n - [Receiver operating characteristic (ROC)](https://zpeng.me/index.php/2019/10/06/receiver-operating-characteristic/)\n" }, { "alpha_fraction": 0.6262425184249878, "alphanum_fraction": 0.6545725464820862, "avg_line_length": 30.186046600341797, "blob_id": "17394426787f236a7b56e5465ae5076302ee4145", "content_id": "a453b2a58461e676db39c19e12c22f51681df7a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4032, "license_type": "no_license", "max_line_length": 97, "num_lines": 129, "path": "/test.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import os\n\nimport numpy as np\nfrom utils_v1.config_path import project as pr\nfrom utils_v1.file_utils import file_pandas\nimport controller.data_ref_controller as data_ref_controller\nimport controller.data_radar_controller as data_radar_controller\nimport plotly.express as px\nimport pandas as pd\nimport utils_v1.butterworth_filter as btwf\nimport controller.signal_generate as sng\nfrom scipy.fft import fft, fftfreq\n\n# ========config=============\nroot_pro = pr.get_root_file(__file__)\ndata_folder = str(os.path.join(root_pro, \"data/reference\"))\nwindow_size = 360\nfs = 250\nmid_area_size = 40\n\n\ndef get_data(data_folder_path, file_number=0):\n list_data_file = file_pandas.get_list_file(data_folder_path)\n data = list(file_pandas.read_file(list_data_file[file_number], 1).index)\n data_np = np.asarray(data, dtype=np.float64)\n return data_np\n\n\ndef plot(x, y, peaks, window, name):\n fig = px.line(x=x, y=y, title=name)\n fig.add_scatter(x=peaks, y=y[peaks], mode='markers')\n fig.add_scatter(x=x, y=window)\n fig.show()\n\n\ndef plot_with_peaks(x, y, peaks, name):\n fig = px.line(x=x, y=y, title=name)\n fig.add_scatter(x=peaks, y=y[peaks], mode='markers')\n 
fig.show()\n\n\ndef plot2(x, y, y2, name):\n fig = px.line(x=x, y=y, title=name)\n fig.add_scatter(x=x, y=y2)\n fig.show()\n\n\ndef ecg_peaks(data=[], fs=250):\n from lib.ecg_processing.ecgdetectors import Detectors\n detectors = Detectors(fs)\n r_peaks = detectors.engzee_detector(data)\n return r_peaks\n\n\ndef test():\n #\n data = get_data(data_folder, 0)\n data = data[2000:6999]\n ref_data_controller = data_ref_controller.RefDataController(\n ref_data=data,\n fs=250,\n mtime=10)\n window_arr = ref_data_controller.create_window_arr()\n\n mtime = np.arange(0, len(window_arr), 1)\n peaks = ecg_peaks(data)\n data = data / np.max(data)\n plot(x=mtime, y=data, peaks=peaks, window=window_arr, name='tín hiệu tham chiếu')\n\n # radar_data\n\n file_path = '/home/nvh/lab_project/hr_processing/data/radar/data_test_1626256597.csv'\n radar_data = pd.read_csv(file_path)\n radar_data = np.asarray(list(radar_data.values))\n radar_data = radar_data[:, 0]\n radar_data = radar_data[4450:6450]\n radar_data_controller = data_radar_controller.RadarDataController(\n radar_data=radar_data,\n fs_radar=100,\n ref_data=data,\n fs_ref=250,\n mtime=10\n )\n window_radar = radar_data_controller.create_window_arr()\n peaks_radar = btwf.find_hr(radar_data)\n\n mtime = np.arange(0, len(radar_data), 1)\n\n filtered_data = btwf.butter_bandpass_filter(data=radar_data)\n\n filtered_data = filtered_data / np.max(filtered_data)\n label = radar_data_controller.mark_label(window_size=360)\n wrong_label = label['wrong_data']\n correct_label = label['correct_data']\n plot(x=mtime, y=filtered_data, window=window_radar, peaks=peaks_radar, name='tín hiệu radar')\n\n fig = px.line(x=[x for x in range(0, 360)], y=wrong_label[0, :], title='label=0')\n fig2 = px.line(x=[x for x in range(0, 360)], y=correct_label[0, :], title='label=1')\n\n fig.show()\n fig2.show()\n\ndef read_matlab_data():\n import utils_v1.file_utils.file_pandas as read_file_pandas\n parent_folder= \"E:\\\\lab_project\\\\hr_processing\\\\data\\\\data_gen\"\n freq_file=parent_folder + \"\\\\\"+\"\\\\frequency.csv\"\n data_folder=parent_folder+ \"\\\\\"+\"\\\\data\"\n list_file= read_file_pandas.get_list_file(data_folder)\n\n #======read frequency====\n freq_list=read_file_pandas.read_csv_file(freq_file)\n freq_list=freq_list.freq[0:9]\n\n chosen_file=list_file[0]\n data=read_file_pandas.read_csv_file(chosen_file)\n data = data.data\n print(data)\n time=np.arange(0, 20, 0.01)\n fig=px.line(x=time, y=data, title='test')\n fig.show()\n\nif __name__ == '__main__':\n band_hr = (0.93, 2.23)\n band_rr = (0.167, 3.167)\n fs = 100\n mtime = 20\n snr = -10\n sig_gen = sng.SignalGenerate(band_hr=band_hr, band_rr=band_rr, snr=snr, fs=fs, mtime=mtime)\n read_matlab_data()\n\n" }, { "alpha_fraction": 0.6936127543449402, "alphanum_fraction": 0.7365269660949707, "avg_line_length": 36.074073791503906, "blob_id": "ddd8ea700584246ce5f6518dc07eac4e21be2517", "content_id": "938586201b9fa532108d1480f3aea9286d388e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1002, "license_type": "no_license", "max_line_length": 132, "num_lines": 27, "path": "/read_mat_file.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import scipy.io\nimport plotly.express as px\nimport numpy as np\nimport utils_v1.butterworth_filter as 
btwf\nparent_path='E:\\\\lab_project\\\\hr_processing\\\\data\\\\datasets'\nfile_name=parent_path+'\\\\measurement_data_person6\\\\PCG_front_radar_front\\\\PCG_2L_radar_5L\\\\DATASET_2017-02-06_09-01-56_Person 6.mat'\nmat=scipy.io.loadmat(file_name)\ni_signal=mat['radar_I']\ni_signal = np.asarray(i_signal)\ndata=[]\nfor index, val in enumerate(i_signal):\n data.append(val[0])\ndata=data[3446:]\ntime_serries=np.arange(0, len(data)/500, 1/500)\n# fig=px.line(x=time_serries, y=data, title='i signal')\n\npeaks=btwf.find_hr(data, fs=500)\nfiltered_signal=btwf.butter_bandpass_filter(data, fs=500)\nfiltered_signal=filtered_signal/np.max(np.abs(filtered_signal))\nfig=px.line(x=time_serries, y=filtered_signal)\n\n# fig.add_scatter(x=time_serries, y=data/np.max(np.abs(data)))\nfig.add_scatter(x=peaks/500, y=filtered_signal[peaks], mode='markers')\nfig.show()\n\n# fig2=px.line(x=time_serries, y=data, title='raw data')\n# fig2.show()\n\n" }, { "alpha_fraction": 0.47058823704719543, "alphanum_fraction": 0.47058823704719543, "avg_line_length": 17, "blob_id": "f010bd3a0402ab76dd610508a6651f5528ba6468", "content_id": "cc354e06e987d37e371bab9a3f12ed40464b0815", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/utils_v1/noise_handler/__init__.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "__all__=[\"noise\"]" }, { "alpha_fraction": 0.5223991870880127, "alphanum_fraction": 0.5418118238449097, "avg_line_length": 34.875, "blob_id": "06f655b8b442f3e46fd14b50021137b5d457aede", "content_id": "a504951a8a796dc8c565aa9ad02804504c544557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4018, "license_type": "no_license", "max_line_length": 111, "num_lines": 112, "path": "/controller/radar_gen.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import numpy as np\nimport control\nfrom komm import AWGNChannel\nfrom utils_v1 import butterworth_filter as btwf\n\n\nclass RadarGenV2():\n def __init__(self, fs=100, mtime=20):\n \"\"\"\n fs: sample rate\n mtime: measured time\n \"\"\"\n\n self.fs = fs\n self.mtime = mtime\n\n def gen_respirator_v1(self, **kargs):\n \"\"\"\n refernece: \"High Accuracy Heartbeat Detection from\n CW-Doppler Radar Using Singular Value Decomposition and Matched Filter\"\n - authors: Yuki Iwata, Han Trong Thanh, Guanghao Sun and Koichiro Ishibashi\n \"\"\"\n # ===config parameters====\n Kb = kargs['kb']\n Ti = kargs['ti']\n Te = kargs['te']\n T = Ti + Te\n mtime = self.mtime\n t_c = kargs['tc']\n\n time_arr = np.arange(0, mtime, 1 / self.fs)\n data_resp = []\n res_function_model = None\n\n for t in time_arr:\n # time in period\n tip = t - T * int(t / T)\n if tip >= 0 and tip <= Ti:\n res_function_model = (-Kb / (Ti * Te)) * tip * tip + (Kb * T / (Ti * Te)) * tip\n # res_function_model = 0\n elif tip > Ti and tip <= T:\n res_function_model = (Kb / (1 - np.exp(-Te / t_c))) * (\n np.exp(-(tip - Te) / t_c) - np.exp(-Te / t_c))\n data_resp.append(res_function_model)\n return data_resp\n\n def gen_respirator_v2(self, **kargs):\n \"\"\"\n reference: System Modeling and Signal Processing of Microwave Doppler Radar for Cardiopulmonary Sensing\n \"\"\"\n am = kargs['am']\n freq = kargs['freq']\n phase = kargs['phase']\n mtime = self.mtime\n\n time_arr = np.arange(0, mtime, 0.01)\n data_resp = []\n for t in time_arr:\n res_val = am * np.sin(2 * np.pi * freq * t + 
phase)\n data_resp.append(res_val)\n return data_resp\n\n def gen_heartbeat_movement_signal_v1(self, **kargs):\n am = kargs['am']\n freq = kargs['freq']\n phase = kargs['phase']\n mtime = self.mtime\n\n time_arr = np.arange(0, mtime, 0.01)\n data_heart = []\n for t in time_arr:\n heart_val = am * np.sin(2 * np.pi * freq * t + phase)\n data_heart.append(heart_val)\n return data_heart\n\n def vibration_target(self, am_hb= 1* pow(10, -3), fhb=1.5, am_res=10 * pow(10, -3), te=2, ti=2, snr=30):\n phase_hb = 2 * np.pi * np.random.rand()\n res_val = self.gen_respirator_v1(am=am_res, kb=am_res, ti=ti, te=te, tc=0.8)\n hb_val = self.gen_heartbeat_movement_signal_v1(am=am_hb, freq=fhb, phase=phase_hb)\n chess_vibra = am_hb + np.asarray(res_val) + np.asarray(hb_val)\n linear_snr = control.db2mag(snr)\n awgn = AWGNChannel(snr=linear_snr, signal_power='measured')\n chess_vibra = awgn(chess_vibra)\n self.location_target = chess_vibra\n self.hb_val=hb_val\n self.res_val=res_val\n return chess_vibra\n\n def i_signal(self, normal_distance_target=30 * pow(10, -2), fhb=1.0, te=2, ti=2, snr=-10):\n # unit: meter\n # snr in db\n location_target = self.vibration_target(fhb=fhb, te=te, ti=ti)\n c = 3 * pow(10, 8)\n f_carrying_wave = 24 * pow(10, 9)\n wavelength = c / f_carrying_wave\n constant_phase_shift = 4 * np.pi * normal_distance_target / wavelength\n i_data = []\n\n linear_snr = control.db2mag(snr)\n awgn = AWGNChannel(snr=linear_snr, signal_power='measured')\n for instantous_location in location_target:\n i_val = 5 * np.cos(constant_phase_shift + 4 * np.pi * instantous_location / wavelength)\n i_data.append(i_val)\n i_data = awgn(i_data)\n return i_data\n\n def filter_i_signal(self):\n normal_distance_target = 30 * pow(10, -2)\n snr = -10\n i_signal = self.i_signal(normal_distance_target)\n filtered_signal = btwf.butter_bandpass_filter(data=i_signal, order=3)\n return filtered_signal\n" }, { "alpha_fraction": 0.5883721113204956, "alphanum_fraction": 0.6325581669807434, "avg_line_length": 33.83333206176758, "blob_id": "c0f9a5a59276ae862cee217dfb542eee646a16d7", "content_id": "3543431f83974621f32b27a0ba90a389c1724ffb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1290, "license_type": "no_license", "max_line_length": 108, "num_lines": 36, "path": "/utils_v1/butterworth_filter.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import scipy.signal as ss\r\nfrom scipy.signal import butter\r\nfrom scipy.signal import lfilter\r\n\r\n\r\ndef butter_bandpass_filter(data, lowcut=0.83, highcut=2.33, fs=100, order=3):\r\n nyq = 0.5 * fs\r\n low = lowcut / nyq\r\n high = highcut / nyq\r\n b, a = butter(order, [low, high], btype='bandpass', output='ba')\r\n y = lfilter(b, a, data)\r\n return y\r\n\r\n\r\ndef butter_lowpass_filter(data, fs=100, fr=0.5, order=5):\r\n nyq = 0.5 * fs\r\n f = fr / nyq\r\n c, d = butter(order, f, btype='lowpass')\r\n RR = lfilter(c, d, data)\r\n return RR\r\n\r\n\r\n# Loc tin hieu nhip tim\r\ndef find_hr(data, lowcut=0.83, highcut=2.33, fs=100, order=3, distance=400, width=0.3, prominence=0.5):\r\n hr = butter_bandpass_filter(data, lowcut=lowcut, highcut=highcut, fs=fs, order=order)\r\n threshold_hr = (max(hr) - min(hr)) * 0.002 # muc nguong\r\n peaks, _ = ss.find_peaks(hr, distance=distance, height=threshold_hr, width=width, prominence=prominence)\r\n return peaks\r\n\r\n\r\n# Loc tin hieu nhip tho\r\ndef find_rr(data, fs=100, fr=0.5, distance=250, width=2.5):\r\n rr = butter_lowpass_filter(data, fs=fs, 
fr=fr, order=5)\r\n threshold_rr = (max(rr) - min(rr)) * 0.01 - 1.5 # muc nguong\r\n peaks, _ = ss.find_peaks(rr, distance=distance, height=threshold_rr, width=width)\r\n return peaks\r\n" }, { "alpha_fraction": 0.519440770149231, "alphanum_fraction": 0.5378162264823914, "avg_line_length": 35.28019332885742, "blob_id": "4bfae37be1e0297f1c19ce41c24bc70fb5b86125", "content_id": "800f84fd33da9753e5685756366706855f2af434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7510, "license_type": "no_license", "max_line_length": 163, "num_lines": 207, "path": "/controller/signal_generate.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import errno\nimport pathlib\nimport random\n\nimport pandas as pd\nimport numpy as np\nfrom komm import AWGNChannel\nimport control\nimport os\nimport pathlib as pl\nfrom csv import writer\nimport random as rd\n\n\nclass SignalGenerate():\n def __init__(self, band_hr, band_rr, snr, fs, mtime):\n self.band_hr = band_hr\n self.band_rr = band_rr\n self.fs = fs\n self.mtime = mtime\n self.snr = snr\n self.hb = None\n self.hb_noise = None\n\n def awgn(self, s, SNRdB, L=1):\n # author - Mathuranathan Viswanathan (gaussianwaves.com\n # This code is part of the book Digital Modulations using Python\n from numpy import sum, isrealobj, sqrt\n from numpy.random import standard_normal\n \"\"\"\n AWGN channel\n Add AWGN noise to input signal. The function adds AWGN noise vector to signal 's' to generate a resulting signal vector 'r' of specified SNR in dB. It also\n returns the noise vector 'n' that is added to the signal 's' and the power spectral density N0 of noise added\n Parameters:\n s : input/transmitted signal vector\n SNRdB : desired signal to noise ratio (expressed in dB) for the received signal\n L : oversampling factor (applicable for waveform simulation) default L = 1.\n Returns:\n r : received signal vector (r=s+n)\n \"\"\"\n gamma = 10 ** (SNRdB / 10) # SNR to linear scale\n if s.ndim == 1: # if s is single dimensional vector\n P = L * sum(abs(s) ** 2) / len(s) # Actual power in the vector\n else: # multi-dimensional signals like MFSK\n P = L * sum(sum(abs(s) ** 2)) / len(s) # if s is a matrix [MxN]\n print(len(s))\n N0 = P / gamma # Find the noise spectral density\n if isrealobj(s): # check if input is real/complex object type\n n = sqrt(N0 / 2) * standard_normal(s.shape) # computed noise\n else:\n n = sqrt(N0 / 2) * (standard_normal(s.shape) + 1j * standard_normal(s.shape))\n r = s + n # received signal\n\n return r\n\n def _signal_hr_without_noise(self):\n numsig = self.fs * self.mtime\n center_freq = (np.max(self.band_hr) - np.min(self.band_hr)) * np.random.random_sample((numsig, 1)) + min(\n self.band_hr)\n sample = np.arange(0, numsig, 1)\n t = sample / self.fs\n print(np.pi)\n # hb = cos(\n # 2 * pi * (centerfreq * t - 0.1 * cos(2 * pi * t / 100 + 2 * pi * rand) / (2 * pi / 100)) + 2 * pi * rand);\n hb = np.cos(2 * np.pi *\n (center_freq * t - 0.1 * np.cos(2 * np.pi * t / 100)) /\n (2 * np.pi / 100)\n ) + 2 * np.pi * np.random.random_sample((numsig, numsig)\n )\n self.hb = hb\n return hb\n\n def gen_hr_with_noise(self):\n s = self._signal_hr_without_noise()\n # r = self.awgn(s=s, SNRdB=self.snr, L=0.1)\n linear_snr = control.db2mag(self.snr)\n awgn = AWGNChannel(snr=linear_snr, signal_power='measured')\n column_size = self.mtime * self.fs\n noise_matrix = np.empty_like(s)\n for i in range(0, column_size):\n data = s[i, :]\n r = awgn(data)\n noise_matrix[i, :] = r\n 
self.hb_noise = s + noise_matrix\n\n return s\n\n def _signal_rr(self):\n numsig = self.fs * self.mtime\n frr = (np.max(self.band_rr) - np.min(self.band_rr)) * np.random.random_sample((numsig, 1)) + min(self.band_rr)\n sample = np.arange(0, numsig, 1)\n t = sample / self.fs\n rb = 10 * np.cos(2 * np.pi * frr * t)\n return rb\n\n def gen_raw_singal(self):\n # r = self.gen_hr_with_noise() + self._signal_rr()\n r = self._signal_hr_without_noise()\n return r\n\n\nclass RadarGen():\n def __init__(self, hr_freq, rr_freq):\n self.hr_freq = hr_freq\n self.rr_freq = rr_freq\n\n def radar_data_gen(self):\n AI = 2\n wavelength = 0.0125\n freq_heart = 0.83 + np.random.rand() * (2.33 - 0.83)\n freq_lung = 0.167 + np.random.rand() * (0.367 - 0.167)\n heart_am = 0.1\n lung_am = 3\n t = np.arange(0, 20, 0.01)\n ran_phase_lung = 2 * np.pi * np.random.rand()\n ran_phase_heart = 2 * np.pi * np.random.rand()\n noise = np.empty((2, len(t)))\n for i in range(0, len(t)):\n ran_frequency = 12 * np.random.rand()\n noise[0, i] = ran_frequency\n ran_am = 5 * np.random.rand()\n noise[1, i] = ran_am\n\n i_signal = lung_am * np.sin(2 * np.pi * freq_lung * t + ran_phase_lung) + heart_am * np.sin(\n 2 * np.pi * freq_heart * t + ran_phase_heart)\n\n self.lung_signal=lung_am * np.sin(2 * np.pi * freq_lung * t + ran_phase_lung)\n self.heart_signal=heart_am * np.sin(2 * np.pi * freq_heart * t + ran_phase_heart)\n print(np.sin(np.pi/2))\n # self.noise=np.empty_like((1, len(t)))\n noise_total=np.empty((1, len(t)), dtype='float64')\n\n for j in range(0, len(t)):\n noise_total+=noise[1, j] * np.sin(2 * np.pi * noise[0, j]*t + 2 * np.pi * np.random.rand())\n self.noise=noise_total\n noise_total=noise_total/np.max(np.abs(noise_total))*7\n i_signal+=noise_total[0]\n print(self.noise)\n return i_signal\n\n def gen_hr_freq(self, center_fre):\n pass\n\n def gen_noise_in_hr_band(self, am_noise=0.006):\n mtime = 1689\n numsig = 1689\n am_arr = np.empty(numsig)\n freq_arr = np.empty(numsig)\n\n for i in range(0, mtime):\n amptitude = np.random.rand() * am_noise\n freq = np.random.rand() * (3 - 0.83) + 0.83\n am_arr[i] = amptitude\n freq_arr[i] = freq\n\n return am_arr, freq_arr\n\n def gen_freq_spectrum(self):\n pass\n\n def gen_freq_noise_out_hr_band(self):\n pass\n\n\nclass SignalHandle():\n def search_label_file(self, file_name=\"\"):\n \"\"\"\n if label file exist:\n return path of the file\n else:\n create file and return path of the file\n :param file_name: name of the file need to find\n :return: path of the file\n \"\"\"\n from sys import platform\n root_file = str(pl.Path(__file__).parent.parent)\n train_file_path = \"\"\n if platform == \"linux\" or platform == \"linux2\":\n train_file_path = \"/train_data/{}/{}\".format(file_name, file_name + '.csv')\n elif platform == \"win32\":\n train_file_path = \"\\\\train_data\\\\{}\\\\{}\".format(file_name, file_name + '.csv')\n train_file_path = root_file + train_file_path\n\n if not (os.path.isdir(train_file_path) or os.path.isfile(train_file_path)):\n try:\n os.makedirs(os.path.dirname(train_file_path))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n with open(train_file_path, 'w') as f:\n f.close()\n\n return train_file_path\n\n def save_data_with_label(self, data, label):\n \"\"\"\n :param data:\n :param label: int, heart rate number\n :return: void\n \"\"\"\n file_name = 'label_{}'.format(label)\n file_path = self.search_label_file(file_name)\n file_path = pathlib.Path(file_path)\n with open(file_path, 'a', newline='') as f:\n write_csv = writer(f)\n 
write_csv.writerow(data)\n f.close()\n" }, { "alpha_fraction": 0.46472302079200745, "alphanum_fraction": 0.5037900805473328, "avg_line_length": 32.07284927368164, "blob_id": "61c9bea14e42a613e7bd9dcb867af0e8d3997133", "content_id": "4bd42df2bc0fcba030d22e68d697507ee20f93d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5145, "license_type": "no_license", "max_line_length": 83, "num_lines": 151, "path": "/main.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import os\r\n# from src.utils_v1.file_utils import read_file_pandas\r\nfrom utils_v1.config_path import project as pr\r\nimport plotly.express as px\r\nimport numpy as np\r\nimport pywt\r\nimport scipy.signal as sig\r\nfrom utils_v1.file_utils import file_pandas\r\nfrom utils_v1 import butterworth_filter as btwf\r\nroot_pro = pr.get_root_file(__file__)\r\ndata_folder = str(os.path.join(root_pro, \"data/reference\"))\r\n\r\n\r\ndef plot(x, y, peaks):\r\n fig = px.line(x=x, y=y, title='custoam tick label')\r\n fig.add_scatter(x=peaks, y=y[peaks], mode='markers')\r\n fig.show()\r\n\r\n\r\ndef butter_lowpass(cutoff, fs, order=5):\r\n nyq = 0.5 * fs\r\n normal_cutoff = cutoff / nyq\r\n b, a = sig.butter(order, normal_cutoff, btype='low', analog=False)\r\n return b, a\r\n\r\n\r\n\r\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\r\n b, a = butter_lowpass(cutoff, fs, order=order)\r\n y = sig.lfilter(b, a, data)\r\n return y\r\n\r\n\r\ndef ecg_peaks(data=[], fs=250):\r\n from lib.ecg_processing.ecgdetectors import Detectors\r\n detectors = Detectors(fs)\r\n r_peaks = detectors.engzee_detector(data)\r\n return r_peaks\r\n\r\n\r\ndef labeling(ecg_data=[],data_from_radar=[], fs=250, mid_area=40, window_size=360):\r\n \"\"\"\r\n :param ecg_data:\r\n :param type: 0: P peak\r\n 1: Q peak\r\n 2: R peak\r\n :return: none\r\n \"\"\"\r\n from lib.ecg_processing.ecgdetectors import Detectors\r\n T = 100\r\n label_arr = np.zeros([1, len(ecg_data)])\r\n # init label\r\n cw1_wrong = np.zeros([len(ecg_data) * 30, 1])\r\n cw2_wrong = np.zeros([len(ecg_data) * 30, 1])\r\n cw1_correct = np.zeros([len(ecg_data) * 30, 1])\r\n cw2_correct = np.zeros([len(ecg_data) * 30, 1])\r\n hb_filt = np.zeros((len(ecg_data), T * fs))\r\n k1 = 1\r\n k2 = 1\r\n k_dif = np.zeros([len(ecg_data), 1])\r\n\r\n # detect peaks\r\n for i in range(1, len(ecg_data)):\r\n detectors = Detectors(fs)\r\n r_peaks = detectors.engzee_detector(data_np)\r\n differentiate_array=np.zeros(1, T*fs)\r\n hb_filt[i,:]=btwf.butter_bandpass_filter(data_from_radar, )\r\n for index, v in enumerate(r_peaks):\r\n if v - mid_area / 2 < 1:\r\n d1 = 1\r\n d2 = v + mid_area / 2 - 1\r\n elif v + mid_area / 2 - 1 > T * fs:\r\n d2 = T * fs\r\n d1 = v - mid_area / 2\r\n else:\r\n d1 = v - mid_area / 2\r\n d2 = v - mid_area / 2 - 1\r\n label_arr[d1:d2] = 1\r\n center_window = r_peaks\r\n done1 = 1\r\n done0 = 0\r\n for index, v in center_window:\r\n if done1 + done0 < 2:\r\n cw = v\r\n cw1 = v - window_size / 2\r\n cw2 = v + window_size / 2 - 1\r\n if cw > window_size / 2 + 1 and cw < T * fs - window_size / 2 + 2:\r\n if label_arr(v) == 1 and done1 == 0:\r\n cw1_correct[k1] = cw1\r\n cw2_correct[k2] = cw2\r\n k1 += 1\r\n if done0 == 1:\r\n k_dif[index] = 2\r\n else:\r\n k_dif[index] = 1\r\n done1 = 1\r\n elif label_arr[cw] == 0 and done0 == 0:\r\n cw1_wrong[k2] == cw1\r\n cw2_wrong[k2] == cw2\r\n k2 += 1\r\n if done1 == 1:\r\n k_dif[index] = 2\r\n else:\r\n k_dif[index] = 0\r\n done0 = 1\r\n else:\r\n break\r\n\r\n cw1_wrong = 
cw1_wrong[cw1_wrong[:, 2], :]\r\n cw1_correct = cw1_correct[cw1_correct[:, 2], :]\r\n cw1_correct[(len(cw1_wrong) + 1):len(cw1_correct), :] = []\r\n cw1_wrong[(len(cw1_correct) + 1):len(cw1_wrong), :] = []\r\n cw2_wrong = cw2_wrong[cw2_wrong[:, 2], :]\r\n cw2_correct = cw2_correct[cw2_correct[:, 2], :]\r\n cw2_correct[(len(cw2_wrong) + 1):len(cw2_correct), :] = []\r\n cw2_wrong[(len(cw2_correct)) + 1:len(cw2_wrong), :] = []\r\n\r\n wrong = np.zeros((len(cw1_wrong), window_size))\r\n correct = np.zeros((len(cw1_wrong), window_size))\r\n i1 = 1\r\n i2 = 1\r\n for index in range(1, len(ecg_data)):\r\n if k_dif[index] == 1 and i1 <= len(cw1_correct):\r\n correct[i1, :] =i2\r\n\r\n\r\nif __name__ == '__main__':\r\n list_data_file = file_pandas.get_list_file(data_folder)\r\n data = list(file_pandas.read_file(list_data_file[0], 1).index)\r\n data_np = np.asarray(data, dtype=np.float64)\r\n data_np = data_np[1000:3000]\r\n measure_time = np.arange(0, len(data_np), 1)\r\n\r\n a = pywt.families(True)\r\n coeffs = pywt.wavedec(data_np, 'bior3.7', level=3)\r\n b = pywt.waverec(coeffs, 'bior3.7')\r\n fs = 360\r\n fc = 0.667\r\n data_filtered = butter_lowpass_filter(data=data_np, cutoff=fc, fs=fs, order=3)\r\n\r\n r_peaks = ecg_peaks(data_np)\r\n\r\n plot(measure_time, data_np, r_peaks)\r\n # plt.plot(measure_time, data_np, 'r.')\r\n # plt.show()\r\n\r\n # x and y given as array_like objects\r\n import plotly.express as px\r\n\r\n fig = px.scatter(x=[0, 1, 2, 3, 4], y=[0, 1, 4, 9, 16])\r\n fig.show()\r\n" }, { "alpha_fraction": 0.5921282768249512, "alphanum_fraction": 0.5982507467269897, "avg_line_length": 36.69230651855469, "blob_id": "3fc3e0aa53291f1a2ffb6ce5ebbf14e9e302b444", "content_id": "c5aa58f2fac8e5625f23980c5069ae2194c01641", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3430, "license_type": "no_license", "max_line_length": 107, "num_lines": 91, "path": "/controller/data_radar_controller.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import numpy as np\nimport utils_v1.butterworth_filter as btwf\nimport controller.data_ref_controller as data_ref_controller\n\n\nclass RadarDataController:\n def __init__(self, radar_data, fs_radar, ref_data, fs_ref, mtime):\n \"\"\"\n Handle Data taken from radar\n :param radar_data:\n :param fs_radar: in Hz\n :param ref_data:\n :param fs_ref: in Hz\n :param mtime: in seconds\n \"\"\"\n self.radar_data = radar_data\n self.ref_data = ref_data\n self.fs_radar = fs_radar\n self.fs_ref = fs_ref\n self.mtime = mtime\n\n def filted_hr_from_radar_data(self):\n filtered_data = btwf.butter_bandpass_filter(data=self.radar_data, order=5)\n return filtered_data\n\n def get_window_arr(self):\n return\n\n def create_window_arr(self):\n ref_data_controller = data_ref_controller.RefDataController(self.ref_data, self.fs_ref, self.mtime)\n window_arr_ref = ref_data_controller.create_window_arr(100, 360)\n window_arr_radar = np.zeros(len(self.radar_data))\n for i, v in enumerate(window_arr_ref):\n index_window_arr_radar = round(i * self.fs_radar / self.fs_ref)\n window_arr_radar[index_window_arr_radar] = v\n return window_arr_radar\n\n def get_center_window(self, center_from_ref_data=[]):\n rs = map(lambda x: round(x * self.fs_radar / self.fs_ref),\n center_from_ref_data)\n return rs\n\n def mark_label(self, window_size=360):\n # wrong=np.zeros(len())\n raw_data = self.radar_data\n peaks_radar = btwf.find_hr(raw_data)\n filtered_data = btwf.butter_bandpass_filter(raw_data)\n\n window_arr = 
self.create_window_arr()\n\n list_upper_window_correct = []\n list_lower_window_correct = []\n\n list_upper_window_wrong = []\n list_lower_window_wrong = []\n\n for i, v in enumerate(peaks_radar):\n upper_window = int(v + window_size / 2)\n lower_window = int(v - window_size / 2)\n if lower_window > 0 and upper_window<=len(self.radar_data):\n if window_arr[v] == 1:\n list_lower_window_correct.append(lower_window)\n list_upper_window_correct.append(upper_window)\n elif window_arr[v] == 0:\n list_lower_window_wrong.append(lower_window)\n list_upper_window_wrong.append(upper_window)\n\n number_wrong_data = len(list_lower_window_wrong)\n number_correct_data = len(list_lower_window_correct)\n\n wrong_data = np.empty([number_wrong_data, window_size])\n correct_data = np.empty([number_correct_data, window_size])\n\n for i in range(0, number_correct_data):\n upper_win_correct = list_upper_window_correct[i]\n lower_win_correct = list_lower_window_correct[i]\n\n correct_data[i, :] = filtered_data[lower_win_correct:upper_win_correct]\n\n for i in range(0, number_wrong_data):\n upper_win_wrong = list_upper_window_wrong[i]\n lower_win_wrong = list_lower_window_wrong[i]\n wrong_data[i, :] = filtered_data[lower_win_wrong:upper_win_wrong]\n\n return {\n 'wrong_data': wrong_data,\n 'correct_data': correct_data\n }\n def save_window(self, window, window_size=360):\n window_data_matrix = np.zeros((len(window), self.window_size))\n pass\n" }, { "alpha_fraction": 0.6226158142089844, "alphanum_fraction": 0.6226158142089844, "avg_line_length": 24.310344696044922, "blob_id": "e2a6a83db361534e7383f08d4ba7019ba3d8cc9a", "content_id": "99c9687f61d6da925681628c1827d30136beb992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 734, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/utils_v1/file_utils/file_pandas.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import csv\ndef get_list_file(folder_path=\"\"):\n import os\n rs = []\n for root, _dir, _filenames in os.walk(folder_path):\n for filename in _filenames:\n rs.append(os.path.join(folder_path, filename))\n return rs\n\ndef read_file(file_path, column):\n import pandas as pd\n return pd.read_excel(file_path, index_col=column)\n\ndef read_csv_file(file_path):\n import pandas as pd\n return pd.read_csv(file_path)\n\ndef write_csv_file(file_path, row):\n with open(file_path, 'w', newline='') as f:\n writer=csv.writer(f)\n writer.writerow(row)\n f.close()\n\n\ndef create_file(file_path):\n import os\n if not os.path.isfile(file_path):\n f = open(file_path, mode='x')\n f.close()\n" }, { "alpha_fraction": 0.6896551847457886, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 14, "blob_id": "fcc2eccc470865001edb925f82556b0dda9fa44a", "content_id": "ab7fa197f2cd0ec9c6a4a6f2423ca4cf198f3292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/model/signal_nn.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "class SignalModel():\n pass" }, { "alpha_fraction": 0.6627491116523743, "alphanum_fraction": 0.6852325201034546, "avg_line_length": 38.08000183105469, "blob_id": "6c9726c8e19cda99269d0316e0cdd8d042e6b0e6", "content_id": "dce22ee0f6c0bd47e37a242864029d89faa59de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1957, 
"license_type": "no_license", "max_line_length": 116, "num_lines": 50, "path": "/test/radar_gen.test.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import plotly.express as px\nfrom controller.radar_gen import RadarGenV2\nfrom utils_v1 import butterworth_filter as btwf\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nimport utils_v1.file_utils.file_pandas as fd\n# peaks = btwf.find_hr(data=signal_hb_res, fs=radar_gen.fs, order=3, distance=50, prominence=1)\n# print(len(peaks)*3)\n# time_arr_resp = np.arange(0, len(signal_hb_res), 1)\n\n# fig3 = px.line(x=time_arr_resp, y=radar_gen.location_target, title=\"test\")\n# fig3.add_scatter(x=time_arr_resp, y=radar_gen.hb_val, mode='lines')\n# fig3.add_scatter(x=time_arr_resp, y=radar_gen.res_val, mode='lines')\n# fig3.show()\n#\n# fig = px.line(x=time_arr_resp, y=filtered_signal, title=\"test\")\n# fig.add_scatter(x=peaks, y=filtered_signal[peaks], mode='markers')\n# fig.show()\n#\n# fig2 = px.line(x=time_arr_resp, y=signal_hb_res, title=\"test\")\n# fig2.add_scatter(x=time_arr_resp, y=hb_signal, mode='lines')\n# fig2.show()\n\nimport utils_v1.file_utils.file_pandas as fp\n\nfile_path = '../data/recorded_data_gen/data.csv'\n# fp.write_csv_file(file_path, ['value', 'label'])\n\nfre_hb_gen = 50\n\n#chay tu 50 - 140\nfor i in range(90):\n # label=np.zeros(90)\n # label[i]=1\n radar_gen = RadarGenV2(fs=100, mtime=20)\n data_list=[]\n quantity_signal_per_label=50\n for j in range(quantity_signal_per_label):\n signal_hb_res = radar_gen.i_signal(fhb=fre_hb_gen, te=2, ti=2, snr=0)\n filtered_signal = btwf.butter_bandpass_filter(data=signal_hb_res, fs=radar_gen.fs, order=3)\n filtered_signal = filtered_signal / np.max(np.abs(filtered_signal))\n labeled_filtered_signal=filtered_signal\n data_list.append(labeled_filtered_signal)\n data_frame = DataFrame(data_list)\n path_laptop = 'D:\\\\lab_project\\\\hr_processing\\\\data\\\\recorded_data_gen\\\\data{0}.csv'.format(\"_\"+str(fre_hb_gen))\n path_lab = ''\n fd.create_file(path_laptop)\n data_frame.to_csv(path_laptop, mode='w', header=True)\n fre_hb_gen += 1\n\n\n\n" }, { "alpha_fraction": 0.5823640823364258, "alphanum_fraction": 0.6031917929649353, "avg_line_length": 28.814516067504883, "blob_id": "e929528dfbf9a79414231a07423b414369a4b690", "content_id": "ec91a900ff02fe7b7412b01b2e51cf7271e7bfba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3697, "license_type": "no_license", "max_line_length": 103, "num_lines": 124, "path": "/fourier_transform_radar.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import pathlib\n\nfrom scipy.fft import fft, fftfreq, ifft\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport controller.signal_generate as sng\nimport plotly.express as px\nimport os\nimport pathlib as pl\n\ndef read_txt_data(file_path='/home/nvh/lab_project/hr_processing/data/radar/data_test_1626256898.csv'):\n with open(file_path, 'r') as f:\n df = pd.read_csv(f).values\n df = df[:, 0]\n f.close()\n return df\n\n\ndef fourier_transform(all, start=0, stop=10):\n data = read_txt_data()\n if not all:\n data = data[start:stop]\n data_f = fft(data)\n return data_f\n\n\n\ndef test_signal_render():\n #============noise in hr band========\n am_noise_in_hr_band = 0.006\n ran_hr_freq = 0.83 + np.random.rand() * (2.33 - 0.83)\n ran_rr_freq = 0.167 + np.random.rand() * (0.367 - 0.167)\n\n radar_gen = sng.RadarGen(ran_hr_freq, 
ran_rr_freq)\n am_arr, freq_arr = radar_gen.gen_noise_in_hr_band(am_noise_in_hr_band)\n # fig = px.scatter(x=np.arange(0, len(freq_arr), 1), y=freq_arr)\n # fig.show()\n\n #=========gen movement signal========\n i_signal=radar_gen.radar_data_gen()\n fs = 100\n mtime=np.arange(0, len(i_signal)/fs, 1/fs)\n\n i_signal_fig=px.line(x=mtime, y=i_signal, title='i signal')\n i_signal_fig.add_scatter(x=mtime, y=radar_gen.lung_signal)\n i_signal_fig.add_scatter(x=mtime, y=radar_gen.heart_signal)\n # i_signal_fig.add_scatter(x=mtime, y=radar_gen.noise[0])\n print(radar_gen.noise)\n i_signal_fig.show()\n\n #fourier transform\n data_f = fft(i_signal)\n xf = fftfreq(len(data_f), 1 / fs)\n size_data=len(data_f)\n freq_spec = px.line(x=xf, y=2.0/size_data*np.abs(data_f), title='frequency domain')\n\n freq_spec.show()\n\n\n\n\n\ndef analyse_real_data(index):\n parent_folder=pathlib.Path(__file__).parent\n parent_folder = str(parent_folder)+\"/data/radar\"\n list_path=[]\n for root, _, filenames in os.walk(parent_folder):\n for f in filenames:\n file_path=root + '/' + f\n list_path.append(file_path)\n i_path=index\n get_file = list_path[i_path]\n\n #==========================analyse real data =====================\n data = read_txt_data(file_path=get_file)\n fs=100\n time=np.arange(0, len(data)/fs, 1/fs)\n fig = px.line(x=time, y=data, title='radar data')\n fig.show()\n\n #==================analyse frequency spectrum============\n data_f=fft(data)\n len_data=len(data_f)\n xf=fftfreq(len(data), 1/fs)\n fig2=px.line(x=xf, y=2/len_data*np.abs(data_f), title='frequency domain')\n fig2.show()\n\ndef analyse_test_gened_data():\n import utils_v1.file_utils.file_pandas as read_file_pandas\n parent_folder = \"E:\\\\lab_project\\\\hr_processing\\\\data\\\\data_gen\"\n freq_file = parent_folder + \"\\\\\" + \"\\\\frequency.csv\"\n data_folder = parent_folder + \"\\\\\" + \"\\\\data\"\n list_file = read_file_pandas.get_list_file(data_folder)\n\n # ======read frequency====\n freq_list = read_file_pandas.read_csv_file(freq_file)\n freq_list = freq_list.freq[0:9]\n\n chosen_file = list_file[2]\n data = read_file_pandas.read_csv_file(chosen_file)\n data = data.data\n print(data)\n time = np.arange(0, 20, 0.01)\n fig = px.line(x=time, y=data, title='test')\n fig.show()\n\n\n #==================analyse frequency spectrum============\n data = np.asarray(data)\n data_f = fft(data)\n len_data = len(data_f)\n xf = fftfreq(len(data), 1 / 100)\n fig2 = px.line(x=xf, y=2 / len_data * np.abs(data_f), title='frequency domain')\n fig2.show()\n\n\nif __name__ == '__main__':\n index_file=1\n\n analyse_real_data(index_file)\n analyse_test_gened_data()\n # test_signal_render()\n" }, { "alpha_fraction": 0.5357142686843872, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 28, "blob_id": "e01526ee0e7829756726063e40a2556ce43e7c46", "content_id": "452832cc05d01d6405aa80cba67a289b1d4cadad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/utils_v1/file_utils/__init__.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "__all__ = [\"file_pandas.py\"]" }, { "alpha_fraction": 0.6853932738304138, "alphanum_fraction": 0.6853932738304138, "avg_line_length": 27.66666603088379, "blob_id": "fcd355da3f17c03a1c95ad8fd08a9c46f44db918", "content_id": "4d11bc9ccb9842f08acf5dc3623c0116fac1b5d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 89, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/utils_v1/config_path/project.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "def get_root_file(file):\r\n import pathlib\r\n return str(pathlib.Path(file).parent)\r\n" }, { "alpha_fraction": 0.5611921548843384, "alphanum_fraction": 0.5967025756835938, "avg_line_length": 24.852458953857422, "blob_id": "9791474b9f97a193f1fe191b552840c1bc98bd8b", "content_id": "41913839871242283b799c05af0acceb375d8fec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1577, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/demolstm.py", "repo_name": "zonyzhao/HRReference", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\n\n# import plotly.express as px\n\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\n\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i:T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\n\nbatch_size, n_train = 16, 600\n# only the first 'n_train' examples are used for training\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\n\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform(m.weight)\n\n\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1))\n net.apply(init_weights)\n return net\n\n\nloss = nn.MSELoss()\n\n\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n print(f'epoch {epoch + 1}, '\n f'loss: {d2l.evaluate_loss(net, train_iter, loss):f}')\n\n\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\n\nonestep_preds = net(features)\nd2l.plot(\n [time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x',\n legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)\n)\n# print(1)\n# fig = px.line(x=time, y=x.detach().numpy())\n# fig.show()\n# print(2)\n" } ]
19
rongyux/FilterComments
https://github.com/rongyux/FilterComments
e5f9cc34882471301c5a9c4dd222c7b785b7c763
b80976f099adb36f64ab1a07d34dd1b0a34f7928
ac175a744e120cba27430a32aa8f01e2ec11a7fd
refs/heads/master
2020-04-10T16:55:11.904504
2016-10-16T13:47:04
2016-10-16T13:47:04
41,923,352
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8012422323226929, "alphanum_fraction": 0.8012422323226929, "avg_line_length": 79.5, "blob_id": "cb519d28d9b14a1b68855486400e734dfad85689", "content_id": "4b4415c929090cb357e7d553902d43f78eed51cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 81, "num_lines": 2, "path": "/README.md", "repo_name": "rongyux/FilterComments", "src_encoding": "UTF-8", "text": "The function is to get the comments from a bar website through a pytho spider.\nAnd then the machine learning models of bayes is trained by the data of comments.\n" }, { "alpha_fraction": 0.5971564054489136, "alphanum_fraction": 0.6211916208267212, "avg_line_length": 32.348838806152344, "blob_id": "0fc1bfcdf2db5984afe5505f84126b8321403481", "content_id": "492bcd25ff4ea3547ba54fd58f633c2006c2d58b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2954, "license_type": "no_license", "max_line_length": 74, "num_lines": 86, "path": "/bayes.py", "repo_name": "rongyux/FilterComments", "src_encoding": "UTF-8", "text": "\r\nfrom numpy import *\r\n\r\n\r\ndef loadDataSet():\r\n fd = open('comments.txt')\r\n postingList = []\r\n classVec = []\r\n for line in fd.readlines():\r\n \ttmp = line.split()\r\n \tpostingList.append(tmp[1:])\r\n \tclassVec.append(int(tmp[0]))\r\n return postingList,classVec\r\n\r\ndef createVocabList(dataSet):\r\n vocabSet = set([]) #create empty set\r\n for document in dataSet:\r\n vocabSet = vocabSet | set(document) #union of the two sets\r\n return list(vocabSet)\r\n\r\ndef setOfWords2Vec(vocabList, inputSet):\r\n returnVec = [0]*len(vocabList)\r\n for word in inputSet:\r\n if word in vocabList:\r\n returnVec[vocabList.index(word)] = 1\r\n else: print \"the word: %s is not in my Vocabulary!\" % word\r\n return returnVec\r\n\r\ndef trainNB0(trainMatrix,trainCategory):\r\n #calculate the prior probablity p(ci) and condition probablity p(w/ci)\r\n numTrainDocs = len(trainMatrix)\r\n numWords = len(trainMatrix[0])\r\n pAbusive = sum(trainCategory)/float(numTrainDocs)\r\n p0Num = ones(numWords); p1Num = ones(numWords) #change to ones() \r\n p0Denom = 2.0; p1Denom = 2.0 #change to 2.0\r\n for i in range(numTrainDocs):\r\n \t# print trainCategory[i]\r\n if trainCategory[i] == 1:\r\n \t\r\n p1Num += trainMatrix[i]\r\n p1Denom += sum(trainMatrix[i])\r\n else:\r\n \t\r\n p0Num += trainMatrix[i]\r\n p0Denom += sum(trainMatrix[i])\r\n p1Vect = log(p1Num/p1Denom) #change to log()\r\n p0Vect = log(p0Num/p0Denom) #change to log()\r\n\r\n return p0Vect,p1Vect,pAbusive\r\n\r\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\r\n # print log(pClass1),log(1.0 - pClass1)\r\n # print sum(vec2Classify * p1Vec),sum(vec2Classify * p0Vec)\r\n p1 = sum(vec2Classify * p1Vec) + log(pClass1) #element-wise mult\r\n p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)\r\n # print p0,p1\r\n if p1 > p0:\r\n return 1\r\n else: \r\n return 0\r\n \r\ndef bagOfWords2VecMN(vocabList, inputSet):\r\n returnVec = [0]*len(vocabList)\r\n for word in inputSet:\r\n if word in vocabList:\r\n returnVec[vocabList.index(word)] += 1\r\n return returnVec\r\n\r\ndef testingNB():\r\n listOPosts,listClasses = loadDataSet()\r\n myVocabList = createVocabList(listOPosts)\r\n # print myVocabList\r\n trainMat=[]\r\n for postinDoc in listOPosts:\r\n trainMat.append(setOfWords2Vec(myVocabList, postinDoc))\r\n # print trainMat\r\n p0V,p1V,pAb = 
trainNB0(array(trainMat),array(listClasses))\r\n testEntry = ['friends', 'wish', 'classes']\r\n thisDoc = array(setOfWords2Vec(myVocabList, testEntry))\r\n # print thisDoc\r\n # print myVocabList\r\n print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)\r\n testEntry = ['stupid', 'garbage']\r\n thisDoc = array(setOfWords2Vec(myVocabList, testEntry))\r\n print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)\r\n\r\ntestingNB()" }, { "alpha_fraction": 0.5624227523803711, "alphanum_fraction": 0.6996291875839233, "avg_line_length": 36.57143020629883, "blob_id": "f2920a79e576a798c1c910e057043e7a0dfefff8", "content_id": "f7aee4dc8fec260b4ef407365bb068d2c818cf7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1618, "license_type": "no_license", "max_line_length": 381, "num_lines": 42, "path": "/spider.py", "repo_name": "rongyux/FilterComments", "src_encoding": "UTF-8", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport json\r\nimport time\r\n\r\nheaders = {\r\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',\r\n'Accept-Encoding': 'gzip, deflate, br',\r\n'Connection': 'keep-alive',\r\n'Cookie': '__cfduid=d653bf931cbde10f9243b63e991f70dc41466778585; loid=a5WUnHRHlleKL9OSSR; loidcreated=2016-06-24T14%3A29%3A45.413Z; _recent_srs=t5_2qu49; _ga=GA1.2.54465388.1466778724; pc=ne; __utma=55650728.54465388.1466778724.1466778728.1466843492.2; __utmz=55650728.1466778728.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmb=55650728.0.10.1466843492; __utmc=55650728',\r\n'Host': 'www.reddit.com',\r\n'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',\r\n}\r\n# url = 'https://www.reddit.com/r/AskReddit/comments/4qfh55/what_are_some_of_the_best_life_tips/'\r\n# url = 'https://www.reddit.com/r/community/comments/2fchpz/what_ever_happened_to_the_deans_dalmatian_fetish/'\r\nurl = 'https://www.reddit.com/r/AskReddit/comments/4qfh01/what_are_some_classes_you_must_take_in/'\r\n\r\nr = requests.get(url,headers=headers)\r\nr.encoding = r.apparent_encoding\r\n# print r.text\r\n\r\n# div class=\"md\"\r\n\r\nsoup = BeautifulSoup(r.text)\r\nres = soup.select(\"div.md\")\r\ncomments = []\r\nfor item1 in res[1:]:\r\n comments.append(item1.contents)\r\nprint comments\r\n\r\nfd = open('comments.txt','w+')\r\n\r\np_soup = BeautifulSoup(str(comments))\r\nres2 = p_soup.findAll('p')\r\nfor item2 in res2:\r\n ct = str(item2.contents).encode('utf-8')\r\n print ct[3:-2]\r\n fd.write(ct[3:-2] + '\\n')\r\n\r\nfd.close()" } ]
3
cheery/ratsrose
https://github.com/cheery/ratsrose
037eea428b4117410bbafd55e1bef33a0eff7d33
3f843e73ce7475b656437fab819da404eb8442c3
d76c17eb2cb5609affd9d8296f2d8af6ecd10601
refs/heads/master
2020-04-06T07:01:00.723156
2017-04-11T00:12:02
2017-04-11T00:12:02
37,348,775
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5773392915725708, "alphanum_fraction": 0.5811585187911987, "avg_line_length": 33.15217208862305, "blob_id": "482face20e07d0b1890a1a58c61083adbc15d48a", "content_id": "c7aefc21118a3ea4a96b2eced746d452d8231b4e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1571, "license_type": "permissive", "max_line_length": 72, "num_lines": 46, "path": "/register_alloc.py", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "\"\"\"\n Select location for every virtual register.\n\"\"\"\nfrom structures import *\n\ndef alloc(block, registers):\n # At this point, virtual registers are replaced with real registers.\n colors = {}\n\n # Our graph contains both interferences and coalescing knowledge.\n graph = {}\n def get(vreg):\n if vreg in graph:\n return graph[vreg]\n graph[vreg] = result = (set(), set())\n return result\n\n active = set()\n for cell in reversed(block):\n cell.active = active = (active ^ cell.defs) | cell.uses\n for vreg in active:\n get(vreg)[0].update(active ^ {vreg})\n\n if isinstance(cell, Motion):\n get(cell.src)[1].add(cell.dst)\n get(cell.dst)[1].add(cell.src)\n # Going through steps in chaitin-briggs algorithm\n stack = []\n # First simplification...\n while len(graph) > 0:\n for vreg, (interfere, coalesce) in graph.items():\n if len(interfere) < len(registers):\n for other in interfere:\n graph[other][0].discard(vreg)\n stack.append((vreg, graph.pop(vreg)))\n break\n else:\n # The code compiled doesn't cause this situation yet.\n assert False, \"XXX: the next part of coloring\"\n # Then an attempt to color, no failure recovery yet.\n while len(stack) > 0:\n vreg, (interfere, coalesce) = stack.pop()\n filled = set(colors[v] for v in interfere if v in colors)\n avail = registers ^ filled\n colors[vreg] = avail.pop()\n return colors\n" }, { "alpha_fraction": 0.815330982208252, "alphanum_fraction": 0.815330982208252, "avg_line_length": 56.400001525878906, "blob_id": "c43424d5d2403b7897288d31facd0d3e67aa8620", "content_id": "c27d88d4b2b29ac4629afb6aae0f339fa0caa267", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 287, "license_type": "permissive", "max_line_length": 149, "num_lines": 5, "path": "/dev/stack_reservation.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Stack reservation and allocation to allow spilling\n\nRather than implicitly reserving the location for register spilling, I'd prefer if it came from a code object that can do the reservation explicitly.\n\nThe spilling is allowed to fail if there is no reservation for spilled registers.\n" }, { "alpha_fraction": 0.7881944179534912, "alphanum_fraction": 0.7881944179534912, "avg_line_length": 95, "blob_id": "f99691c3d77b39d223d4a61dcbcff3848eb5216f", "content_id": "13f698f45c29a97b2cbf2193d0449f1e6b07ba18", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 288, "license_type": "permissive", "max_line_length": 189, "num_lines": 3, "path": "/dev/x86_bios_instruction_tilings.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# More instruction tilings and a simple program that is able to run in place of MenuetOS kernel.\n\nIn realmode, the program needs to use services of BIOS to print on screen. 
I think it would be useful to implement just few ones, so we'd have some programs that would really run somewhere.\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 50.66666793823242, "blob_id": "e98e24a7a9a62dbd063b241e112c58561aad695d", "content_id": "9ce8236b2b8a5c20dbd48e23ac308a33da9818aa", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 155, "license_type": "permissive", "max_line_length": 109, "num_lines": 3, "path": "/dev/target_considerations.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Considerations for other target machines.\n\nSince it's a parametric and retargetable compiler, it could be fun to compile to ARM or MIPSEL architectures.\n" }, { "alpha_fraction": 0.7693014740943909, "alphanum_fraction": 0.7729779481887817, "avg_line_length": 59.44444274902344, "blob_id": "2f4acf1ff4f49826f4eac8b727b21294262b2574", "content_id": "b77a1b8ad6c915a8afb6f6b3d42ee36e67a229fe", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1088, "license_type": "permissive", "max_line_length": 343, "num_lines": 18, "path": "/README.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Rat's Rose Compiler\n\nIt is possible the project doesn't ever finish. The primary goal of the project is to create a compiler that can do 1:1 translation from the source code to target machine code, similar to what's being done in some principled fasm assembly projects. The goal is deemed complete when the compiler can compile code equivalent for MenuetOS kernel.\n\n## Getting Started\n\nRun the `scratch.py` in python 2.7 or newer. Check inside.\n\n## Contributing\n\nUse of this project will likely involve contributions. Here's how to do it:\n\n * Read the `dev/`, consider whether your contribution matches the projects nature.\n * Fork the project in github.\n * Write in the changes, add some project notes and insights into `dev/`, possibly move items into `dev/done/` if you think your work completes some task.\n * Do a pull request in github for `cheery/ratsrose`.\n\nIf you pass the quality requirements, that is the code remains in the quality where it was. Then you'll eventually see the changes in the main repository, otherwise I will tell what's wrong and you may or may not fix it.\n" }, { "alpha_fraction": 0.8231707215309143, "alphanum_fraction": 0.8231707215309143, "avg_line_length": 53.66666793823242, "blob_id": "4f42ed24c27745ad2291ef1125c920cdc5171c58", "content_id": "0c7cc6a202ffe0e1b7d40125c8c3ed0427552304", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 164, "license_type": "permissive", "max_line_length": 112, "num_lines": 3, "path": "/dev/coalescing_and_spilling.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Coalescing and spilling into register allocator\n\nThe compiler doesn't do any spilling or coalescing during register allocation. 
Therefore the code is fairly bad.\n" }, { "alpha_fraction": 0.5496394634246826, "alphanum_fraction": 0.5551857948303223, "avg_line_length": 25.130434036254883, "blob_id": "9bdcee2fb556b50f133d698c9fb762076e6d8f8a", "content_id": "10419c6955ec98dc1cd99b55b84063a3e2b6b3af", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1803, "license_type": "permissive", "max_line_length": 71, "num_lines": 69, "path": "/structures.py", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "\"\"\"\n Intermediate representation for\n programs.\n\"\"\"\nclass Const(object):\n def __init__(self, value, klass=None):\n self.value = value\n self.klass = klass\n self.tile = None\n\n def postorder(self, prefix):\n prefix.append(self)\n return prefix\n\nclass Op(object):\n def __init__(self, name, operands):\n self.name = name\n self.operands = operands\n self.tile = None\n\n def __getitem__(self, index):\n return self.operands[index]\n\n def __len__(self):\n return len(self.operands)\n\n def postorder(self, prefix):\n for operand in self.operands:\n operand.postorder(prefix)\n prefix.append(self)\n return prefix\n\nclass VReg(object):\n next_uid = 1\n def __init__(self, assign=None, klass=None):\n self.assign = assign\n self.klass = klass\n self.uid = VReg.next_uid\n VReg.next_uid += 1\n\n def __repr__(self):\n return 'vreg{}'.format(self.uid)\n\nclass Code(object):\n def __init__(self, form, uses=None, defs=None):\n self.form = form\n self.uses = set() if uses is None else uses\n self.defs = set() if defs is None else defs\n\n def __str__(self):\n line = self.form[0].format(*self.form[1:])\n if len(self.defs) > 0:\n line = line.ljust(25) + format_vregs(\" def[{}]\", self.defs)\n if len(self.uses) > 0:\n line = line.ljust(40) + format_vregs(\" use[{}]\", self.uses)\n return line\n\nclass Motion(object):\n def __init__(self, dst, src):\n self.dst = dst\n self.src = src\n self.uses = {src}\n self.defs = {dst}\n\n def __str__(self):\n return \"{} = {}\".format(self.dst, self.src)\n\ndef format_vregs(form, vregs):\n return form.format(', '.join(map(repr, vregs)))\n" }, { "alpha_fraction": 0.8296943306922913, "alphanum_fraction": 0.8296943306922913, "avg_line_length": 75.33333587646484, "blob_id": "ef372eb90c1d371f38d1cb853c986bf7a640d205", "content_id": "e572d2cea00195e1867093697dff6ab4fbc88936", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 229, "license_type": "permissive", "max_line_length": 168, "num_lines": 3, "path": "/dev/type_differentiation_patterns.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Type differentation into instruction selection patterns.\n\nCurrently there's no way to handle floating point operations, or to differentiate between pointers and other kind of integers. 
It will likely affect instruction tiling.\n" }, { "alpha_fraction": 0.7672811150550842, "alphanum_fraction": 0.7672811150550842, "avg_line_length": 61, "blob_id": "70562d5484e77b513ed3098a48b6f6d88fc72f72", "content_id": "209daaf2afe5d6cc551cf98f22abac6bd60d11c4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 434, "license_type": "permissive", "max_line_length": 239, "num_lines": 7, "path": "/dev/done/name_and_repository.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Name and Repository (and license, and readme)\n\nI decided to give it a name after a locution: \"I don't give a rat's rosy rumpus what\"... Derived to \"Rat's rose\", as it describes the severity and attitude of the project very well. This is a project that was abandoned when it was created.\n\nRevised BSD license, for similar reasons as stated above.\n\nThe README will contain simple instructions for how to start, and how to contribute.\n" }, { "alpha_fraction": 0.8115941882133484, "alphanum_fraction": 0.8115941882133484, "avg_line_length": 68, "blob_id": "3b588fb5aeb24404e58c607cc46c324a39d12c53", "content_id": "88608d283f8ddf29721fe5d512f6294c475a6866", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 207, "license_type": "permissive", "max_line_length": 143, "num_lines": 3, "path": "/dev/control_flow.md", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "# Control flow graphs, possibly rest of the NOLTIS algorithm.\n\nAfter some linear programs can be successfully compiled, it would make not sense to not get on compiling programs with structured control flow.\n" }, { "alpha_fraction": 0.5856102108955383, "alphanum_fraction": 0.6001821756362915, "avg_line_length": 28.675676345825195, "blob_id": "f59b8a277e1505b30dfa1ddb26035d8f52bfa9f4", "content_id": "bfafd5e5227d7ce41d26a291caf2291bd5b8e754", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1098, "license_type": "permissive", "max_line_length": 80, "num_lines": 37, "path": "/scratch.py", "repo_name": "cheery/ratsrose", "src_encoding": "UTF-8", "text": "import instruction_select\nimport register_alloc\nfrom structures import *\nfrom patterns import *\nfrom x86_realmode_tiles import tiles\n\ncode = Op('sub', [\n Op('add', [\n Op('sub', [Const(0), Const(6)]),\n Const(2)]),\n Op('add', [\n Op('sub', [Const(0), Const(6)]),\n Const(2)])])\n\nblock = []\ndst = instruction_select.select(code, tiles, block)\ncolors = register_alloc.alloc(block, {'ax', 'bx', 'cx', 'dx'})\n\n# Next map the colors while emitting the code.\ndef colormap(node):\n if isinstance(node, VReg):\n return colors[node]\n return node\n\nprint \"use16\"\nprint \"org 0x0\"\nfor cell in block:\n print '; {:<64} {}'.format(cell, format_vregs(\"active=[{}]\", cell.active))\n if isinstance(cell, Code):\n form = map(colormap, cell.form)\n print \" \" + form[0].format(*form[1:])\n elif isinstance(cell, Motion):\n # there's no coalescing, but this occassionally happens by fortune.\n if colors[cell.dst] != colors[cell.src]:\n print \" mov {}, {}\".format(colors[cell.dst], colors[cell.src])\n else:\n assert False\n" } ]
11
jaya/ethereum-paywall
https://github.com/jaya/ethereum-paywall
682a90f420f19a139909c23935f0ce800bd5aa9c
cd2cbe6c5813de8c5e12f8feddce64462d9ce8b4
daa1679e14a8289f92527c6e73bb2d589bff1f3e
refs/heads/master
2021-05-04T05:37:00.034415
2018-02-05T17:55:19
2018-02-05T17:55:19
120,341,762
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6744730472564697, "alphanum_fraction": 0.6920374631881714, "avg_line_length": 26.54838752746582, "blob_id": "41308416effb011c039a0aeb7473e8262ec57a1e", "content_id": "ceb596f968d58f7f4292423952b772cae3402653", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 854, "license_type": "no_license", "max_line_length": 74, "num_lines": 31, "path": "/deploy.py", "repo_name": "jaya/ethereum-paywall", "src_encoding": "UTF-8", "text": "import solc\nfrom web3 import Web3, HTTPProvider\nimport json\n\nsource_file = './contracts/Coin.sol'\nartifacts = solc.compile_files([source_file])\n\nprint(artifacts.keys())\n\ncoin_artifact = artifacts[source_file + ':' + 'Coin']\n\njson.dump(\n coin_artifact['abi'],\n open('web/public/contract_interface.json', 'w'), indent=2)\n\nweb3 = Web3(HTTPProvider('http://localhost:8545'))\n\nprint('Owner:', web3.eth.accounts[0])\n\nCoin = web3.eth.contract(abi=coin_artifact['abi'],\n bytecode=coin_artifact['bin'])\n\ntx_hash = Coin.deploy(transaction={'from': web3.eth.accounts[0] })\ntx_receipt = web3.eth.getTransactionReceipt(tx_hash)\n\nprint('\\nRECEIPT:')\nfor key, value in tx_receipt.items(): print('\\t', key, value)\n\ncontract_address = tx_receipt['contractAddress']\n\njson.dump(contract_address, open('web/public/contract_address.json', 'w'))\n" } ]
1
aoll/nm-otool
https://github.com/aoll/nm-otool
fea363ccd5402e30f62c208fd3bf48fe75251dec
56fb02564fa5bfbc9f8ad41a023be72abd6fdc81
fb1bf2a7189f8c9c3b611c119c0b1b5aeb7d0656
refs/heads/master
2021-08-22T06:17:43.131728
2017-11-29T13:37:59
2017-11-29T13:37:59
107,954,237
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3996407687664032, "alphanum_fraction": 0.41939830780029297, "avg_line_length": 33.796875, "blob_id": "460d93636213a5be30dedb447353cc791045a3de", "content_id": "b8a73e3253b5cafec84bf66740acc44f804510ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2227, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/indian/src/ft_infos_segment.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_infos_segment.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:21:39 by aollivie #+# #+# */\n/* Updated: 2017/11/17 16:49:10 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tloop_segment(\n\tstruct load_command *lc, int *index, t_seg_infos *seg_infos)\n{\n\tstruct segment_command\t*segment;\n\tstruct section\t\t\t*section;\n\tuint32_t\t\t\t\tloop;\n\n\tsegment = (struct segment_command *)lc;\n\tsection = (void *)segment + sizeof(*segment);\n\tloop = 0;\n\twhile (loop < segment->nsects)\n\t{\n\t\tif (ft_strcmp(section->sectname, SECT_TEXT) == 0)\n\t\t\tseg_infos->text_nsect = *index + 1;\n\t\telse if (ft_strcmp(section->sectname, SECT_DATA) == 0)\n\t\t\tseg_infos->data_nsect = *index + 1;\n\t\telse if (ft_strcmp(section->sectname, SECT_BSS) == 0)\n\t\t\tseg_infos->bss_nsect = *index + 1;\n\t\tsection = (void *)section + sizeof(*section);\n\t\tloop++;\n\t\t*index = *index + 1;\n\t}\n}\n\nt_seg_infos\t*ft_infos_segment(char *ptr, char *ptr_end,\n\tstruct mach_header *header, struct load_command *lc)\n{\n\tt_seg_infos\t\t\t\t\t*seg_infos;\n\tuint32_t\t\t\t\t\ti;\n\tint\t\t\t\t\t\t\tindex;\n\n\tif (!(seg_infos = malloc(sizeof(t_seg_infos))))\n\t\treturn (NULL);\n\t//useless ?\n\t// if ((void *)(lc = (void *)ptr + sizeof(*header)) > (void *)ptr_end)\n\t// \treturn (NULL);\n\tif (ft_check_load(lc, header->ncmds, header->sizeofcmds))\n\t\treturn (NULL);\n\tft_init_seg_infos(seg_infos);\n\ti = 0;\n\tindex = 0;\n\twhile (i < header->ncmds)\n\t{\n\t\tif (lc->cmd == LC_SEGMENT)\n\t\t\tloop_segment(lc, &index, seg_infos);\n\t\tlc = (void *)lc + swap_uint32(lc->cmdsize);\n\t\ti++;\n\t}\n\treturn (seg_infos);\n}\n" }, { "alpha_fraction": 0.3628988564014435, "alphanum_fraction": 0.3921038508415222, "avg_line_length": 36.73469543457031, "blob_id": "1ba8b9b6ce11e5d35638b0f31b29efd3b0042b3a", "content_id": "87c517361340064d8e455f856471b8c0bca431fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1849, "license_type": "no_license", "max_line_length": 80, "num_lines": 49, "path": "/src/handle_64_text.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle_64_text.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:46 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:49:46 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tprint_name(char *av)\n{\n\tif (av)\n\t{\n\t\twrite(1, av, ft_strlen(av));\n\t\twrite(1, \":\\n\", 
2);\n\t}\n}\n\nint\t\t\thandle_64_text(char *ptr, char *ptr_end, char *av, int is_indian)\n{\n\tstruct mach_header_64\t\t*header;\n\tstruct section_64\t\t\t*section;\n\tt_ptr\t\t\t\t\t\tptr_infos;\n\n\tptr_infos.ptr = ptr;\n\tptr_infos.ptr_end = ptr_end;\n\tptr_infos.is_indian = is_indian;\n\tptr_infos.is_64 = 1;\n\tif ((void *)ptr + sizeof(struct mach_header_64) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\theader = (struct mach_header_64 *)ptr;\n\tif ((void *)ptr + swap_uint32_check(header->sizeofcmds, is_indian)\n\t> (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (!(section = ft_find_segment_section_64(\n\t\t&ptr_infos, is_indian, SEG_TEXT, SECT_TEXT)))\n\t\treturn (EXIT_FAILURE);\n\tprint_name(av);\n\tprint_text_text_section(\n\t\t(void*)ptr + swap_uint32_check(section->offset, is_indian)\n\t\t, section->addr,\n\t\tswap_uint32_check(section->size, is_indian), &ptr_infos);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.27327781915664673, "alphanum_fraction": 0.29901590943336487, "avg_line_length": 46.17856979370117, "blob_id": "e0b2c9f34eeed9b600cf0848514900440ea8b077", "content_id": "ae86712fac1aaa42ca654beaab5a1200ea63168b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1321, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/indian/src/sort_and_print_outpout.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* sort_and_print_outpout.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:24:35 by aollivie #+# #+# */\n/* Updated: 2017/11/17 16:48:57 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tsort_and_print_outpout(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos)\n{\n\tchar\t\t\t\t*stringtable;\n\tstruct nlist\t\t*array;\n\n\tif ((void *)(array = ptr + swap_uint32(sym->symoff)) > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)(stringtable = ptr + swap_uint32(sym->stroff)) > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\n\treturn (ft_sort(array, swap_uint32(sym->nsyms), stringtable, seg_infos));\n}\n" }, { "alpha_fraction": 0.28236183524131775, "alphanum_fraction": 0.3035579025745392, "avg_line_length": 46.17856979370117, "blob_id": "df82c6ea980b40e5999e1efb7f5e750926a467bb", "content_id": "fe0809060edc4ae488344755e7edb17382359ee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1321, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/indian/src/ft_find_segment_section.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_segment_section.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:59 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:33:15 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstruct section\t*ft_find_segment_section(char *ptr,\n\tstruct mach_header *header, char *segment_name, char *section_name)\n{\n\tstruct 
load_command\t\t\t*lc;\n\tstruct segment_command\t\t*seg;\n\tstruct section\t\t\t\t*section;\n\n\tlc = (void *)ptr + sizeof(*header);\n\tif (!(seg = ft_find_segment(lc, header->ncmds, segment_name)))\n\t\treturn (NULL);\n\tif (!(section = ft_find_section(seg, section_name)))\n\t\treturn (NULL);\n\treturn (section);\n}\n" }, { "alpha_fraction": 0.302752286195755, "alphanum_fraction": 0.33289647102355957, "avg_line_length": 40.24324417114258, "blob_id": "b6a5451cded3b9e7a3e34e699b8e6eba94364ec8", "content_id": "f3256b7072d7f25c802dc50fa4a29ec9bfbba5a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1526, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/indian/src/handle_64_text.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle_64_text.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:46 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:22:47 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\thandle_64_text(char *ptr, char *ptr_end, char *av)\n{\n\tstruct mach_header_64\t\t*header;\n\tstruct section_64\t\t\t*section;\n\n\tif ((void *)ptr + sizeof(struct mach_header_64) > (void *)ptr_end)\n\t{\n\t\treturn (EXIT_FAILURE);\n\t}\n\theader = (struct mach_header_64 *)ptr;\n\tif ((void *)ptr + header->sizeofcmds > (void *)ptr_end)\n\t{\n\t\treturn (EXIT_FAILURE);\n\t}\n\tif (!(section = ft_find_segment_section_64(\n\t\tptr, header, SEG_TEXT, SECT_TEXT)))\n\t\treturn (EXIT_FAILURE);\n\twrite(1, av, ft_strlen(av));\n\twrite(1, \":\\n\", 2);\n\tprint_text_text_section(\n\t\t(void*)ptr + section->offset, section->addr, section->size, 1);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.3865760266780853, "alphanum_fraction": 0.4052676260471344, "avg_line_length": 29.797468185424805, "blob_id": "2e631f83963c94d86908bcef1baf3c5f2e037f12", "content_id": "26125bda7e5ec63c56061ad23a6eb7c9d2a22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2354, "license_type": "no_license", "max_line_length": 80, "num_lines": 79, "path": "/indian/src/print_outpout_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* print_outpout_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:23:38 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:46:09 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tcase_n_sect(\n\tstruct nlist_64 *nlist, t_seg_infos *seg_infos, char *type)\n{\n\tif (nlist->n_sect == seg_infos->text_nsect)\n\t\t*type = 't';\n\telse if (nlist->n_sect == seg_infos->data_nsect)\n\t\t*type = 'd';\n\telse if (nlist->n_sect == seg_infos->bss_nsect)\n\t\t*type = 'b';\n\telse\n\t\t*type = 's';\n}\n\nstatic void\tcase_n_undef(struct nlist_64 *nlist, char *type)\n{\n\t*type = 'u';\n\tif (nlist->n_value != 0)\n\t\t*type = 'c';\n}\n\nstatic 
int\tset_type(struct nlist_64 *nlist, t_seg_infos *seg_infos)\n{\n\tchar\t\t\t\ttype;\n\tint\t\t\t\t\tc;\n\n\ttype = '?';\n\tc = nlist->n_type & N_TYPE;\n\tif (c == N_UNDF)\n\t\tcase_n_undef(nlist, &type);\n\telse if (c == N_ABS)\n\t\ttype = 'a';\n\telse if (c == N_SECT)\n\t\tcase_n_sect(nlist, seg_infos, &type);\n\telse if (c == N_PBUD)\n\t\ttype = 'u';\n\telse if (c == N_INDR)\n\t\ttype = 'i';\n\treturn (type);\n}\n\nint\t\t\tprint_outpout_64(\n\tstruct nlist_64 *nlist, char *stringtable,\n\tt_seg_infos *seg_infos, t_cmd_flag *cmd_f)\n{\n\tchar\t\t\t\ttype;\n\n\tif ((nlist->n_type & N_STAB) != 0)\n\t\treturn (EXIT_SUCCESS);\n\ttype = set_type(nlist, seg_infos);\n\tif (cmd_f->u && type != 'u')\n\t\treturn (EXIT_SUCCESS);\n\tif (cmd_f->uu && type == 'u')\n\t\treturn (EXIT_SUCCESS);\n\tif (cmd_f->g && !(nlist->n_type & N_EXT))\n\t\treturn (EXIT_SUCCESS);\n\tif ((nlist->n_type & N_EXT) && type != '?')\n\t\ttype = ft_toupper(type);\n\tprint_outpout_format_64(\n\t\tnlist, type, stringtable + nlist->n_un.n_strx, cmd_f);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.3946774899959564, "alphanum_fraction": 0.4217410981655121, "avg_line_length": 34.19047546386719, "blob_id": "e4c3fa7bf66ef9a30d01265f779564d2d7eddceb", "content_id": "7840d0f7e52a00d0108db90152a1cb658a6d8f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2217, "license_type": "no_license", "max_line_length": 80, "num_lines": 63, "path": "/indian/src/ft_infos_segment_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_infos_segment_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:21:30 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:25:16 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tloop_segment64(\n\tstruct load_command *lc, int *index, t_seg_infos *seg_infos)\n{\n\tstruct segment_command_64\t*segment;\n\tstruct section_64\t\t\t*section;\n\tuint32_t\t\t\t\t\tloop;\n\n\tsegment = (struct segment_command_64*)lc;\n\tsection = (void *)segment + sizeof(*segment);\n\tloop = 0;\n\twhile (loop < segment->nsects)\n\t{\n\t\tif (ft_strcmp(section->sectname, SECT_TEXT) == 0)\n\t\t\tseg_infos->text_nsect = *index + 1;\n\t\telse if (ft_strcmp(section->sectname, SECT_DATA) == 0)\n\t\t\tseg_infos->data_nsect = *index + 1;\n\t\telse if (ft_strcmp(section->sectname, SECT_BSS) == 0)\n\t\t\tseg_infos->bss_nsect = *index + 1;\n\t\tsection = (void *)section + sizeof(*section);\n\t\tloop++;\n\t\t*index = *index + 1;\n\t}\n}\n\nt_seg_infos\t*ft_infos_segment_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, struct load_command *lc)\n{\n\tt_seg_infos\t\t\t\t\t*seg_infos;\n\tuint32_t\t\t\t\t\ti;\n\tint\t\t\t\t\t\t\tindex;\n\n\tif (!(seg_infos = malloc(sizeof(t_seg_infos))))\n\t\treturn (NULL);\n\tft_init_seg_infos(seg_infos);\n\tif ((void *)(lc = (void *)ptr + sizeof(*header)) > (void *)ptr_end)\n\t\treturn (NULL);\n\tif (ft_check_load(lc, header->ncmds, header->sizeofcmds))\n\t\treturn (NULL);\n\ti = 0;\n\tindex = 0;\n\twhile (i < header->ncmds)\n\t{\n\t\tif (lc->cmd == LC_SEGMENT_64)\n\t\t\tloop_segment64(lc, &index, seg_infos);\n\t\tlc = (void *)lc + lc->cmdsize;\n\t\ti++;\n\t}\n\treturn (seg_infos);\n}\n" }, { "alpha_fraction": 
0.261428564786911, "alphanum_fraction": 0.2857142984867096, "avg_line_length": 34, "blob_id": "b8e3110b618b99e3faaaeb55693288e251c87987", "content_id": "8bd6451d7016a3fd227aaf07b8ea3e21a86670f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 80, "num_lines": 40, "path": "/indian/src/ft_check_load.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_check_load.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:19:38 by aollivie #+# #+# */\n/* Updated: 2017/11/17 15:52:26 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tft_check_load(struct load_command *lc, int ncmds, int sizeofcmds)\n{\n\tint\t\t\t\t\t\t\ti;\n\tint\t\t\t\t\t\t\tsize;\n\n\ti = 0;\n\tsize = 0;\n\twhile (i < ncmds)\n\t{\n\t\tif (lc->cmdsize < MIN_LOAD_SIZE)\n\t\t{\n\t\t\tft_putstr_fd(ERROR_LOAD_MIN_SIZE, STDERR);\n\t\t\treturn (EXIT_FAILURE);\n\t\t}\n\t\tsize += swap_uint32(lc->cmdsize);\n\t\ti++;\n\t\tlc = (void *)lc + swap_uint32(lc->cmdsize);\n\t}\n\tif (size != sizeofcmds)\n\t{\n\t\tft_putstr_fd(ERROR_LOAD_SIZE, STDERR);\n\t\treturn (EXIT_FAILURE);\n\t}\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.5340086817741394, "alphanum_fraction": 0.5412445664405823, "avg_line_length": 23.827587127685547, "blob_id": "d43157f70f23544aa74c9a406fe8dd51e51645a5", "content_id": "3e2ffb4623f6e4d8a5f06863d5308dd5ed9644ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 81, "num_lines": 29, "path": "/indian/norme.py", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "import re\n\ndef write_proto(src_file):\n\tm = re.findall('^[a-z].*',src_file.read(), re.MULTILINE)\n\tdest_h_file = open('ft_otool.test', 'w')\n\tfor proto in m:\n\t\tdest_h_file.write(proto + ';\\n')\n\tdest_h_file.close()\n\ndef write_functions(src_file):\n\n\ton = False\n\tf = None\n\tfor line in src_file:\n\t\tm = re.search('^[a-z].*', line)\n\t\tif m:\n\t\t\t# print(m.group(0))\n\t\t\tt = m.group(0).split('(')[0].replace('*', '\\t').replace(' ', '\\t').split('\\t')\n\t\t\t# print(t[len(t) - 1])\n\t\t\tf = open('src/'+t[len(t) - 1]+'.c', 'w')\n\t\t\ton = True\n\t\tif on:\n\t\t\tf.write(line)\n\t\tif on and line.rstrip('\\n') == '}':\n\t\t\tf.close()\n\t\t\ton = False\n\nif __name__ == '__main__':\n\tsrc_file = open('main.c', 'r')\n\t''' write prototype '''\n\t# write_proto(src_file)\n\twrite_functions(src_file)\n" }, { "alpha_fraction": 0.26144298911094666, "alphanum_fraction": 0.2893716096878052, "avg_line_length": 47.74074172973633, "blob_id": "70d606184cf22379da56684596eb5aeaad5ffee3", "content_id": "655f32f612a62674c29fe35e5687bff0ed1a03cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1289, "license_type": "no_license", "max_line_length": 80, "num_lines": 27, "path": "/indian/src/sort_and_print_outpout_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* sort_and_print_outpout_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ 
+#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:24:26 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:24:28 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tsort_and_print_outpout_64(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos)\n{\n\tchar\t\t\t\t*stringtable;\n\tstruct nlist_64\t\t*array;\n\n\tif ((void *)(array = ptr + sym->symoff) > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)(stringtable = ptr + sym->stroff) > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\treturn (ft_sort64(array, sym->nsyms, stringtable, seg_infos));\n}\n" }, { "alpha_fraction": 0.27799227833747864, "alphanum_fraction": 0.31274130940437317, "avg_line_length": 32.0638313293457, "blob_id": "9cc9a26f2b6393ac9bafe90a80f317a57be0d850", "content_id": "3131854263021dacc379a189f82a1f0cfc9b0e8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1554, "license_type": "no_license", "max_line_length": 80, "num_lines": 47, "path": "/indian/src/ft_copy_nlist64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_copy_nlist64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:19:56 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:31:20 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\t\t\tft_set_nlist64(\n\tstruct nlist_64 *l, struct nlist_64 *array, int i)\n{\n\tl->n_type = array[i].n_type;\n\tl->n_sect = array[i].n_sect;\n\tl->n_value = array[i].n_value;\n\tl->n_un.n_strx = array[i].n_un.n_strx;\n}\n\nstruct nlist_64\t\t**ft_copy_nlist64(struct nlist_64 *array, int nsyms)\n{\n\tstruct nlist_64\t**list;\n\tstruct nlist_64\t*l;\n\tint\t\t\t\ti;\n\n\tif (!(list = malloc(sizeof(struct nlist_64 *) * nsyms)))\n\t\treturn (NULL);\n\ti = 0;\n\twhile (i < nsyms)\n\t{\n\t\tif (!(l = malloc(sizeof(struct nlist_64))))\n\t\t{\n\t\t\twhile (--i >= 0)\n\t\t\t\tfree(list[i]);\n\t\t\tfree(list);\n\t\t\treturn (NULL);\n\t\t}\n\t\tft_set_nlist64(l, array, i);\n\t\tlist[i] = l;\n\t\ti++;\n\t}\n\treturn (list);\n}\n" }, { "alpha_fraction": 0.41010987758636475, "alphanum_fraction": 0.4281318783760071, "avg_line_length": 36.295082092285156, "blob_id": "772cea82c4c342a56084b9a357559fb810418ee9", "content_id": "f0da5816d3fd13b1ce7d1cf2301e090714eb7f51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2275, "license_type": "no_license", "max_line_length": 80, "num_lines": 61, "path": "/src/ft_otool.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_otool.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:21:53 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:22:41 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\t\t\tft_loop_error(char *av, int is_otool)\n{\n\tft_putstr_fd(av, is_otool ? STDOUT : STDERR);\n\tft_putstr_fd(\": \", is_otool ? 
STDOUT : STDERR);\n\tft_putstr_fd(ERROR_FORMAT_FILE, is_otool ? STDOUT : STDERR);\n\treturn (is_otool ? EXIT_SUCCESS : EXIT_FAILURE);\n}\n\nstatic unsigned int\tset_magic_number(\n\tunsigned int magic_number, t_cmd_flag *cmd_f)\n{\n\tunsigned int\t\ttmp;\n\n\ttmp = swap_uint32(magic_number);\n\tif (tmp == MH_MAGIC || tmp == MH_MAGIC_64)\n\t{\n\t\tcmd_f->is_indian = 1;\n\t\treturn (tmp);\n\t}\n\tcmd_f->is_indian = 0;\n\treturn (magic_number);\n}\n\nint\t\t\t\t\tft_otool(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tunsigned int magic_number;\n\n\tif (ptr >= ptr_end || (size_t)ptr_end - (size_t)ptr < 8)\n\t\treturn (ft_loop_error(av, cmd_f->is_otool));\n\tmagic_number = set_magic_number(*(int *)ptr, cmd_f);\n\tif (magic_number == MH_MAGIC)\n\t{\n\t\tif (cmd_f->is_otool)\n\t\t\treturn (handle_text(ptr, ptr_end, av, cmd_f->is_indian));\n\t\treturn (handle(ptr, ptr_end, cmd_f));\n\t}\n\tif (magic_number == MH_MAGIC_64)\n\t{\n\t\tif (cmd_f->is_otool)\n\t\t\treturn (handle_64_text(ptr, ptr_end, av, cmd_f->is_indian));\n\t\treturn (handle_64(ptr, ptr_end, cmd_f));\n\t}\n\tif (magic_number == FAT_CIGAM)\n\t\treturn (ft_fat_file(ptr, ptr_end, av, cmd_f));\n\tif (!ft_strncmp(ptr, ARMAG, SARMAG))\n\t\treturn (ft_ar_file(ptr, ptr_end, av, cmd_f));\n\treturn (ft_loop_error(av, cmd_f->is_otool));\n}\n" }, { "alpha_fraction": 0.4514094889163971, "alphanum_fraction": 0.4658753573894501, "avg_line_length": 37.514286041259766, "blob_id": "c1d11376d75593c3af1861acf83eb38596b95f58", "content_id": "a153ca78b8b478eedf622da03f23daeaf0d153cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 80, "num_lines": 70, "path": "/src/handle.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:23:09 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:49:56 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic t_seg_infos\t*init_loop_handle(char *ptr, char *ptr_end,\n\tstruct mach_header *header, struct load_command **lc)\n{\n\tt_seg_infos\t*seg_infos;\n\n\tif ((void *)(*lc = (void *)ptr + sizeof(*header))\n\t+ sizeof(struct load_command) > (void *)ptr_end)\n\t\treturn (NULL);\n\tif (!(seg_infos = ft_infos_segment(ptr, ptr_end, header, *lc)))\n\t\treturn (NULL);\n\treturn (seg_infos);\n}\n\nstatic int\t\t\tloop_handle(char *ptr, char *ptr_end,\n\tstruct mach_header *header, t_cmd_flag *cmd_f)\n{\n\tuint32_t\t\t\t\ti;\n\tt_seg_infos\t\t\t\t*seg_infos;\n\tstruct symtab_command\t*sym;\n\tstruct load_command\t\t*lc;\n\n\tif (!(seg_infos = init_loop_handle(ptr, ptr_end, header, &lc)))\n\t\treturn (EXIT_FAILURE);\n\ti = -1;\n\tseg_infos->cmd_f = cmd_f;\n\twhile (++i < swap_uint32_check(header->ncmds, cmd_f->is_indian))\n\t\tif (swap_uint32_check(lc->cmd, cmd_f->is_indian) == LC_SYMTAB)\n\t\t{\n\t\t\tif ((void *)(sym = (struct symtab_command *)lc) + sizeof(*sym)\n\t\t\t> (void *)ptr_end)\n\t\t\t\treturn (free_seg_infos(&seg_infos, EXIT_FAILURE));\n\t\t\telse if (sort_and_print_outpout(sym, ptr, ptr_end, seg_infos))\n\t\t\t\treturn (free_seg_infos(&seg_infos, EXIT_FAILURE));\n\t\t\tbreak ;\n\t\t}\n\t\telse if ((void *)(lc = 
(void *)lc\n\t\t+ swap_uint32_check(lc->cmdsize, cmd_f->is_indian))\n\t\t+ sizeof(struct load_command) > (void *)ptr_end)\n\t\t\treturn (free_seg_infos(&seg_infos, EXIT_FAILURE));\n\treturn (free_seg_infos(&seg_infos, EXIT_SUCCESS));\n}\n\nint\t\t\t\t\thandle(char *ptr, char *ptr_end, t_cmd_flag *cmd_f)\n{\n\tstruct mach_header\t*header;\n\n\tif ((void *)(header = (struct mach_header *)ptr)\n\t+ sizeof(struct mach_header) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)ptr + swap_uint32_check(header->sizeofcmds, cmd_f->is_indian)\n\t> (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (loop_handle(ptr, ptr_end, header, cmd_f))\n\t\treturn (EXIT_FAILURE);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.2602739632129669, "alphanum_fraction": 0.2831050157546997, "avg_line_length": 36.5428581237793, "blob_id": "524a79cd1d39d8b367d705ce56832abcd6e8ea85", "content_id": "e54bf4ab7803c36f1606f6bff122a78a5158eaeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1314, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/src/check_valid_file.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* check_valid_file.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:19:16 by aollivie #+# #+# */\n/* Updated: 2017/11/29 14:30:18 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tcheck_valid_file(char *ptr, char *ptr_end)\n{\n\tunsigned int magic_number;\n\n\tif (ptr >= ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tmagic_number = *(int *)ptr;\n\tif (magic_number == MH_MAGIC || magic_number == MH_MAGIC_64)\n\t{\n\t\treturn (EXIT_SUCCESS);\n\t}\n\telse if (magic_number == FAT_CIGAM)\n\t{\n\t\treturn (EXIT_SUCCESS);\n\t}\n\telse if (!ft_strncmp(ptr, ARMAG, SARMAG))\n\t{\n\t\treturn (EXIT_SUCCESS);\n\t}\n\treturn (EXIT_FAILURE);\n}\n" }, { "alpha_fraction": 0.43032050132751465, "alphanum_fraction": 0.4562477469444275, "avg_line_length": 25.701923370361328, "blob_id": "ce04c57e71fdbf472ef30168d5d7d65c21f60dba", "content_id": "b9a43a0539d0f2cf7e59db166c91a8b5be2d0e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2777, "license_type": "no_license", "max_line_length": 80, "num_lines": 104, "path": "/src/ft_sort64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_sort64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:40 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:48:02 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\t\tloop_check_index64(\n\tstruct nlist_64 **list, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos)\n{\n\tint\t\t\t\t\ti;\n\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (list[i])\n\t\t{\n\t\t\tif ((long)swap_uint32_check((long)list[i]->n_un.n_strx,\n\t\t\tseg_infos->cmd_f->is_indian) < 0 ||\n\t\t\t(void *)(stringtable + 
swap_uint32_check((long)list[i]->n_un.n_strx,\n\t\t\tseg_infos->cmd_f->is_indian))\n\t\t\t\t>= (void *)seg_infos->ptr_end)\n\t\t\t{\n\t\t\t\tft_putstr_fd(ERROR_STRING_INDEX, STDERR);\n\t\t\t\treturn (EXIT_FAILURE);\n\t\t\t}\n\t\t}\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nstatic int\t\tsort64_set_index(\n\tstruct nlist_64 **list,\n\tint nsyms,\n\tchar *stringtable,\n\tt_seg_infos *seg_infos)\n{\n\tint\tindex;\n\n\tif (seg_infos->cmd_f->r)\n\t\tindex = loop_sort64_reverse(\n\t\t\tlist, nsyms, stringtable, seg_infos);\n\telse\n\t\tindex = loop_sort64(\n\t\t\tlist, nsyms, stringtable, seg_infos);\n\treturn (index);\n}\n\nstatic int\t\tsort64_init_loop(\n\tstruct nlist_64 **list,\n\tint nsyms,\n\tchar *stringtable,\n\tt_seg_infos *seg_infos)\n{\n\tint\terr;\n\tint\tj;\n\tint\tindex;\n\n\terr = loop_check_index64(list, nsyms, stringtable, seg_infos);\n\tj = -1;\n\twhile (++j < nsyms)\n\t{\n\t\tindex = j;\n\t\tif (!err)\n\t\t{\n\t\t\tif (!seg_infos->cmd_f->p)\n\t\t\t\tindex = sort64_set_index(\n\t\t\t\t\tlist, nsyms, stringtable, seg_infos);\n\t\t\tif ((void *)(stringtable + swap_uint32_check(\n\t\t\t\t(long)list[index]->n_un.n_strx, seg_infos->cmd_f->is_indian))\n\t\t\t>= seg_infos->ptr_end)\n\t\t\t\tlist[index]->n_un.n_strx = -1;\n\t\t\tprint_outpout_64(\n\t\t\t\tlist[index], stringtable, seg_infos, seg_infos->cmd_f);\n\t\t}\n\t\tfree(list[index]);\n\t\tlist[index] = NULL;\n\t}\n\treturn (err);\n}\n\nint\t\t\t\tft_sort64(\n\tstruct nlist_64 *array, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos)\n{\n\tstruct nlist_64\t\t**list;\n\tint\t\t\t\t\tj;\n\tint\t\t\t\t\terr;\n\n\tif (!(list = ft_copy_nlist64(array, nsyms, seg_infos)))\n\t\treturn (EXIT_FAILURE);\n\terr = sort64_init_loop(\n\t\tlist, nsyms, stringtable, seg_infos);\n\tfree(list);\n\treturn (err);\n}\n" }, { "alpha_fraction": 0.32463592290878296, "alphanum_fraction": 0.3464805781841278, "avg_line_length": 34.0638313293457, "blob_id": "1454c633658d63d437824fea3f6a3aa0c20f3d93", "content_id": "0e979f6fc7c50e15f02b36e56e88e365fe0e394d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1648, "license_type": "no_license", "max_line_length": 80, "num_lines": 47, "path": "/src/ft_check_load.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_check_load.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:19:38 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:11:05 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\tret_err(void)\n{\n\tft_putstr_fd(ERROR_LOAD_MIN_SIZE, STDERR);\n\treturn (EXIT_FAILURE);\n}\n\nint\t\t\tft_check_load(\n\tstruct load_command *lc, char *ptr_end, t_load *load)\n{\n\tint\t\t\t\t\t\t\ti;\n\tint\t\t\t\t\t\t\tsize;\n\n\ti = -1;\n\tsize = 0;\n\tif (!load->ncmds)\n\t\treturn (EXIT_FAILURE);\n\twhile (++i < load->ncmds)\n\t{\n\t\tif (swap_uint32_check(lc->cmdsize, load->is_indian) < MIN_LOAD_SIZE)\n\t\t\treturn (ret_err());\n\t\tsize += swap_uint32_check(lc->cmdsize, load->is_indian);\n\t\tif ((void *)(lc = (void *)lc\n\t\t+ swap_uint32_check(lc->cmdsize, load->is_indian))\n\t\t+ sizeof(struct load_command) > (void *)ptr_end)\n\t\t\treturn (EXIT_FAILURE);\n\t}\n\tif (size != load->sizeofcmds)\n\t{\n\t\tft_putstr_fd(ERROR_LOAD_SIZE, 
STDERR);\n\t\treturn (EXIT_FAILURE);\n\t}\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.34703195095062256, "alphanum_fraction": 0.3664383590221405, "avg_line_length": 37.93333435058594, "blob_id": "9ede35a900fed40e8749de92ebc68e2af6b257e0", "content_id": "590aaf901869b67a6693b87712ae03610d6545b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1752, "license_type": "no_license", "max_line_length": 80, "num_lines": 45, "path": "/indian/src/ft_otool.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_otool.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:21:53 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:21:54 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\tft_loop_error(char *av)\n{\n\tft_putstr_fd(av, STDERR);\n\tft_putstr_fd(\": \", STDERR);\n\tft_putstr_fd(ERROR_FORMAT_FILE, STDERR);\n\treturn (EXIT_FAILURE);\n}\n\nint\t\t\tft_otool(char *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tunsigned int magic_number;\n\n\tif (ptr >= ptr_end)\n\t\treturn (ft_loop_error(av));\n\tmagic_number = *(int *)ptr;\n\tif (magic_number == MH_MAGIC)\n\t\tif (cmd_f->is_otool)\n\t\t\treturn (handle_text(ptr, ptr_end, av));\n\t\telse\n\t\t\treturn (handle(ptr, ptr_end, cmd_f));\n\telse if (magic_number == MH_MAGIC_64)\n\t\tif (cmd_f->is_otool)\n\t\t\treturn (handle_64_text(ptr, ptr_end, av));\n\t\telse\n\t\t\treturn (handle_64(ptr, ptr_end, cmd_f));\n\telse if (magic_number == FAT_CIGAM)\n\t\treturn (ft_fat_file(ptr, ptr_end, av, cmd_f));\n\telse if (!strncmp(ptr, ARMAG, SARMAG))\n\t\treturn (ft_ar_file(ptr, ptr_end, av, cmd_f));\n\treturn (ft_loop_error(av));\n}\n" }, { "alpha_fraction": 0.32081687450408936, "alphanum_fraction": 0.343873530626297, "avg_line_length": 42.371429443359375, "blob_id": "dc3cd67840fff9c655a3b4385fa11c4a694b506c", "content_id": "ff7f55781b52e9f519ce10c4ce46a83520b5c536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/src/sort_and_print_outpout.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* sort_and_print_outpout.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:24:35 by aollivie #+# #+# */\n/* Updated: 2017/11/22 19:34:09 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tsort_and_print_outpout(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos)\n{\n\tchar\t\t\t\t*stringtable;\n\tstruct nlist\t\t*array;\n\n\tif ((void *)(array = ptr\n\t\t+ swap_uint32_check(sym->symoff, seg_infos->cmd_f->is_indian))\n\t\t+ sizeof(struct nlist) > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)(stringtable = ptr\n\t\t+ swap_uint32_check(sym->stroff, seg_infos->cmd_f->is_indian))\n\t\t+ 1 > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tseg_infos->ptr = ptr;\n\tseg_infos->ptr_end = ptr_end;\n\treturn 
(ft_sort(array,\n\t\tswap_uint32_check(sym->nsyms, seg_infos->cmd_f->is_indian),\n\t\tstringtable, seg_infos));\n}\n" }, { "alpha_fraction": 0.36008772253990173, "alphanum_fraction": 0.38947367668151855, "avg_line_length": 27.860759735107422, "blob_id": "cf72d61033821a107caa7ebe612f7574ca67538d", "content_id": "5c9c5db1066a9b98cad33d1e1e91d82562e86534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2280, "license_type": "no_license", "max_line_length": 80, "num_lines": 79, "path": "/src/ft_loop_sort64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_loop_sort64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/22 18:37:23 by aollivie #+# #+# */\n/* Updated: 2017/11/23 15:12:59 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tset_index64(\n\tstruct nlist_64 **tmp, int *index, int i, struct nlist_64 **list)\n{\n\t*tmp = list[i];\n\t*index = i;\n}\n\nint\t\t\tloop_sort64(\n\tstruct nlist_64 **list, int nsyms, char *stringtable,\n\tt_seg_infos *seg_infos)\n{\n\tstruct nlist_64\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index64(&tmp, &index, i, list);\n\t\tif (list[i] && tmp)\n\t\t{\n\t\t\tcmp = ft_strcmp(is_bad_adresse(\n\t\t\t\tstringtable, tmp->n_un.n_strx, seg_infos),\n\t\t\t\tis_bad_adresse(stringtable, list[i]->n_un.n_strx, seg_infos));\n\t\t\tif (cmp > 0)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value > list[i]->n_value)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n\nint\t\t\tloop_sort64_reverse(\n\tstruct nlist_64 **list, int nsyms, char *s, t_seg_infos *seg_infos)\n{\n\tstruct nlist_64\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tcmp = 1;\n\t\tif (!tmp && list[i])\n\t\t\tset_index64(&tmp, &index, i, list);\n\t\tif (list[i])\n\t\t{\n\t\t\tcmp = ft_strcmp(is_bad_adresse(s, tmp->n_un.n_strx, seg_infos),\n\t\t\t\tis_bad_adresse(s, list[i]->n_un.n_strx, seg_infos));\n\t\t\tif (cmp < 0)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value < list[i]->n_value)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n" }, { "alpha_fraction": 0.3899119198322296, "alphanum_fraction": 0.40512409806251526, "avg_line_length": 26.755556106567383, "blob_id": "c12e72da7030a10b329f50bf3ad8d5d0d0545c97", "content_id": "b5f447bc2b40f4372c106c94bf61a8bf9480fcf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2498, "license_type": "no_license", "max_line_length": 80, "num_lines": 90, "path": "/src/ft_loop_sort.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_loop_sort.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/22 18:37:23 by aollivie #+# #+# */\n/* Updated: 2017/11/23 00:12:43 by aollivie ### ########.fr 
*/\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tset_index(\n\tstruct nlist **tmp, int *index, int i, struct nlist **list)\n{\n\t*tmp = list[i];\n\t*index = i;\n}\n\nchar\t\t*is_bad_adresse(char *s, long offset, t_seg_infos *seg_infos)\n{\n\tuint32_t off;\n\n\toff = swap_uint32_check(offset, seg_infos->cmd_f->is_indian);\n\tif ((void *)(s + off) >= (void *)seg_infos->ptr_end)\n\t{\n\t\treturn (BAD_STRING_INDEX);\n\t}\n\treturn ((s + off));\n}\n\nint\t\t\tloop_sort(\n\tstruct nlist **list, int nsyms, char *stringtable, t_seg_infos *seg_infos)\n{\n\tstruct nlist\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index(&tmp, &index, i, list);\n\t\tif (list[i] && tmp)\n\t\t{\n\t\t\tcmp = ft_strcmp(is_bad_adresse(stringtable, tmp->n_un.n_strx,\n\t\t\t\tseg_infos), is_bad_adresse(stringtable,\n\t\t\t\tlist[i]->n_un.n_strx, seg_infos));\n\t\t\tif (cmp > 0)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value > list[i]->n_value)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n\nint\t\t\tloop_sort_reverse(\n\tstruct nlist **list, int nsyms, char *s, t_seg_infos *seg_infos)\n{\n\tstruct nlist\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index(&tmp, &index, i, list);\n\t\tif (list[i])\n\t\t{\n\t\t\tcmp = ft_strcmp(is_bad_adresse(s, tmp->n_un.n_strx,\n\t\t\t\tseg_infos), is_bad_adresse(s, list[i]->n_un.n_strx,\n\t\t\t\t\tseg_infos));\n\t\t\tif (cmp < 0)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value < list[i]->n_value)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n" }, { "alpha_fraction": 0.42581143975257874, "alphanum_fraction": 0.44319939613342285, "avg_line_length": 33.50666809082031, "blob_id": "d5f431cffea15108aa8c54beb0442b95caf94db1", "content_id": "3a89a1a2c03c66179434dd8d0e4def9bb7c92d16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2588, "license_type": "no_license", "max_line_length": 80, "num_lines": 75, "path": "/src/ft_find_segment.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_segment.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:21:09 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:38:31 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\t\t\t\t\tret_err(void)\n{\n\tft_putstr_fd(BAD_OJECT, STDERR);\n\treturn (EXIT_FAILURE);\n}\n\nstatic int\t\t\t\t\tcheck_segment(\n\tstruct load_command *lc, int ncmds, char *segment_name, t_ptr *ptr_infos)\n{\n\tstruct segment_command\t\t*seg;\n\tint\t\t\t\t\t\t\ti;\n\n\ti = -1;\n\twhile (++i < ncmds)\n\t{\n\t\tif (swap_uint32_check(lc->cmd, ptr_infos->is_indian) == LC_SEGMENT)\n\t\t{\n\t\t\tif ((void *)(seg = (struct segment_command *)lc)\n\t\t\t+ sizeof(struct segment_command) > (void *)ptr_infos->ptr_end)\n\t\t\t\treturn (EXIT_FAILURE);\n\t\t\telse if (swap_uint32_check(seg->fileoff, 
ptr_infos->is_indian)\n\t\t\t+ swap_uint32_check(seg->filesize, ptr_infos->is_indian)\n\t\t\t> (long)((size_t)ptr_infos->ptr_end - (size_t)ptr_infos->ptr))\n\t\t\t\treturn (ret_err());\n\t\t}\n\t\tif (i + 1 < ncmds)\n\t\t\tif ((void *)(lc = (void *)lc\n\t\t\t+ swap_uint32_check(lc->cmdsize, ptr_infos->is_indian))\n\t\t\t+ sizeof(*lc) > (void *)ptr_infos->ptr_end)\n\t\t\t\treturn (EXIT_FAILURE);\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nstruct segment_command\t\t*ft_find_segment(\n\tstruct load_command *lc, int ncmds, char *segment_name, t_ptr *ptr_infos)\n{\n\tstruct segment_command\t\t*seg;\n\tint\t\t\t\t\t\t\ti;\n\n\tif (check_segment(lc, ncmds, segment_name, ptr_infos))\n\t\treturn (NULL);\n\tseg = NULL;\n\ti = -1;\n\twhile (++i < ncmds)\n\t{\n\t\tif (swap_uint32_check(lc->cmd, ptr_infos->is_indian) == LC_SEGMENT)\n\t\t{\n\t\t\tif ((void *)(seg = (struct segment_command *)lc)\n\t\t\t+ sizeof(struct segment_command) > (void *)ptr_infos->ptr_end)\n\t\t\t\treturn (NULL);\n\t\t\tif (ft_strcmp(seg->segname, segment_name) == 0 || ncmds == 1)\n\t\t\t\treturn (seg);\n\t\t}\n\t\tif ((void *)(lc = (void *)lc\n\t\t+ swap_uint32_check(lc->cmdsize, ptr_infos->is_indian)) + sizeof(*lc)\n\t\t> (void *)ptr_infos->ptr_end)\n\t\t\treturn (NULL);\n\t}\n\treturn (seg);\n}\n" }, { "alpha_fraction": 0.5379363894462585, "alphanum_fraction": 0.560951828956604, "avg_line_length": 34.60416793823242, "blob_id": "eca0d77bd69621e38e362e73b7199f72b76dc82f", "content_id": "c2be521055c6783ff5ede4714b8925149248ebb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5127, "license_type": "no_license", "max_line_length": 80, "num_lines": 144, "path": "/indian/inc/ft_otool.h", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_otool.h :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:42:38 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:52:25 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#ifndef FT_OTOOL_H\n# define FT_OTOOL_H\n\n# include <stdio.h>\n# include <sys/mman.h>\n# include <mach-o/loader.h>\n# include <mach-o/nlist.h>\n# include <mach-o/fat.h>\n# include <mach-o/ranlib.h>\n# include <mach-o/stab.h>\n# include <ar.h>\n# include <fcntl.h>\n# include <sys/stat.h>\n# include <stdlib.h>\n# include <unistd.h>\n# include <stdlib.h>\n# include \"libft.h\"\n# include \"ft_mess.h\"\n\n# define PADDING_STR\t\t\"00000000\"\n# define PADDING_STR_64\t\t\"0000000000000000\"\n# define PADDING_SPACE_64\t\" \"\n# define PADDING_SPACE\t\t\" \"\n# define MIN_LOAD_SIZE\t\t8\n# define HELP_FLAG\t\t\t-2\n# define IS_OTOOL\t\t\t1\n# define IS_NM\t\t\t\t0\n\ntypedef struct s_cmd_flag\tt_cmd_flag;\n\nstruct\t\t\t\t\t\ts_cmd_flag\n{\n\tint\t\t\t\t\t\tis_otool;\n\tint\t\t\t\t\t\tp;\n\tint\t\t\t\t\t\tu;\n\tint\t\t\t\t\t\tuu;\n\tint\t\t\t\t\t\tg;\n\tint\t\t\t\t\t\tj;\n\tint\t\t\t\t\t\tr;\n};\n\ntypedef struct s_seg_infos\tt_seg_infos;\n\nstruct\t\t\t\t\t\ts_seg_infos\n{\n\tt_cmd_flag\t\t\t\t*cmd_f;\n\tint\t\t\t\t\t\ttext_nsect;\n\tint\t\t\t\t\t\tdata_nsect;\n\tint\t\t\t\t\t\tbss_nsect;\n};\n\ntypedef struct s_fat_infos\tt_fat_infos;\n\nstruct\t\t\t\t\t\ts_fat_infos\n{\n\tstruct fat_header\t\t*f_h;\n\tstruct 
fat_arch\t\t\t*f_a;\n\tint\t\t\t\t\t\tnb_arch;\n\tint\t\t\t\t\t\toffset;\n\tchar\t\t\t\t\t*s;\n};\n\nint\t\t\t\t\t\t\tset_cmd_flag(\n\tint ac, char **av, t_cmd_flag *cmd_f, int is_otool);\nint\t\t\t\t\t\t\tprint_outpout_format(\n\tstruct nlist *nlist, char type, char *name, t_cmd_flag *cmd_flag);\nint\t\t\t\t\t\t\tprint_outpout_format_64(\n\tstruct nlist_64 *nlist, char type, char *name, t_cmd_flag *cmd_flag);\nint\t\t\t\t\t\t\tprint_outpout(\n\tstruct nlist *nlist, char *stringtable,\n\tt_seg_infos *seg_infos, t_cmd_flag *cmd_flag);\nint\t\t\t\t\t\t\tprint_outpout_64(\n\tstruct nlist_64 *nlist, char *stringtable,\n\tt_seg_infos *seg_infos, t_cmd_flag *cmd_flag);\nstruct nlist_64\t\t\t\t**ft_copy_nlist64(\n\tstruct nlist_64 *array, int nsyms);\nstruct nlist\t\t\t\t**ft_copy_nlist(\n\tstruct nlist *array, int nsyms);\nint\t\t\t\t\t\t\tft_sort64(struct nlist_64 *array, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tft_sort(\n\tstruct nlist *array, int nsyms, char *stringtable, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tsort_and_print_outpout(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tsort_and_print_outpout_64(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos);\nvoid\t\t\t\t\t\tft_init_seg_infos(t_seg_infos *seg_infos);\nt_seg_infos\t\t\t\t\t*ft_infos_segment_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, struct load_command *lc);\nt_seg_infos\t\t\t\t\t*ft_infos_segment(char *ptr, char *ptr_end,\n\tstruct mach_header *header, struct load_command *lc);\nint\t\t\t\t\t\t\thandle_64(\n\tchar *ptr, char *ptr_end, t_cmd_flag *cmd_f);\nint\t\t\t\t\t\t\thandle(char *ptr, char *ptr_end, t_cmd_flag *cmd_f);\nvoid\t\t\t\t\t\tft_print_adress(long double adr);\nvoid\t\t\t\t\t\tft_get_adress_str(\n\tlong double adr, char **dest, int index);\nvoid\t\t\t\t\t\tft_print_padding_adresse(\n\tlong int addr, size_t len_padding, char *padding);\nint\t\t\t\t\t\t\tprint_text_text_section(\n\tvoid *ptr, long double addr, int size, int is64);\nstruct section_64\t\t\t*ft_find_section_64(\n\tstruct segment_command_64 *segment, char *section_name);\nstruct section\t\t\t\t*ft_find_section(\n\tstruct segment_command *segment, char *section_name);\nint\t\t\t\t\t\t\tft_check_load(\n\tstruct load_command *lc, int ncmds, int sizeofcmds);\nstruct segment_command_64\t*ft_find_segment_64(\n\tstruct load_command *lc, int ncmds, char *segment_name);\nstruct segment_command\t\t*ft_find_segment(\n\tstruct load_command *lc, int ncmds, char *segment_name);\nstruct section_64\t\t\t*ft_find_segment_section_64(char *ptr,\n\tstruct mach_header_64 *header, char *segment_name, char *section_name);\nstruct section\t\t\t\t*ft_find_segment_section(char *ptr,\n\tstruct mach_header *header, char *segment_name, char *section_name);\nint\t\t\t\t\t\t\thandle_64_text(char *ptr, char *ptr_end, char *av);\nint\t\t\t\t\t\t\thandle_text(char *ptr, char *ptr_end, char *av);\nchar\t\t\t\t\t\t*ft_format_archive_name(\n\tchar *n1, char *n2, char *n3, char *n4);\nvoid\t\t\t\t\t\tft_print_archive_name(char *s1, char *s2);\nint\t\t\t\t\t\t\tcheck_valid_file(char *ptr, char *ptr_end);\nint\t\t\t\t\t\t\tft_ar_file(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f);\nuint32_t\t\t\t\t\tswap_uint32(uint32_t val);\nint\t\t\t\t\t\t\tft_fat_file(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f);\nint\t\t\t\t\t\t\tft_otool(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f);\n\n#endif\n" }, { "alpha_fraction": 
0.5462691187858582, "alphanum_fraction": 0.5667970180511475, "avg_line_length": 34.27586364746094, "blob_id": "0a17f313851ba48390ef43de3f213d5ff7fc415a", "content_id": "9915a7261c2d8a13cc65e871deff0d8029068233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6138, "license_type": "no_license", "max_line_length": 80, "num_lines": 174, "path": "/inc/ft_otool.h", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_otool.h :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:42:38 by aollivie #+# #+# */\n/* Updated: 2017/11/29 14:09:24 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#ifndef FT_OTOOL_H\n# define FT_OTOOL_H\n\n# include <stdio.h>\n# include <sys/mman.h>\n# include <mach-o/loader.h>\n# include <mach-o/nlist.h>\n# include <mach-o/fat.h>\n# include <mach-o/ranlib.h>\n# include <mach-o/stab.h>\n# include <ar.h>\n# include <fcntl.h>\n# include <sys/stat.h>\n# include <stdlib.h>\n# include <unistd.h>\n# include <stdlib.h>\n# include \"libft.h\"\n# include \"ft_mess.h\"\n\n# define PADDING_STR\t\t\"00000000\"\n# define PADDING_STR_64\t\t\"0000000000000000\"\n# define PADDING_SPACE_64\t\" \"\n# define PADDING_SPACE\t\t\" \"\n# define MIN_LOAD_SIZE\t\t8\n# define HELP_FLAG\t\t\t-2\n# define IS_OTOOL\t\t\t1\n# define IS_NM\t\t\t\t0\n\ntypedef struct\t\t\t\ts_cmd_flag\n{\n\tint\t\t\t\t\t\tis_indian;\n\tint\t\t\t\t\t\tis_otool;\n\tint\t\t\t\t\t\tp;\n\tint\t\t\t\t\t\tu;\n\tint\t\t\t\t\t\tuu;\n\tint\t\t\t\t\t\tg;\n\tint\t\t\t\t\t\tj;\n\tint\t\t\t\t\t\tr;\n}\t\t\t\t\t\t\tt_cmd_flag;\n\ntypedef struct\t\t\t\ts_seg_infos\n{\n\tt_cmd_flag\t\t\t\t*cmd_f;\n\tint\t\t\t\t\t\ttext_nsect;\n\tint\t\t\t\t\t\tdata_nsect;\n\tint\t\t\t\t\t\tbss_nsect;\n\tvoid\t\t\t\t\t*ptr;\n\tvoid\t\t\t\t\t*ptr_end;\n}\t\t\t\t\t\t\tt_seg_infos;\n\ntypedef struct\t\t\t\ts_fat_infos\n{\n\tstruct fat_header\t\t*f_h;\n\tstruct fat_arch\t\t\t*f_a;\n\tint\t\t\t\t\t\tnb_arch;\n\tint\t\t\t\t\t\toffset;\n\tchar\t\t\t\t\t*s;\n}\t\t\t\t\t\t\tt_fat_infos;\n\ntypedef struct\t\t\t\ts_load\n{\n\tint\t\t\t\t\t\tncmds;\n\tint\t\t\t\t\t\tsizeofcmds;\n\tint\t\t\t\t\t\tis_indian;\n\tvoid\t\t\t\t\t*ptr;\n\tvoid\t\t\t\t\t*ptr_end;\n\n}\t\t\t\t\t\t\tt_load;\n\ntypedef struct\t\t\t\ts_ptr\n{\n\tchar\t\t\t\t\t*ptr;\n\tchar\t\t\t\t\t*ptr_end;\n\tint\t\t\t\t\t\tis_indian;\n\tint\t\t\t\t\t\tis_64;\n}\t\t\t\t\t\t\tt_ptr;\n\nchar\t\t\t\t\t\t*is_bad_adresse(\n\tchar *s, long offset, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tloop_sort_reverse(\n\tstruct nlist **list, int nsyms, char *s, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tloop_sort(\n\tstruct nlist **list, int nsyms, char *stringtable, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tfree_seg_infos(t_seg_infos **seg_infos, int ret);\nint\t\t\t\t\t\t\tloop_sort64_reverse(\n\tstruct nlist_64 **list, int nsyms, char *s, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tloop_sort64(\n\tstruct nlist_64 **list, int nsyms, char *stringtable,\n\tt_seg_infos *seg_infos);\nlong\t\t\t\t\t\tswap_uint32_check(long val, int is_indian);\nint\t\t\t\t\t\t\tset_cmd_flag(\n\tint ac, char **av, t_cmd_flag *cmd_f, int is_otool);\nint\t\t\t\t\t\t\tprint_outpout_format(\n\tstruct nlist *nlist, char type, char *name, t_cmd_flag *cmd_flag);\nint\t\t\t\t\t\t\tprint_outpout_format_64(\n\tstruct nlist_64 
*nlist, char type, char *name, t_cmd_flag *cmd_flag);\nint\t\t\t\t\t\t\tprint_outpout(\n\tstruct nlist *nlist, char *stringtable,\n\tt_seg_infos *seg_infos, t_cmd_flag *cmd_flag);\nint\t\t\t\t\t\t\tprint_outpout_64(\n\tstruct nlist_64 *nlist, char *stringtable,\n\tt_seg_infos *seg_infos, t_cmd_flag *cmd_flag);\nstruct nlist_64\t\t\t\t**ft_copy_nlist64(\n\tstruct nlist_64 *array, int nsyms, t_seg_infos *seg_infos);\nstruct nlist\t\t\t\t**ft_copy_nlist(\n\tstruct nlist *array, int nsyms, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tft_sort64(struct nlist_64 *array, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tft_sort(\n\tstruct nlist *array, int nsyms, char *stringtable, t_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tsort_and_print_outpout(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos);\nint\t\t\t\t\t\t\tsort_and_print_outpout_64(\n\tstruct symtab_command *sym, void *ptr, void *ptr_end,\n\tt_seg_infos *seg_infos);\nvoid\t\t\t\t\t\tft_init_seg_infos(t_seg_infos *seg_infos);\nt_seg_infos\t\t\t\t\t*ft_infos_segment_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, struct load_command *lc);\nt_seg_infos\t\t\t\t\t*ft_infos_segment(char *ptr, char *ptr_end,\n\tstruct mach_header *header, struct load_command *lc);\nint\t\t\t\t\t\t\thandle_64(\n\tchar *ptr, char *ptr_end, t_cmd_flag *cmd_f);\nint\t\t\t\t\t\t\thandle(char *ptr, char *ptr_end, t_cmd_flag *cmd_f);\nvoid\t\t\t\t\t\tft_print_adress(long double adr);\nvoid\t\t\t\t\t\tft_get_adress_str(\n\tlong double adr, char **dest, int index);\nvoid\t\t\t\t\t\tft_print_padding_adresse(\n\tlong long unsigned addr, size_t len_padding, char *padding);\nint\t\t\t\t\t\t\tprint_text_text_section(\n\tvoid *ptr, long double addr, int size, t_ptr *ptr_infos);\nstruct section_64\t\t\t*ft_find_section_64(\n\tstruct segment_command_64 *segment, char *section_name, t_ptr *ptr_infos);\nstruct section\t\t\t\t*ft_find_section(\n\tstruct segment_command *segment, char *section_name, t_ptr *ptr_infos);\nint\t\t\t\t\t\t\tft_check_load(\n\tstruct load_command *lc, char *ptr_end, t_load *load);\nstruct segment_command_64\t*ft_find_segment_64(\n\tstruct load_command *lc, int ncmds, char *segment_name, t_ptr *ptr_infos);\nstruct segment_command\t\t*ft_find_segment(\n\tstruct load_command *lc, int ncmds, char *segment_name, t_ptr *ptr_infos);\nstruct section_64\t\t\t*ft_find_segment_section_64(t_ptr *ptr_infos,\n\tint is_indian, char *segment_name, char *section_name);\nstruct section\t\t\t\t*ft_find_segment_section(t_ptr *ptr_infos,\n\tint is_indian, char *segment_name, char *section_name);\nint\t\t\t\t\t\t\thandle_64_text(\n\tchar *ptr, char *ptr_end, char *av, int is_indian);\nint\t\t\t\t\t\t\thandle_text(\n\tchar *ptr, char *ptr_end, char *av, int is_indian);\nchar\t\t\t\t\t\t*ft_format_archive_name(\n\tchar *n1, char *n2, char *n3, char *n4);\nvoid\t\t\t\t\t\tft_print_archive_name(char *s1, char *s2);\nint\t\t\t\t\t\t\tcheck_valid_file(char *ptr, char *ptr_end);\nint\t\t\t\t\t\t\tft_ar_file(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f);\nuint32_t\t\t\t\t\tswap_uint32(uint32_t val);\nint\t\t\t\t\t\t\tft_fat_file(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f);\nint\t\t\t\t\t\t\tft_otool(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f);\n\n#endif\n" }, { "alpha_fraction": 0.4105423092842102, "alphanum_fraction": 0.42524075508117676, "avg_line_length": 53.80555725097656, "blob_id": "785b6ff1d1498ee0da2bce8ad62ec69a07bc2461", "content_id": 
"22297c5f805b6fbc800b519aa2c14e8bbb00010c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1973, "license_type": "no_license", "max_line_length": 80, "num_lines": 36, "path": "/indian/inc/en/ft_mess.h", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_mess.h :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:42:43 by aollivie #+# #+# */\n/* Updated: 2017/11/07 17:05:20 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#ifndef FT_MESS_H\n# define FT_MESS_H\n\n# define CONTENT_TEXT_TEXT \t\"Contents of (__TEXT,__text) section\\n\"\n# define ERROR_LOAD_MIN_SIZE\t\"Mach-O load command with size < 8 bytes\\n\"\n# define ERROR_LOAD_SIZE\t\"Mach-O segment load command size is too small\\n\"\n\n# define ERROR_FORMAT \"The file was not recognized as a valid object file\\n\"\n# define ERROR_FORMAT_FILE ERROR_FORMAT\n# define NO_ACCESS\t\t\t\"No access to : \"\n# define ERROR_FLAG\t\t\t\"ft_nm: Unknown command line argument\"\n# define TYPE_HELP\t\t\t\"Type './ft_nm -help' for more informations\\n\"\n# define HELP\t\t\t\t\"-help\"\n# define USAGE \"USAGE: nm [options] <input files>\\n\\n\"\n# define OPTION \"General options:\\n\\n\"\n# define O_G \"-g Display only global (external) symbols\\n\\n\"\n# define O_U \"-u Display only undefined symbols\\n\\n\"\n# define O_UU \"-U Don't display undefined symbols\\n\\n\"\n# define O_P \"-p Don't sort; display in symbol-table order\\n\\n\"\n# define O_J \"-j Just display the symbol names (no value or type)\\n\\n\"\n# define O_R \"-r Sort in reverse order\\n\\n\"\n# define HELP_MESS\tUSAGE OPTION O_G O_U O_UU O_P O_J O_R\n\n#endif\n" }, { "alpha_fraction": 0.44277822971343994, "alphanum_fraction": 0.4581688940525055, "avg_line_length": 33.71232986450195, "blob_id": "00e929012b8d9811263ff96ed9146412bb213a3f", "content_id": "d1cba7960284af3937ce0a86dccbdbabac68ee9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2534, "license_type": "no_license", "max_line_length": 98, "num_lines": 73, "path": "/indian/src/handle.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:23:09 by aollivie #+# #+# */\n/* Updated: 2017/11/17 16:48:40 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\tloop_handle(char *ptr, char *ptr_end,\n\tstruct mach_header *header, t_cmd_flag *cmd_f)\n{\n\tuint32_t\t\t\t\ti;\n\tt_seg_infos\t\t\t\t*seg_infos;\n\tstruct symtab_command\t*sym;\n\tstruct load_command\t\t*lc;\n\n\tif ((void *)(lc = (void *)ptr + sizeof(*header)) + sizeof(struct load_command) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (!(seg_infos = ft_infos_segment(ptr, ptr_end, header, lc)))\n\t\treturn (EXIT_FAILURE);\n\ti = -1;\n\tseg_infos->cmd_f = cmd_f;\n\twhile (++i < header->ncmds)\n\t{\n\t\tif (swap_uint32(lc->cmd) == LC_SYMTAB)\n\t\t{\n\t\t\tsym = (struct symtab_command 
*)lc;\n\t\t\tsort_and_print_outpout(sym, ptr, ptr_end, seg_infos);\n\t\t\tbreak ;\n\t\t}\n\t\tlc = (void *)lc + swap_uint32(lc->cmdsize);\n\t}\n\tfree(seg_infos);\n\treturn (EXIT_SUCCESS);\n}\n\nstruct mach_header *endian(void *ptr, void *ptr_end)\n{\n\tstruct mach_header\t*tmp;\n\tstruct mach_header\t*header;\n\n\tif ((void *)(tmp = (struct mach_header *)ptr) > (void *)ptr_end)\n\t\treturn (NULL);\n\tif (!(header = malloc(sizeof(struct mach_header))))\n\t\treturn (NULL);\n\tft_memcpy(header, tmp, sizeof(struct mach_header));\n\theader->ncmds = swap_uint32(tmp->ncmds);\n\theader->sizeofcmds = swap_uint32(tmp->sizeofcmds);\n\treturn (header);\n}\n\nint\t\t\thandle(char *ptr, char *ptr_end, t_cmd_flag *cmd_f)\n{\n\tstruct mach_header\t*header;\n\n\tif ((void *)ptr + sizeof(struct mach_header) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (!(header = endian(ptr, ptr_end)))\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)ptr + header->sizeofcmds > (void *)ptr_end\n\t|| loop_handle(ptr, ptr_end, header, cmd_f))\n\t{\n\t\tfree(header);\n\t\treturn (EXIT_FAILURE);\n\t}\n\tfree(header);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.3645144999027252, "alphanum_fraction": 0.38942551612854004, "avg_line_length": 34.76363754272461, "blob_id": "9aeeb1a0489e2cb91f94308b038cd5d1b4dd0331", "content_id": "ce479be822913b4f8ffe7ddb9252f99a346a95eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1967, "license_type": "no_license", "max_line_length": 80, "num_lines": 55, "path": "/indian/src/handle_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:52 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:36:55 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\tloop_handle_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, t_cmd_flag *cmd_f)\n{\n\tuint32_t\t\t\t\ti;\n\tt_seg_infos\t\t\t\t*seg_infos;\n\tstruct symtab_command\t*sym;\n\tstruct load_command\t\t*lc;\n\n\tif ((void *)(lc = (void *)ptr + sizeof(*header)) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (!(seg_infos = ft_infos_segment_64(ptr, ptr_end, header, lc)))\n\t\treturn (EXIT_FAILURE);\n\ti = -1;\n\tseg_infos->cmd_f = cmd_f;\n\twhile (++i < header->ncmds)\n\t{\n\t\tif (lc->cmd == LC_SYMTAB)\n\t\t{\n\t\t\tsym = (struct symtab_command *)lc;\n\t\t\tsort_and_print_outpout_64(\n\t\t\t\tsym, ptr, ptr_end, seg_infos);\n\t\t\tbreak ;\n\t\t}\n\t\tlc = (void *)lc + lc->cmdsize;\n\t}\n\tfree(seg_infos);\n\treturn (EXIT_SUCCESS);\n}\n\nint\t\t\thandle_64(char *ptr, char *ptr_end, t_cmd_flag *cmd_f)\n{\n\tstruct mach_header_64\t*header;\n\n\tif ((void *)(header = (struct mach_header_64 *)ptr) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)ptr + header->sizeofcmds > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (loop_handle_64(ptr, ptr_end, header, cmd_f))\n\t\treturn (EXIT_FAILURE);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.28383979201316833, "alphanum_fraction": 0.3080110549926758, "avg_line_length": 38.135135650634766, "blob_id": "9ef148067ede390157ec34ce0aa924b19e27f5dc", "content_id": 
"a1c686eb50819b1930ef0e63f595985c576defbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/src/print_outpout_format.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* print_outpout_format.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:23:53 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:23:54 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tprint_outpout_format(\n\tstruct nlist *nlist, char type, char *name, t_cmd_flag *cmd_flag)\n{\n\tif (!cmd_flag->u && !cmd_flag->j)\n\t{\n\t\tif (nlist->n_value || type != 'U')\n\t\t{\n\t\t\tft_print_padding_adresse(\n\t\t\t\tswap_uint32_check(nlist->n_value, cmd_flag->is_indian),\n\t\t\t\tft_strlen(PADDING_STR), PADDING_STR);\n\t\t}\n\t\telse\n\t\t{\n\t\t\twrite(1, PADDING_SPACE, ft_strlen(PADDING_SPACE));\n\t\t}\n\t\twrite(STDOUT, \" \", 1);\n\t\twrite(STDOUT, &type, 1);\n\t\twrite(STDOUT, \" \", 1);\n\t}\n\twrite(STDOUT, name, ft_strlen(name));\n\twrite(STDOUT, \"\\n\", 1);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.47385525703430176, "alphanum_fraction": 0.500443160533905, "avg_line_length": 33.191917419433594, "blob_id": "3e5145d948d01204ddce89b0b64e342e79e7693c", "content_id": "99f9695b4eae1f0779fe1c0546dbe48db30bd13d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3385, "license_type": "no_license", "max_line_length": 80, "num_lines": 99, "path": "/src/ft_infos_segment_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_infos_segment_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:21:30 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:47:14 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\t\tloop_segment64(\n\tstruct load_command *lc, int *index, t_seg_infos *seg_infos, t_load *load)\n{\n\tstruct segment_command_64\t*segment;\n\tstruct section_64\t\t\t*section;\n\tuint32_t\t\t\t\t\tloop;\n\n\tif ((void *)(segment = (struct segment_command_64 *)lc)\n\t+ sizeof(struct segment_command_64) > load->ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)(section = (void *)segment + sizeof(*segment))\n\t+ sizeof(*section) > load->ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tloop = -1;\n\twhile (++loop < swap_uint32_check(segment->nsects, load->is_indian))\n\t{\n\t\tif (ft_strcmp(section->sectname, SECT_TEXT) == 0)\n\t\t\tseg_infos->text_nsect = *index + 1;\n\t\telse if (ft_strcmp(section->sectname, SECT_DATA) == 0)\n\t\t\tseg_infos->data_nsect = *index + 1;\n\t\telse if (ft_strcmp(section->sectname, SECT_BSS) == 0)\n\t\t\tseg_infos->bss_nsect = *index + 1;\n\t\tif ((void *)(section = (void *)section + sizeof(*section))\n\t\t+ sizeof(*section) > load->ptr_end)\n\t\t\treturn (EXIT_FAILURE);\n\t\t*index = *index + 1;\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nstatic void\t\tinit_load64(\n\tt_load *load, char 
*ptr, char *ptr_end, struct mach_header_64 *header)\n{\n\tload->is_indian = swap_uint32(*(int *)ptr) == MH_MAGIC_64 ? 1 : 0;\n\tload->ncmds = swap_uint32_check(header->ncmds, load->is_indian);\n\tload->sizeofcmds = swap_uint32_check(header->sizeofcmds, load->is_indian);\n\tload->ptr = ptr;\n\tload->ptr_end = ptr_end;\n}\n\nstatic int\t\tinit_loop_segment_64(\n\tstruct mach_header_64 *header, struct load_command *lc,\nt_load *load, t_seg_infos *seg_infos)\n{\n\tuint32_t\t\t\t\t\ti;\n\tint\t\t\t\t\t\t\tindex;\n\n\ti = -1;\n\tindex = 0;\n\twhile (++i < swap_uint32_check(header->ncmds, load->is_indian))\n\t{\n\t\tif (swap_uint32_check(lc->cmd, load->is_indian) == LC_SEGMENT_64)\n\t\t\tloop_segment64(lc, &index, seg_infos, load);\n\t\tif ((void *)(lc = (void *)lc\n\t\t\t+ swap_uint32_check(lc->cmdsize, load->is_indian))\n\t\t\t+ sizeof(struct load_command) > (void *)load->ptr_end)\n\t\t{\n\t\t\treturn (EXIT_FAILURE);\n\t\t}\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nt_seg_infos\t\t*ft_infos_segment_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, struct load_command *lc)\n{\n\tt_seg_infos\t\t\t\t\t*seg_infos;\n\tt_load\t\t\t\t\t\tload;\n\n\tif (!(seg_infos = malloc(sizeof(t_seg_infos))))\n\t\treturn (NULL);\n\tinit_load64(&load, ptr, ptr_end, header);\n\tft_init_seg_infos(seg_infos);\n\tif ((void *)(lc = (void *)ptr + sizeof(*header))\n\t+ sizeof(*lc) > (void *)ptr_end || ft_check_load(lc, ptr_end, &load))\n\t{\n\t\tfree(seg_infos);\n\t\treturn (NULL);\n\t}\n\tif (init_loop_segment_64(header, lc, &load, seg_infos))\n\t{\n\t\tfree(seg_infos);\n\t\treturn (NULL);\n\t}\n\treturn (seg_infos);\n}\n" }, { "alpha_fraction": 0.27259984612464905, "alphanum_fraction": 0.30483531951904297, "avg_line_length": 38.06060791015625, "blob_id": "8968ff9cc7847782c2a9a729a601ef97ee85de3b", "content_id": "4b45803f319c96efbe51cdc3d05b38e8bcb533f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1427, "license_type": "no_license", "max_line_length": 80, "num_lines": 36, "path": "/indian/src/print_outpout_format_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* print_outpout_format_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:23:46 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:23:47 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\tprint_outpout_format_64(\n\tstruct nlist_64 *nlist, char type, char *name, t_cmd_flag *cmd_flag)\n{\n\tif (!cmd_flag->u && !cmd_flag->j)\n\t{\n\t\tif (nlist->n_value || type != 'U')\n\t\t{\n\t\t\tft_print_padding_adresse(\n\t\t\t\tnlist->n_value, ft_strlen(PADDING_STR_64), PADDING_STR_64);\n\t\t}\n\t\telse\n\t\t{\n\t\t\twrite(STDOUT, PADDING_SPACE_64, ft_strlen(PADDING_SPACE_64));\n\t\t}\n\t\twrite(STDOUT, \" \", 1);\n\t\twrite(STDOUT, &type, 1);\n\t\twrite(STDOUT, \" \", 1);\n\t}\n\twrite(STDOUT, name, ft_strlen(name));\n\twrite(STDOUT, \"\\n\", 1);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.25446081161499023, "alphanum_fraction": 0.2785104811191559, "avg_line_length": 38.06060791015625, "blob_id": "4738aa62c072639d6affbeaf67b761d27d8e88c7", "content_id": "b7317d3d1f67c5fa9e8aa1662026777eb229337a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1289, "license_type": 
"no_license", "max_line_length": 80, "num_lines": 33, "path": "/indian/src/ft_find_section.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_section.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:17 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:20:19 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstruct section\t*ft_find_section(\n\tstruct segment_command *segment, char *section_name)\n{\n\tstruct section\t\t\t*section;\n\tuint32_t\t\t\t\tloop;\n\n\tloop = 0;\n\tsection = (void *)segment + sizeof(*segment);\n\twhile (loop < segment->nsects)\n\t{\n\t\tif (!ft_strcmp(section->sectname, section_name))\n\t\t{\n\t\t\treturn (section);\n\t\t}\n\t\tsection = (void *)section + sizeof(*section);\n\t\tloop++;\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.32330095767974854, "alphanum_fraction": 0.34368932247161865, "avg_line_length": 23.819276809692383, "blob_id": "5c638a18ba117e9a5b0ce42a341011accca0bce0", "content_id": "085408d4b562e3923610fa50a677d3f2574ea217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2060, "license_type": "no_license", "max_line_length": 80, "num_lines": 83, "path": "/src/set_cmd_flag.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* set_cmd_flag.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:17:20 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:46:39 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\terror_flag(char *av)\n{\n\tft_putstr_fd(ERROR_FLAG, STDERR);\n\tft_putstr_fd(\" '-\", STDERR);\n\tft_putstr_fd(av, STDERR);\n\tft_putstr_fd(\"'\\n\", STDERR);\n\tft_putstr_fd(TYPE_HELP, STDOUT);\n\treturn (EXIT_FAILURE);\n}\n\nstatic int\thelp_mess(void)\n{\n\tft_putstr_fd(HELP_MESS, STDOUT);\n\treturn (HELP_FLAG);\n}\n\nstatic int\tset_flag(char *av, t_cmd_flag *cmd_f)\n{\n\tav++;\n\tif (!*av)\n\t\treturn (error_flag(av));\n\twhile (*av)\n\t{\n\t\tif (*av == 'p')\n\t\t\tcmd_f->p = 1;\n\t\telse if (*av == 'u')\n\t\t\tcmd_f->u = 1;\n\t\telse if (*av == 'U')\n\t\t\tcmd_f->uu = 1;\n\t\telse if (*av == 'g')\n\t\t\tcmd_f->g = 1;\n\t\telse if (*av == 'j')\n\t\t\tcmd_f->j = 1;\n\t\telse if (*av == 'r')\n\t\t\tcmd_f->r = 1;\n\t\telse\n\t\t\treturn (error_flag(av));\n\t\tav++;\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nint\t\t\tset_cmd_flag(int ac, char **av, t_cmd_flag *cmd_f, int is_otool)\n{\n\tint i;\n\tint err;\n\n\tcmd_f->is_otool = is_otool;\n\tcmd_f->p = 0;\n\tcmd_f->g = 0;\n\tcmd_f->u = 0;\n\tcmd_f->uu = 0;\n\tcmd_f->j = 0;\n\tcmd_f->r = 0;\n\ti = 1;\n\tif (is_otool)\n\t\treturn (i);\n\twhile (i < ac)\n\t{\n\t\tif (*av[i] != '-')\n\t\t\tbreak ;\n\t\tif (!ft_strcmp(av[i], HELP))\n\t\t\treturn (help_mess());\n\t\tif ((err = set_flag(av[i], cmd_f)))\n\t\t\treturn (-1);\n\t\ti++;\n\t}\n\treturn (i);\n}\n" }, { "alpha_fraction": 0.4374084770679474, "alphanum_fraction": 0.4513177275657654, "avg_line_length": 25.784313201904297, "blob_id": 
"a7276074a5b060a50a9b8f281ded6c502064d90d", "content_id": "8020989e89d9c3cfdcf5d4b437a87c5f38d8aa4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2732, "license_type": "no_license", "max_line_length": 80, "num_lines": 102, "path": "/src/ft_sort.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_sort.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:29 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:49:17 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\t\tloop_check_index(\n\tstruct nlist **list, int nsyms, char *stringtable, t_seg_infos *seg_infos)\n{\n\tint\t\t\t\t\ti;\n\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (list[i])\n\t\t{\n\t\t\tif ((long)swap_uint32_check((long)list[i]->n_un.n_strx,\n\t\t\tseg_infos->cmd_f->is_indian) < 0 ||\n\t\t\t(void *)(stringtable + swap_uint32_check((long)list[i]->n_un.n_strx,\n\t\t\tseg_infos->cmd_f->is_indian))\n\t\t\t\t>= (void *)seg_infos->ptr_end)\n\t\t\t{\n\t\t\t\tft_putstr_fd(ERROR_STRING_INDEX, STDERR);\n\t\t\t\treturn (EXIT_FAILURE);\n\t\t\t}\n\t\t}\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nstatic int\t\tsort_set_index(\n\tstruct nlist **list,\n\tint nsyms,\n\tchar *stringtable,\n\tt_seg_infos *seg_infos)\n{\n\tint\tindex;\n\n\tif (seg_infos->cmd_f->r)\n\t\tindex = loop_sort_reverse(\n\t\t\tlist, nsyms, stringtable, seg_infos);\n\telse\n\t\tindex = loop_sort(\n\t\t\tlist, nsyms, stringtable, seg_infos);\n\treturn (index);\n}\n\nstatic int\t\tsort_init_loop(\n\tstruct nlist **list,\n\tint nsyms,\n\tchar *stringtable,\n\tt_seg_infos *seg_infos)\n{\n\tint\terr;\n\tint\tj;\n\tint\tindex;\n\n\terr = loop_check_index(list, nsyms, stringtable, seg_infos);\n\tj = -1;\n\twhile (++j < nsyms)\n\t{\n\t\tindex = j;\n\t\tif (!err)\n\t\t{\n\t\t\tif (!seg_infos->cmd_f->p)\n\t\t\t\tindex = sort_set_index(list, nsyms, stringtable, seg_infos);\n\t\t\tif ((void *)(stringtable + swap_uint32_check(\n\t\t\t\t(long)list[index]->n_un.n_strx, seg_infos->cmd_f->is_indian))\n\t\t\t>= seg_infos->ptr_end)\n\t\t\t\tlist[index]->n_un.n_strx = -1;\n\t\t\tprint_outpout(\n\t\t\t\tlist[index], stringtable, seg_infos, seg_infos->cmd_f);\n\t\t}\n\t\tfree(list[index]);\n\t\tlist[index] = NULL;\n\t}\n\treturn (err);\n}\n\nint\t\t\t\tft_sort(\n\tstruct nlist *array, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos)\n{\n\tstruct nlist\t\t**list;\n\tint\t\t\t\t\tj;\n\tint\t\t\t\t\terr;\n\n\tif (!(list = ft_copy_nlist(array, nsyms, seg_infos)))\n\t\treturn (EXIT_FAILURE);\n\terr = sort_init_loop(\n\t\tlist, nsyms, stringtable, seg_infos);\n\tfree(list);\n\treturn (err);\n}\n" }, { "alpha_fraction": 0.27909886837005615, "alphanum_fraction": 0.3185231685638428, "avg_line_length": 31.612245559692383, "blob_id": "54c40a12f30557319140aaa13f1cb28c140a6495", "content_id": "2459fc3dc40dcae2e636eec00840599f84f751a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1598, "license_type": "no_license", "max_line_length": 80, "num_lines": 49, "path": "/indian/src/print_text_text_section.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: 
*/\n/* print_text_text_section.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:24:10 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:41:32 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tprint_chariot(int j)\n{\n\tif (j % 16 != 0)\n\t\twrite(1, \"\\n\", 1);\n}\n\nint\t\t\tprint_text_text_section(\n\tvoid *ptr, long double addr, int size, int is64)\n{\n\tsize_t\tlen;\n\tint\t\tj;\n\n\tj = 0;\n\tlen = is64 ? ft_strlen(PADDING_STR_64) : ft_strlen(PADDING_STR);\n\twrite(1, CONTENT_TEXT_TEXT, ft_strlen(CONTENT_TEXT_TEXT));\n\twhile (j < size)\n\t{\n\t\tif (j % 16 == 0)\n\t\t{\n\t\t\tft_print_padding_adresse(\n\t\t\t\taddr, len, is64 ? PADDING_STR_64 : PADDING_STR);\n\t\t\twrite(1, \"\\t\", 1);\n\t\t}\n\t\tif (*(unsigned char *)ptr < 0x10)\n\t\t\twrite(1, \"0\", 1);\n\t\tft_print_adress(*(unsigned char *)ptr);\n\t\twrite(1, \" \", 1);\n\t\tif (++j % 16 == 0)\n\t\t\twrite(1, \"\\n\", 1);\n\t\taddr++;\n\t\tptr++;\n\t}\n\tprint_chariot(j);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.4516814053058624, "alphanum_fraction": 0.4739823043346405, "avg_line_length": 36.171051025390625, "blob_id": "0bb3aeb1293f1b5848b5dec393359e6ab18ace87", "content_id": "35b5cc73350c0d70ebc634c9aff7cd9bd6d38b82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2825, "license_type": "no_license", "max_line_length": 80, "num_lines": 76, "path": "/src/handle_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:52 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:50:29 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\t\t\t\t\tfree_seg_infos(t_seg_infos **seg_infos, int ret)\n{\n\tfree(*seg_infos);\n\treturn (ret);\n}\n\nstatic t_seg_infos\t*init_loop_handle_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, struct load_command **lc)\n{\n\tt_seg_infos\t*seg_infos;\n\n\tif ((void *)(*lc = (void *)ptr + sizeof(*header))\n\t+ sizeof(struct load_command) > (void *)ptr_end)\n\t\treturn (NULL);\n\tif (!(seg_infos = ft_infos_segment_64(ptr, ptr_end, header, *lc)))\n\t\treturn (NULL);\n\treturn (seg_infos);\n}\n\nstatic int\t\t\tloop_handle_64(char *ptr, char *ptr_end,\n\tstruct mach_header_64 *header, t_cmd_flag *cmd_f)\n{\n\tuint32_t\t\t\t\ti;\n\tt_seg_infos\t\t\t\t*seg_infos;\n\tstruct symtab_command\t*sym;\n\tstruct load_command\t\t*lc;\n\n\tif (!(seg_infos = init_loop_handle_64(ptr, ptr_end, header, &lc)))\n\t\treturn (EXIT_FAILURE);\n\ti = -1;\n\tseg_infos->cmd_f = cmd_f;\n\twhile (++i < swap_uint32_check(header->ncmds, cmd_f->is_indian))\n\t\tif (swap_uint32_check(lc->cmd, cmd_f->is_indian) == LC_SYMTAB)\n\t\t{\n\t\t\tif ((void *)(sym = (struct symtab_command *)lc) + sizeof(*sym)\n\t\t\t> (void *)ptr_end)\n\t\t\t\treturn (free_seg_infos(&seg_infos, EXIT_FAILURE));\n\t\t\telse if (sort_and_print_outpout_64(sym, ptr, ptr_end, seg_infos))\n\t\t\t\treturn (free_seg_infos(&seg_infos, EXIT_FAILURE));\n\t\t\tbreak ;\n\t\t}\n\t\telse if ((void *)(lc = (void *)lc\n\t\t+ 
swap_uint32_check(lc->cmdsize, cmd_f->is_indian))\n\t\t+ sizeof(struct load_command) > (void *)ptr_end)\n\t\t\treturn (free_seg_infos(&seg_infos, EXIT_FAILURE));\n\treturn (free_seg_infos(&seg_infos, EXIT_SUCCESS));\n}\n\nint\t\t\t\t\thandle_64(char *ptr, char *ptr_end, t_cmd_flag *cmd_f)\n{\n\tstruct mach_header_64\t*header;\n\n\tif ((void *)(header = (struct mach_header_64 *)ptr)\n\t+ sizeof(struct mach_header_64) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)ptr + swap_uint32_check(header->sizeofcmds, cmd_f->is_indian)\n\t> (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (loop_handle_64(ptr, ptr_end, header, cmd_f))\n\t\treturn (EXIT_FAILURE);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.32015809416770935, "alphanum_fraction": 0.33992093801498413, "avg_line_length": 28.516666412353516, "blob_id": "9c9bc3c2971a42cc39d8a9f4dfd006d4eaf5e1ba", "content_id": "f6b17d25f10f86e3f82f61e7267b56d83d0d1f13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1771, "license_type": "no_license", "max_line_length": 80, "num_lines": 60, "path": "/src/main_otool.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* main_otool.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:23:28 by aollivie #+# #+# */\n/* Updated: 2017/11/29 14:26:10 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\texecute_is_otool(char *av, t_cmd_flag *cmd_f)\n{\n\tint\t\t\tfd;\n\tchar\t\t*ptr;\n\tstruct stat\tbuf;\n\tint\t\t\tret;\n\n\tif ((fd = open(av, O_RDONLY)) < 0)\n\t{\n\t\tft_putstr(NO_ACCESS);\n\t\tft_putstr(av);\n\t\tft_putstr(\"\\n\");\n\t\treturn (EXIT_FAILURE);\n\t}\n\tif (fstat(fd, &buf) < 0)\n\t{\n\t\tclose(fd);\n\t\treturn (EXIT_FAILURE);\n\t}\n\tif ((ptr = mmap(\n\t\t0, buf.st_size, PROT_READ, MAP_PRIVATE, fd, 0)) == MAP_FAILED)\n\t{\n\t\tclose(fd);\n\t\treturn (EXIT_FAILURE);\n\t}\n\tret = ft_otool(ptr, (void *)ptr + buf.st_size, av, cmd_f);\n\tif (munmap(ptr, buf.st_size) < 0)\n\t\tret = EXIT_FAILURE;\n\tclose(fd);\n\treturn (ret);\n}\n\nint\t\t\tmain(int ac, char **av)\n{\n\tint\t\t\tret;\n\tint\t\t\ti;\n\tt_cmd_flag\tcmd_f;\n\n\tif (ac < 2)\n\t\treturn (EXIT_FAILURE);\n\tif ((i = set_cmd_flag(ac, av, &cmd_f, IS_OTOOL)) < 0)\n\t{\n\t\tif (i == HELP_FLAG)\n\t\t\treturn (EXIT_SUCCESS);\n\t\treturn (EXIT_FAILURE);\n\t}\n\tret = EXIT_SUCCESS;\n\twhile (i < ac)\n\t{\n\t\tret = execute_is_otool(av[i], &cmd_f);\n\t\ti++;\n\t}\n\treturn (ret);\n}\n" }, { "alpha_fraction": 0.31665751338005066, "alphanum_fraction": 0.35239142179489136, "avg_line_length": 30.912281036376953, "blob_id": "36526ea4c7ff8165b5ccd695b07b9bd5707b6d97", "content_id": "0f9e9264229a65c411d710c2df294b9ff12d5409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 80, "num_lines": 57, "path": "/src/print_text_text_section.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* print_text_text_section.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:24:10 by aollivie #+# #+# */\n/* Updated: 2017/11/23 15:58:46 by aollivie ### 
########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tprint_chariot(int j)\n{\n\tif (j % 16 != 0)\n\t\twrite(1, \"\\n\", 1);\n}\n\nstatic void\tprint_infos(char *ptr, t_ptr *ptr_infos, int *j)\n{\n\tif (*(unsigned char *)ptr < 0x10)\n\t\twrite(1, \"0\", 1);\n\tft_print_adress(*(unsigned char *)ptr);\n\tif (!ptr_infos->is_indian)\n\t\twrite(1, \" \", 1);\n\tif (++(*j) % 4 == 0 && ptr_infos->is_indian)\n\t\twrite(1, \" \", 1);\n\tif (*j % 16 == 0)\n\t\twrite(1, \"\\n\", 1);\n}\n\nint\t\t\tprint_text_text_section(\n\tvoid *ptr, long double addr, int size, t_ptr *ptr_infos)\n{\n\tsize_t\tlen;\n\tint\t\tj;\n\n\tj = 0;\n\tlen = ptr_infos->is_64 ? ft_strlen(PADDING_STR_64) : ft_strlen(PADDING_STR);\n\twrite(1, CONTENT_TEXT_TEXT, ft_strlen(CONTENT_TEXT_TEXT));\n\twhile (j < size)\n\t{\n\t\tif (j % 16 == 0)\n\t\t{\n\t\t\tft_print_padding_adresse(\n\t\t\t\taddr, len, ptr_infos->is_64 ? PADDING_STR_64 : PADDING_STR);\n\t\t\twrite(1, \"\\t\", 1);\n\t\t}\n\t\tprint_infos(ptr, ptr_infos, &j);\n\t\taddr++;\n\t\tptr++;\n\t}\n\tprint_chariot(j);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.30014124512672424, "alphanum_fraction": 0.33121469616889954, "avg_line_length": 46.20000076293945, "blob_id": "897fe01b5fba7587dd11e7959bf137ea19e6b41f", "content_id": "971b5c16c67fae8fb09f996b5db2457541df4a90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1416, "license_type": "no_license", "max_line_length": 80, "num_lines": 30, "path": "/indian/src/ft_find_segment_section_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_segment_section_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:38 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:33:57 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstruct section_64\t*ft_find_segment_section_64(char *ptr,\n\tstruct mach_header_64 *header, char *segment_name, char *section_name)\n{\n\tstruct load_command\t\t\t*lc;\n\tstruct segment_command_64\t*seg;\n\tstruct section_64\t\t\t*section;\n\n\tlc = (void *)ptr + sizeof(*header);\n\tif (ft_check_load(lc, header->ncmds, header->sizeofcmds))\n\t\treturn (NULL);\n\tif (!(seg = ft_find_segment_64(lc, header->ncmds, segment_name)))\n\t\treturn (NULL);\n\tif (!(section = ft_find_section_64(seg, section_name)))\n\t\treturn (NULL);\n\treturn (section);\n}\n" }, { "alpha_fraction": 0.3963930904865265, "alphanum_fraction": 0.4262053668498993, "avg_line_length": 25.900989532470703, "blob_id": "327e0cd8cc0303b54a609b5bb5eab6d0766bb3ab", "content_id": "71381e5eb1b9cc3981a20a32a39ea63ee77a6d6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2717, "license_type": "no_license", "max_line_length": 80, "num_lines": 101, "path": "/indian/src/ft_sort64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_sort64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:40 by aollivie #+# #+# */\n/* 
Updated: 2017/11/07 16:37:50 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tset_index64(\n\tstruct nlist_64 **tmp, int *index, int i, struct nlist_64 **list)\n{\n\t*tmp = list[i];\n\t*index = i;\n}\n\nstatic int\tloop_sort64(struct nlist_64 **list, int nsyms, char *stringtable)\n{\n\tstruct nlist_64\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index64(&tmp, &index, i, list);\n\t\tif (list[i])\n\t\t{\n\t\t\tcmp = ft_strcmp(stringtable + tmp->n_un.n_strx,\n\t\t\t\tstringtable + list[i]->n_un.n_strx);\n\t\t\tif (cmp > 0)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value > list[i]->n_value)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n\nstatic int\tloop_sort64_reverse(struct nlist_64 **list, int nsyms, char *s)\n{\n\tstruct nlist_64\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index64(&tmp, &index, i, list);\n\t\tif (list[i])\n\t\t{\n\t\t\tcmp = ft_strcmp(s + tmp->n_un.n_strx,\n\t\t\t\ts + list[i]->n_un.n_strx);\n\t\t\tif (cmp < 0)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value < list[i]->n_value)\n\t\t\t\tset_index64(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n\nint\t\t\tft_sort64(\n\tstruct nlist_64 *array, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos)\n{\n\tstruct nlist_64\t\t**list;\n\tint\t\t\t\t\tj;\n\tint\t\t\t\t\tindex;\n\n\tif (!(list = ft_copy_nlist64(array, nsyms)))\n\t\treturn (EXIT_FAILURE);\n\tj = -1;\n\twhile (++j < nsyms)\n\t{\n\t\tif (seg_infos->cmd_f->p)\n\t\t\tindex = j;\n\t\telse if (seg_infos->cmd_f->r)\n\t\t\tindex = loop_sort64_reverse(list, nsyms, stringtable);\n\t\telse\n\t\t\tindex = loop_sort64(list, nsyms, stringtable);\n\t\tprint_outpout_64(list[index], stringtable, seg_infos, seg_infos->cmd_f);\n\t\tfree(list[index]);\n\t\tlist[index] = NULL;\n\t}\n\tfree(list);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.4637185335159302, "alphanum_fraction": 0.48595163226127625, "avg_line_length": 29.09558868408203, "blob_id": "47f70499b456b04977d034c9661d54a7ddfac1e9", "content_id": "336a28efd2bcccd813add16d95dd51f09f4637fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4093, "license_type": "no_license", "max_line_length": 80, "num_lines": 136, "path": "/src/ft_fat_file.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_fat_file.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:03 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:46:15 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic char\t*ft_cputype_name(int cputype)\n{\n\tif (cputype == CPU_TYPE_POWERPC)\n\t\treturn (\"ppc\");\n\telse if (cputype == CPU_TYPE_I386)\n\t\treturn (\"i386\");\n\telse if (cputype == CPU_TYPE_VAX)\n\t\treturn (\"vax\");\n\telse if (cputype == CPU_TYPE_MC680x0)\n\t\treturn (\"mc680x0\");\n\telse if 
(cputype == CPU_TYPE_MC98000)\n\t\treturn (\"mc98000\");\n\telse if (cputype == CPU_TYPE_SPARC)\n\t\treturn (\"sparc\");\n\telse if (cputype == CPU_TYPE_I860)\n\t\treturn (\"i860\");\n\treturn (UNKNOW);\n}\n\nstatic int\tft_print_arch_name(\n\tchar *file_name, void *ptr, void *ptr_end, int is_otool)\n{\n\tint\t\tis_indian;\n\tint\t\tcputype;\n\n\tif (ptr + sizeof(unsigned int) > ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tis_indian = swap_uint32(*(int *)ptr) == MH_MAGIC ? 1 : 0;\n\tcputype = swap_uint32_check(\n\t\t*(int *)(ptr + sizeof(unsigned int)), is_indian);\n\tif (!is_otool)\n\t\tft_putstr(\"\\n\");\n\tft_putstr(file_name);\n\tft_putstr(\" (\");\n\tif (!is_otool)\n\t\tft_putstr(FOR_ARCH);\n\telse\n\t\tft_putstr(\"architecture\");\n\tft_putstr(\" \");\n\tft_putstr(ft_cputype_name(cputype));\n\tft_putstr(\"):\\n\");\n\treturn (EXIT_SUCCESS);\n}\n\nstatic int\tft_fat_file_all(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tt_fat_infos f_i;\n\n\tf_i.f_h = (struct fat_header *)ptr;\n\tf_i.nb_arch = swap_uint32(f_i.f_h->nfat_arch);\n\tf_i.f_a = (void *)f_i.f_h + sizeof(*f_i.f_h);\n\tf_i.offset = 0;\n\twhile (f_i.nb_arch)\n\t{\n\t\tf_i.offset = swap_uint32(f_i.f_a->offset);\n\t\tif (ft_print_arch_name(av, ptr + f_i.offset, ptr_end, cmd_f->is_otool))\n\t\t\treturn (EXIT_FAILURE);\n\t\tft_otool(ptr + f_i.offset, ptr_end, NULL, cmd_f);\n\t\tif ((void *)(f_i.f_a = (void *)f_i.f_a + sizeof(*f_i.f_a))\n\t\t+ sizeof(t_fat_infos) > (void *)ptr_end)\n\t\t\treturn (EXIT_FAILURE);\n\t\tf_i.nb_arch--;\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nstatic int\tft_fat_file_all_one(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tt_fat_infos f_i;\n\n\tf_i.f_h = (struct fat_header *)ptr;\n\tf_i.nb_arch = swap_uint32(f_i.f_h->nfat_arch);\n\tf_i.f_a = (void *)f_i.f_h + sizeof(*f_i.f_h);\n\tf_i.offset = 0;\n\twhile (f_i.nb_arch)\n\t{\n\t\tf_i.offset = swap_uint32(f_i.f_a->offset);\n\t\tif (!cmd_f->is_otool)\n\t\t{\n\t\t\tft_putstr(av);\n\t\t\tft_putstr(\":\\n\");\n\t\t}\n\t\tft_otool(ptr + f_i.offset, ptr_end, av, cmd_f);\n\t\tif ((void *)(f_i.f_a = (void *)f_i.f_a + sizeof(*f_i.f_a))\n\t\t+ sizeof(t_fat_infos) > (void *)ptr_end)\n\t\t\treturn (EXIT_FAILURE);\n\t\tf_i.nb_arch--;\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nint\t\t\tft_fat_file(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tstruct fat_header\t*f_h;\n\tstruct fat_arch\t\t*f_a;\n\tint\t\t\t\t\tnb_arch;\n\n\tif ((void *)(f_h = (void *)ptr) + sizeof(*f_h) >= (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tnb_arch = swap_uint32(f_h->nfat_arch) + 1;\n\tif ((void *)(f_a = (void *)f_h + sizeof(*f_h))\n\t+ sizeof(*f_a) >= (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\twhile (--nb_arch)\n\t{\n\t\tif (swap_uint32(f_a->cputype) == CPU_TYPE_X86_64)\n\t\t{\n\t\t\tif ((long)swap_uint32(f_a->offset) >= 0)\n\t\t\t\treturn (ft_otool(ptr + swap_uint32(f_a->offset),\n\t\t\t\tptr_end, av, cmd_f));\n\t\t}\n\t\tif ((void *)(f_a = (void *)f_a + sizeof(*f_a)) + sizeof(*f_a)\n\t\t> (void *)ptr_end)\n\t\t\treturn (EXIT_FAILURE);\n\t}\n\tif ((nb_arch = swap_uint32(f_h->nfat_arch)) == 1)\n\t\treturn (ft_fat_file_all_one(ptr, ptr_end, av, cmd_f));\n\treturn (ft_fat_file_all(ptr, ptr_end, av, cmd_f));\n}\n" }, { "alpha_fraction": 0.40427929162979126, "alphanum_fraction": 0.41741740703582764, "avg_line_length": 25.376237869262695, "blob_id": "d5576a36231796f2ed4955fa7a8b9d44f38ae686", "content_id": "80d7a315fa0a42964391f52f227e9c4af6aec193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 
2664, "license_type": "no_license", "max_line_length": 80, "num_lines": 101, "path": "/indian/src/ft_sort.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_sort.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:29 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:22:31 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tset_index(\n\tstruct nlist **tmp, int *index, int i, struct nlist **list)\n{\n\t*tmp = list[i];\n\t*index = i;\n}\n\nstatic int\tloop_sort(struct nlist **list, int nsyms, char *stringtable)\n{\n\tstruct nlist\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index(&tmp, &index, i, list);\n\t\tif (list[i])\n\t\t{\n\t\t\tcmp = ft_strcmp(stringtable + tmp->n_un.n_strx,\n\t\t\t\tstringtable + list[i]->n_un.n_strx);\n\t\t\tif (cmp > 0)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value > list[i]->n_value)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n\nstatic int\tloop_sort_reverse(struct nlist **list, int nsyms, char *s)\n{\n\tstruct nlist\t\t*tmp;\n\tint\t\t\t\t\ti;\n\tint\t\t\t\t\tcmp;\n\tint\t\t\t\t\tindex;\n\n\ttmp = NULL;\n\tindex = 0;\n\ti = -1;\n\twhile (++i < nsyms)\n\t{\n\t\tif (!tmp && list[i])\n\t\t\tset_index(&tmp, &index, i, list);\n\t\tif (list[i])\n\t\t{\n\t\t\tcmp = ft_strcmp(s + tmp->n_un.n_strx,\n\t\t\t\ts + list[i]->n_un.n_strx);\n\t\t\tif (cmp < 0)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t\telse if (!cmp && tmp->n_value < list[i]->n_value)\n\t\t\t\tset_index(&tmp, &index, i, list);\n\t\t}\n\t}\n\treturn (index);\n}\n\nint\t\t\tft_sort(\n\tstruct nlist *array, int nsyms,\n\tchar *stringtable, t_seg_infos *seg_infos)\n{\n\tstruct nlist\t\t**list;\n\tint\t\t\t\t\tj;\n\tint\t\t\t\t\tindex;\n\n\tif (!(list = ft_copy_nlist(array, nsyms)))\n\t\treturn (EXIT_FAILURE);\n\tj = -1;\n\twhile (++j < nsyms)\n\t{\n\t\tif (seg_infos->cmd_f->p)\n\t\t\tindex = j;\n\t\telse if (seg_infos->cmd_f->r)\n\t\t\tindex = loop_sort_reverse(list, nsyms, stringtable);\n\t\telse\n\t\t\tindex = loop_sort(list, nsyms, stringtable);\n\t\tprint_outpout(list[index], stringtable, seg_infos, seg_infos->cmd_f);\n\t\tfree(list[index]);\n\t\tlist[index] = NULL;\n\t}\n\tfree(list);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.43021711707115173, "alphanum_fraction": 0.45680105686187744, "avg_line_length": 41.58490753173828, "blob_id": "53e3b4b19f7da238b6fb3d2a3938f74460cc0a41", "content_id": "a145981b3e9890e8c3f3eb4fae3468e1d526e9c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2257, "license_type": "no_license", "max_line_length": 80, "num_lines": 53, "path": "/src/ft_find_segment_section_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_segment_section_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:38 by aollivie #+# #+# */\n/* Updated: 2017/11/23 00:56:19 by aollivie ### 
########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\t\t\tcheck_load(struct mach_header_64 *header,\n\tt_ptr *ptr_infos, struct load_command *lc)\n{\n\tt_load\t\t\t\t\t\tload;\n\n\tload.is_indian = swap_uint32(*(int *)ptr_infos->ptr) == MH_MAGIC_64 ? 1 : 0;\n\tload.ncmds = swap_uint32_check(header->ncmds, load.is_indian);\n\tload.sizeofcmds = swap_uint32_check(header->sizeofcmds, load.is_indian);\n\tload.ptr = ptr_infos->ptr;\n\tload.ptr_end = ptr_infos->ptr_end;\n\tif (ft_check_load(lc, ptr_infos->ptr_end, &load))\n\t\treturn (EXIT_FAILURE);\n\treturn (EXIT_SUCCESS);\n}\n\nstruct section_64\t*ft_find_segment_section_64(t_ptr *ptr_infos,\n\tint is_indian, char *segment_name, char *section_name)\n{\n\tstruct mach_header_64\t\t*header;\n\tstruct load_command\t\t\t*lc;\n\tstruct segment_command_64\t*seg;\n\tstruct section_64\t\t\t*section;\n\n\tif ((void *)(header = (struct mach_header_64 *)ptr_infos->ptr)\n\t+ sizeof(struct mach_header) > (void *)ptr_infos->ptr_end)\n\t\treturn (NULL);\n\tif ((void *)(lc = (void *)ptr_infos->ptr + sizeof(*header))\n\t+ sizeof(struct load_command) > (void *)ptr_infos->ptr_end)\n\t\treturn (NULL);\n\tif (check_load(header, ptr_infos, lc))\n\t\treturn (NULL);\n\tptr_infos->is_indian = is_indian;\n\tif (!(seg = ft_find_segment_64(lc,\n\t\tswap_uint32_check(header->ncmds, is_indian), segment_name, ptr_infos)))\n\t\treturn (NULL);\n\tif (!(section = ft_find_section_64(seg, section_name, ptr_infos)))\n\t\treturn (NULL);\n\treturn (section);\n}\n" }, { "alpha_fraction": 0.2974812686443329, "alphanum_fraction": 0.31926479935646057, "avg_line_length": 42.20588302612305, "blob_id": "6fa90f90f988b840d89d700f5df9eb29b6921ad4", "content_id": "d8d903406095dc8da9c7e6357023b24b86aa3e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1469, "license_type": "no_license", "max_line_length": 80, "num_lines": 34, "path": "/indian/src/handle_text.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle_text.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:59 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:23:00 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nint\thandle_text(char *ptr, char *ptr_end, char *av)\n{\n\tstruct mach_header\t\t*header;\n\tstruct section\t\t\t*section;\n\n\tif ((void *)(header = (struct mach_header *)ptr) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)ptr + header->sizeofcmds > (void *)ptr_end)\n\t{\n\t\treturn (EXIT_FAILURE);\n\t}\n\tif (!(section = ft_find_segment_section(\n\t\tptr, header, SEG_TEXT, SECT_TEXT)))\n\t\treturn (EXIT_FAILURE);\n\twrite(1, av, ft_strlen(av));\n\twrite(1, \":\\n\", 2);\n\tprint_text_text_section(\n\t\t(void*)ptr + section->offset, section->addr, section->size, 0);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.4099283516407013, "alphanum_fraction": 0.42476969957351685, "avg_line_length": 54.82857131958008, "blob_id": "b9beeff2a2ffce06bc57b5668bfe86ae2b395bc2", "content_id": "07f7da886170e51292bdfda584106f9f605cd154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1954, 
"license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/indian/inc/fr/ft_mess.h", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_mess.h :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:42:56 by aollivie #+# #+# */\n/* Updated: 2017/11/07 17:02:21 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#ifndef FT_MESS_H\n# define FT_MESS_H\n\n# define CONTENT_TEXT_TEXT \t\"Contenu de (__TEXT,__text) section\\n\"\n# define ERROR_LOAD_MIN_SIZE\t\"Mach-O load command avec taille < 8 octets\\n\"\n# define ERROR_LOAD_SIZE\t\"Mach-O segment load command taille trop petite\\n\"\n# define ERR_VA \"Le fichier n'est pas reconnu comme un objet valide\\n\"\n# define ERROR_FORMAT_FILE\tERR_VA\n# define NO_ACCESS\t\t\t\"Acces refuse pour : \"\n# define ERROR_FLAG\t\t\t\"ft_nm: Argument inconnu\"\n# define TYPE_HELP\t\t\t\"Entrez './ft_nm -aide' pour plus d'informations\\n\"\n# define HELP\t\t\t\t\"-aide\"\n# define USAGE \"USAGE: nm [options] <fichier>\\n\\n\"\n# define OPTION \"Options:\\n\\n\"\n# define O_G \"-g Affiche uniquement symboles (extern) global\\n\\n\"\n# define O_U \"-u Affiche uniquement symboles indefenis\\n\\n\"\n# define O_UU \"-U N'affiche pas symboles indefenis\\n\\n\"\n# define O_P \"-p Pas de tri; affiche les symboles par odre d'apparition\\n\\n\"\n# define O_J \"-j Affiche uniquement le nom des symboles\\n\\n\"\n# define O_R \"-r Tri inverse\\n\\n\"\n# define HELP_MESS\tUSAGE OPTION O_G O_U O_UU O_P O_J O_R\n\n#endif\n" }, { "alpha_fraction": 0.44897282123565674, "alphanum_fraction": 0.46785950660705566, "avg_line_length": 23.942148208618164, "blob_id": "7397b8e9ca4ed18b2c20189f3fece215507e43e6", "content_id": "0f2ae571f97e553531eabbcddee7e7d86f72fa70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3018, "license_type": "no_license", "max_line_length": 181, "num_lines": 121, "path": "/Makefile", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "# **************************************************************************** #\n# #\n# ::: :::::::: #\n# Makefile :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: aollivie <[email protected]> +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2014/11/14 17:07:06 by aollivie #+# #+# #\n# Updated: 2017/11/22 18:54:56 by aollivie ### ########.fr #\n# #\n# **************************************************************************** #\n\n\nDEBUG=yes\nCC=clang\n\nifeq ($(DEBUG), yes)\n\t\t\t\tCFLAGS= -ggdb3 -Wall -Wextra -Wno-unused-variable -Wno-unused-parameter -O1 -g #-fsanitize=address -fno-omit-frame-pointer # -g -fsanitize=address,undefined # -g -ansi -pedantic\nelse\n\t\t# CFLAGS= -Wall -Wextra -Werror #-O1 -g #-fsanitize=address -fno-omit-frame-pointer\n\t\t# CFLAGS= -shared -fPIC -Wall -Wextra -Werror\n\t\t# CFLAGS=\n\t\tCFLAGS= -Wall -Wextra -Werror\nendif\n\nNAME_OTOOL = ft_otool\n\nNAME_NM = ft_nm\n\nLIBFT =libft/libft.a\n\n\n\nI_DIR= -I inc/ -I./libft/inc -I inc/en\n\nifeq ($(LANG),fr)\n\tI_DIR= -I inc/ -I./libft/inc -I inc/fr\nendif\n\nO_DIR= obj\n\nMKDIR = mkdir\n\nC_SRC =\tcheck_valid_file.c \\\n\t\tft_ar_file.c \\\n\t\tft_check_load.c \\\n\t\tft_copy_nlist.c \\\n\t\tft_copy_nlist64.c \\\n\t\tft_fat_file.c \\\n\t\tft_find_section.c \\\n\t\tft_find_section_64.c 
\\\n\t\tft_find_segment.c \\\n\t\tft_find_segment_64.c \\\n\t\tft_find_segment_section.c \\\n\t\tft_find_segment_section_64.c \\\n\t\tft_format_archive_name.c \\\n\t\tft_get_adress_str.c \\\n\t\tft_infos_segment.c \\\n\t\tft_infos_segment_64.c \\\n\t\tft_init_seg_infos.c \\\n\t\tft_otool.c \\\n\t\tft_print_adress.c \\\n\t\tft_print_archive_name.c \\\n\t\tft_print_padding_adresse.c \\\n\t\tft_sort.c \\\n\t\tft_sort64.c \\\n\t\thandle.c \\\n\t\thandle_64.c \\\n\t\thandle_64_text.c \\\n\t\thandle_text.c \\\n\t\tprint_outpout.c \\\n\t\tprint_outpout_64.c \\\n\t\tprint_outpout_format.c \\\n\t\tprint_outpout_format_64.c \\\n\t\tprint_text_text_section.c \\\n\t\tsort_and_print_outpout.c \\\n\t\tsort_and_print_outpout_64.c \\\n\t\tswap_uint32.c \\\n\t\tset_cmd_flag.c \\\n\t\tft_loop_sort64.c \\\n\t\tft_loop_sort.c\n\nVPATH= src\n\nOBJS= $(C_SRC:%.c=$(O_DIR)/%.o)\n\n.PHONY : all clean fclean re\n\nall :\n\tmake -C libft\n\tmake -j $(NAME_OTOOL)\n\tmake -j $(NAME_NM)\n\nifeq ($(DEBUG),yes)\n\t\t\t\t@echo \"Generation mode debug\"\nelse\n\t\t\t\t@echo \"Generation mode release\"\nendif\n\n$(NAME_OTOOL):$(OBJS)\n\t\t\t\t$(CC) $(CFLAGS) $(I_DIR) $^ src/main_otool.c $(LIBFT) -o $@\n\n$(NAME_NM):$(OBJS)\n\t\t$(CC) $(CFLAGS) $(I_DIR) $^ src/main_nm.c $(LIBFT) -o $@\n\n$(O_DIR)/%.o: %.c\n\t\t\t\t$(CC) $(CFLAGS) $(I_DIR) -c $< -o $@\n\n$(OBJS): | $(O_DIR)\n\n$(O_DIR):\n\t\t\t\t$(MKDIR) $(O_DIR)\nclean :\n\t\trm -rf $(O_DIR)\n\t\tmake clean -C libft\n\nfclean : clean\n\t\t@rm -rf $(NAME_OTOOL) $(NAME_NM)\n\t\tmake fclean -C libft\n\nre : fclean all\n" }, { "alpha_fraction": 0.30122700333595276, "alphanum_fraction": 0.3196319043636322, "avg_line_length": 32.26530456542969, "blob_id": "822570299f6b3a59a9d06b9c0969dae9af781967", "content_id": "d423357cea2aa125ef3076c51dacf0173dd01c05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 80, "num_lines": 49, "path": "/src/ft_copy_nlist.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_copy_nlist.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:19:49 by aollivie #+# #+# */\n/* Updated: 2017/11/23 15:39:23 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\t\tft_set_nlist(struct nlist *l, struct nlist *array, int i)\n{\n\tl->n_type = array[i].n_type;\n\tl->n_sect = array[i].n_sect;\n\tl->n_value = array[i].n_value;\n\tl->n_un.n_strx = array[i].n_un.n_strx;\n}\n\nstruct nlist\t**ft_copy_nlist(\n\tstruct nlist *array, int nsyms, t_seg_infos *seg_infos)\n{\n\tstruct nlist\t**list;\n\tstruct nlist\t*l;\n\tint\t\t\t\ti;\n\n\tif (!(list = malloc(sizeof(struct nlist *) * nsyms)))\n\t\treturn (NULL);\n\ti = 0;\n\twhile (i < nsyms)\n\t{\n\t\tif (!(l = malloc(sizeof(struct nlist)))\n\t\t|| (void *)((void *)array + (i * sizeof(*l))) + sizeof(*l)\n\t\t> seg_infos->ptr_end)\n\t\t{\n\t\t\twhile (--i >= 0)\n\t\t\t\tfree(list[i]);\n\t\t\tfree(list);\n\t\t\treturn (NULL);\n\t\t}\n\t\tft_set_nlist(l, array, i);\n\t\tlist[i] = l;\n\t\ti++;\n\t}\n\treturn (list);\n}\n" }, { "alpha_fraction": 0.3799639046192169, "alphanum_fraction": 0.39620938897132874, "avg_line_length": 33.625, "blob_id": "89cdb3d95f30baac71c01ec9f0dfc8374650bde1", "content_id": 
"5ef57854be4ce4cc5539ef4526cf92f24767fd64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2216, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/src/ft_ar_file.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_ar_file.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:19:29 by aollivie #+# #+# */\n/* Updated: 2017/11/29 13:27:17 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tprint_archive_name(char *archive_name, t_cmd_flag *cmd_f)\n{\n\tif (!cmd_f->is_otool)\n\t{\n\t\twrite(1, \"\\n\", 1);\n\t\twrite(1, archive_name, ft_strlen(archive_name));\n\t\twrite(1, \":\\n\", 2);\n\t}\n}\n\nstatic int\tft_loop_ar_file(\n\tstruct ar_hdr *ar, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tint\t\t\t\tnb;\n\tchar\t\t\t*archive_name;\n\tint\t\t\t\tlen;\n\n\tif ((len = ft_atoi(ar->ar_size)) <= 0)\n\t\treturn (EXIT_FAILURE);\n\twhile ((char *)(\n\t\tar = (void *)ar + sizeof(*ar) + len) + sizeof(*ar) < ptr_end)\n\t{\n\t\tif ((len = ft_atoi(ar->ar_size)) <= 0)\n\t\t\treturn (EXIT_FAILURE);\n\t\tnb = ft_atoi(ar->ar_name + ft_strlen(AR_EFMT1));\n\t\tif (check_valid_file((void *)ar + nb + sizeof(*ar), ptr_end))\n\t\t\tcontinue ;\n\t\tif (!(archive_name = ft_format_archive_name(\n\t\t\tav, \"(\", (void *)ar + sizeof(*ar), \")\")))\n\t\t\treturn (EXIT_FAILURE);\n\t\tprint_archive_name(archive_name, cmd_f);\n\t\tft_otool(\n\t\t\t(void *)ar + nb + sizeof(*ar), ptr_end, archive_name, cmd_f);\n\t\tfree(archive_name);\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nint\t\t\tft_ar_file(char *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tstruct ar_hdr\t*ar;\n\n\tif ((void *)\n\t(ar = (void *)ptr + SARMAG) + sizeof(struct ar_hdr) + sizeof(*ar)\n\t> (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (cmd_f->is_otool)\n\t\tft_print_archive_name(\"Archive : \", av);\n\treturn (ft_loop_ar_file(ar, ptr_end, av, cmd_f));\n}\n" }, { "alpha_fraction": 0.35057735443115234, "alphanum_fraction": 0.3718244731426239, "avg_line_length": 31.313432693481445, "blob_id": "f9abd91da32e00e3b024ee38dc0bd51a89fe4e07", "content_id": "71bb10789935f503b28134da4ff29c8425554def", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2165, "license_type": "no_license", "max_line_length": 80, "num_lines": 67, "path": "/indian/src/ft_fat_file.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_fat_file.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:03 by aollivie #+# #+# */\n/* Updated: 2017/11/17 14:48:33 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic int\tft_fat_file_all(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tt_fat_infos f_i;\n\n\tf_i.f_h = (struct fat_header *)ptr;\n\tf_i.nb_arch = swap_uint32(f_i.f_h->nfat_arch);\n\tf_i.f_a = (void *)f_i.f_h + sizeof(*f_i.f_h);\n\tf_i.offset = 0;\n\twhile (f_i.nb_arch)\n\t{\n\t\tf_i.offset = 
swap_uint32(f_i.f_a->offset);\n\t\tif (!cmd_f->is_otool)\n\t\t{\n\t\t\tft_putstr(av);\n\t\t\tft_putstr(\":\\n\");\n\t\t}\n\t\thandle(ptr + f_i.offset, ptr_end, cmd_f);\n\t\tf_i.f_a = (void *)f_i.f_a + sizeof(*f_i.f_a);\n\t\tf_i.nb_arch--;\n\t}\n\treturn (EXIT_SUCCESS);\n}\n\nint\t\t\tft_fat_file(\n\tchar *ptr, char *ptr_end, char *av, t_cmd_flag *cmd_f)\n{\n\tstruct fat_header\t*f_h;\n\tstruct fat_arch\t\t*f_a;\n\tint\t\t\t\t\tnb_arch;\n\tint\t\t\t\t\toffset;\n\n\tf_h = (struct fat_header *)ptr;\n\tnb_arch = swap_uint32(f_h->nfat_arch);\n\tf_a = (void *)f_h + sizeof(*f_h);\n\toffset = 0;\n\twhile (nb_arch)\n\t{\n\t\tif (swap_uint32(f_a->cputype) == CPU_TYPE_X86_64)\n\t\t{\n\t\t\toffset = swap_uint32(f_a->offset);\n\t\t\tif (offset >= 0)\n\t\t\t\treturn (ft_otool(ptr + offset, ptr_end, av, cmd_f));\n\t\t}\n\t\tf_a = (void *)f_a + sizeof(*f_a);\n\t\tnb_arch--;\n\t}\n\treturn (ft_fat_file_all(ptr, ptr_end, av, cmd_f));\n}\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 29, "blob_id": "2332f3b739761bfce184dfa52ae34496de960400", "content_id": "842e0a3985e0aea6ec18e952a457ee0a03fd6a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 30, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/todo.txt", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "ft_nm 32bit.o ft_nm multifile\n" }, { "alpha_fraction": 0.3710285425186157, "alphanum_fraction": 0.39364567399024963, "avg_line_length": 36.8979606628418, "blob_id": "13cc4a9581e6ae5a3b8ea604f0862df7b62d2f8e", "content_id": "0fea3cac413206f3a5680e3f2b02caa9f3c253c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1857, "license_type": "no_license", "max_line_length": 80, "num_lines": 49, "path": "/src/handle_text.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* handle_text.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:22:59 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:23:00 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstatic void\tprint_name(char *av)\n{\n\tif (av)\n\t{\n\t\twrite(1, av, ft_strlen(av));\n\t\twrite(1, \":\\n\", 2);\n\t}\n}\n\nint\t\t\thandle_text(char *ptr, char *ptr_end, char *av, int is_indian)\n{\n\tstruct mach_header\t\t*header;\n\tstruct section\t\t\t*section;\n\tt_ptr\t\t\t\t\tptr_infos;\n\n\tptr_infos.ptr = ptr;\n\tptr_infos.ptr_end = ptr_end;\n\tptr_infos.is_indian = is_indian;\n\tptr_infos.is_64 = 0;\n\tif ((void *)(header = (struct mach_header *)ptr)\n\t+ sizeof(struct mach_header) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif ((void *)ptr\n\t+ swap_uint32_check(header->sizeofcmds, is_indian) > (void *)ptr_end)\n\t\treturn (EXIT_FAILURE);\n\tif (!(section = ft_find_segment_section(\n\t\t&ptr_infos, is_indian, SEG_TEXT, SECT_TEXT)))\n\t\treturn (EXIT_FAILURE);\n\tprint_name(av);\n\tprint_text_text_section(\n\t\t(void*)ptr + swap_uint32_check(section->offset, is_indian)\n\t\t, swap_uint32_check(section->addr, 
is_indian),\n\t\tswap_uint32_check(section->size, is_indian), &ptr_infos);\n\treturn (EXIT_SUCCESS);\n}\n" }, { "alpha_fraction": 0.3086419701576233, "alphanum_fraction": 0.3391812741756439, "avg_line_length": 40.5945930480957, "blob_id": "4b997f67c5dd9570c8fb19348a449ab763054815", "content_id": "caa65a9ba2a839ac186f01cc827d10e7fe76ecdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/src/ft_find_section_64.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_section_64.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:11 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:32:27 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstruct section_64\t*ft_find_section_64(\n\tstruct segment_command_64 *segment, char *section_name, t_ptr *ptr_infos)\n{\n\tstruct section_64\t\t*section;\n\tuint32_t\t\t\t\tloop;\n\n\tloop = 0;\n\tif ((void *)(section = (void *)segment + sizeof(*segment))\n\t+ sizeof(struct section_64) > (void *)ptr_infos->ptr_end)\n\t\treturn (NULL);\n\twhile (loop < swap_uint32_check(segment->nsects, ptr_infos->is_indian))\n\t{\n\t\tif (!ft_strcmp(section->sectname, section_name))\n\t\t{\n\t\t\treturn (section);\n\t\t}\n\t\tif ((void *)(section = (void *)section + sizeof(*section))\n\t\t+ sizeof(struct section_64) > (void *)ptr_infos->ptr_end)\n\t\t\treturn (NULL);\n\t\tloop++;\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.36059701442718506, "alphanum_fraction": 0.37850746512413025, "avg_line_length": 45.52777862548828, "blob_id": "7ac30af2638abef4e17ebe0f734b32b6546f6732", "content_id": "912a929925f827b5b8adc28f199b7602e08dd5cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1675, "license_type": "no_license", "max_line_length": 80, "num_lines": 36, "path": "/src/ft_find_segment_section.c", "repo_name": "aoll/nm-otool", "src_encoding": "UTF-8", "text": "/* ************************************************************************** */\n/* */\n/* ::: :::::::: */\n/* ft_find_segment_section.c :+: :+: :+: */\n/* +:+ +:+ +:+ */\n/* By: aollivie <[email protected]> +#+ +:+ +#+ */\n/* +#+#+#+#+#+ +#+ */\n/* Created: 2017/11/07 16:20:59 by aollivie #+# #+# */\n/* Updated: 2017/11/07 16:33:15 by aollivie ### ########.fr */\n/* */\n/* ************************************************************************** */\n\n#include \"ft_otool.h\"\n\nstruct section\t*ft_find_segment_section(t_ptr *ptr_infos,\n\tint is_indian, char *segment_name, char *section_name)\n{\n\tstruct mach_header\t\t\t*header;\n\tstruct load_command\t\t\t*lc;\n\tstruct segment_command\t\t*seg;\n\tstruct section\t\t\t\t*section;\n\n\tif ((void *)(header = (struct mach_header *)ptr_infos->ptr)\n\t+ sizeof(struct mach_header) > (void *)ptr_infos->ptr_end)\n\t\treturn (NULL);\n\tif ((void *)(lc = (void *)ptr_infos->ptr + sizeof(*header))\n\t+ sizeof(struct load_command) > (void *)ptr_infos->ptr_end)\n\t\treturn (NULL);\n\tptr_infos->is_indian = is_indian;\n\tif (!(seg = ft_find_segment(lc,\n\t\tswap_uint32_check(header->ncmds, is_indian), segment_name, ptr_infos)))\n\t\treturn (NULL);\n\tif (!(section = 
ft_find_section(seg, section_name, ptr_infos)))\n\t\treturn (NULL);\n\treturn (section);\n}\n" } ]
51
krishnaramya/student-teacher
https://github.com/krishnaramya/student-teacher
f1b917b37c1ee3c68d935b45d6a4e3a0761c6943
2d445d489f284a210e6ab9af1aaf1ebc950fa532
04cd97781b185ed336253506243a4cad3b0d81ea
refs/heads/master
2021-05-19T00:28:53.264390
2020-03-31T03:57:46
2020-03-31T03:57:46
251,494,844
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6663611531257629, "alphanum_fraction": 0.6677359938621521, "avg_line_length": 38.672725677490234, "blob_id": "0460f6c6c1e0acb73b063b1758503948ab0db5c2", "content_id": "e377eb61afb55a91c8911ee8453803c9ffc4451a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2182, "license_type": "no_license", "max_line_length": 117, "num_lines": 55, "path": "/startService.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "import holder\nimport engine\nimport logging.config\nimport logging as log\nimport config\nimport os\nimport pathlib\nfrom utils.Exceptions import AWSBucketEmpty, AWSIdEmpty, AWSSecretEmpty, StudentFileIsNotFound, ParquetFileIsNotFound\n\n\"\"\"\nThis module is for starting the application. \nAdded logging module to display the logs in proper format.\nAdded custom error handling messages\n\nRaises:\n AWSBucketEmpty: If AWS bucket is empty\n AWSIdEmpty: If AWS Id is empty\n AWSSecretEmpty: IF AWS Secret is empty\n StudentFileIsNotFound: If Student file is not found\n ParquetFileIsNotFound: If parquet file is not found\n\"\"\"\nif __name__==\"__main__\":\n try:\n logging.config.fileConfig('logging.ini', disable_existing_loggers=False)\n aws_bucket = os.getenv(\"AWS_BUCKET\",config.aws_bucket)\n if not aws_bucket:\n raise AWSBucketEmpty\n expiration_mins = int(os.getenv(\"AWS_URL_EXP_MIN\",config.aws_url_exp_min))\n aws_id = os.getenv(\"AWS_ID\",config.aws_id)\n if not aws_id:\n raise AWSIdEmpty\n aws_sec = os.getenv(\"AWS_SECRET\",config.aws_sec)\n if not aws_sec:\n raise AWSSecretEmpty\n file = pathlib.Path(\"data/students.csv\")\n if not file.exists():\n raise StudentFileIsNotFound\n file = pathlib.Path(\"data/teachers.parquet\")\n if not file.exists():\n raise ParquetFileIsNotFound\n holder.setS3(aws_id, aws_sec, aws_bucket,60*expiration_mins)\n engine.runme()\n except AWSBucketEmpty as E:\n log.error(\"----> AWS Bucket Name is Empty - Need to enter AWS_BUCKET name\")\n except AWSIdEmpty as E:\n log.error(\"----> AWS Id is Empty - Need to enter AWS_ID name\")\n except AWSSecretEmpty as E:\n log.error(\"----> AWS Secret is Empty - Need to enter AWS_SECRET name\")\n except StudentFileIsNotFound as E:\n log.error(\"----> Student CSV file is not found - Need to uplaod that file\")\n except ParquetFileIsNotFound as E:\n log.error(\"----> Parquet file is not found - Need to uplaod that file\")\n except Exception as E:\n log.error(\"----> failed to start service with error ---->\")\n log.exception(E)\n" }, { "alpha_fraction": 0.4588235318660736, "alphanum_fraction": 0.6352941393852234, "avg_line_length": 16, "blob_id": "b343879f9701d2140dceb6ff24678c83f8488f1b", "content_id": "0db2d41675c5de968f84af6a998f7be498b77687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/config.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "aws_bucket = \"\"\naws_id = \"\"\naws_sec=\"\"\ns3_timout = 1500000\naws_url_exp_min = 1500000\n" }, { "alpha_fraction": 0.39534884691238403, "alphanum_fraction": 0.6976743936538696, "avg_line_length": 13.333333015441895, "blob_id": "6cfced4f9cc72c9b2735059e53982f0ca82d1dab", "content_id": "45c6b85d9fcfdacadf18a72fd8f32e5f9fb0a3fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 43, "license_type": "no_license", "max_line_length": 14, 
"num_lines": 3, "path": "/requirements.txt", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "boto3==1.10.18\npandas=1.0.3\npyarrow=0.15.1\n" }, { "alpha_fraction": 0.688022255897522, "alphanum_fraction": 0.7075209021568298, "avg_line_length": 20.75757598876953, "blob_id": "82e16c352715b679eecc56e8df6c62b5c71d4596", "content_id": "98b32b57d7261229e2d9b07ffb14fc028a130818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 718, "license_type": "no_license", "max_line_length": 71, "num_lines": 33, "path": "/holder.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "from connectors.s3Connector import S3_connect\n\ns3_connector = None\n\ndef setS3(aws_id, aws_sec,aws_bucket,expiration_time):\n\t\"\"\"\n\tcreates the aws s3 connector object\n\n\tParameters:\n\t\taws_id (str): AWS Id\n\t\taws_sec (str)\" AWS Secret\n\t\taws_bucket (str): AWS bucket name\n\t\texpiration_time (int): Expired mins\n\t\"\"\"\n\tglobal s3_connector\n\ts3_connector = S3_connect(aws_id, aws_sec, aws_bucket,expiration_time)\n\ndef getS3():\n\t\"\"\"\n\tget the aws s3 connection object\n\n\tReturn:\n\t\tS3 Object: It returns aws s3 connection object\n\t\"\"\"\n\tglobal s3_connector\n\treturn s3_connector\n\ndef filewrite(data):\n\t\"\"\"\n\t\twrite content to file\n\t\"\"\"\n\twith open('student-teachers.json', 'a') as f:\n\t\tf.write(data.to_json(orient='records', lines=True))\n" }, { "alpha_fraction": 0.6173721551895142, "alphanum_fraction": 0.6184716820716858, "avg_line_length": 34.66666793823242, "blob_id": "e12bdc474102c8401b7681f5bd43e61d222bdf35", "content_id": "696c83f8fe737a6078471c8cba371e3faabfc576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 109, "num_lines": 51, "path": "/utils/DataProcessor.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "import holder\nimport pandas as pd\nfrom pyarrow.parquet import ParquetFile\nfrom utils.FileReaders import Reader\n\nclass DataProcessor():\n \"\"\"\n A class is used for processing the students and teachers data frames\n and comparing the class id from students and teachers dataframes and saves individual record into a file.\n stores the file into aws s3\n\n Methods\n -------\n processData()\n iterate the students and sets teacher's current position\n\n lookup(studentsdf, teachersdf)\n iterate the students and teachers by comparing class id.\n copied the resultant data into a file. 
\n    \"\"\"\n    def __init__(self, student, par_file):\n        \"\"\" \n        Parameters:\n            student (obj): generator of student data chunks\n            par_file (str): path to the teachers parquet file\n        \"\"\"\n        self.student = student\n        self.parquetObj = Reader.parquet_reader(par_file)\n\n    def processData(self):\n        \"\"\"\n        Iterates the student chunks and reads the teachers data for each chunk\n        \"\"\"\n        for students in self.student: \n            teacherData = Reader.get_parquet_data(self.parquetObj)\n            for teachers in teacherData:\n                self.lookup(students, teachers)\n\n    def lookup(self, studentsdf, teachersdf):\n        \"\"\"\n        Iterates the students and teachers, comparing class ids across both dataframes,\n        and appends each matched record to the output file\n        \"\"\"\n        for studentIndex, studentRow in studentsdf.iterrows():\n            cid = studentRow['cid']\n            for teacherIndex , teacherRow in teachersdf.iterrows():\n                if (cid == teacherRow['tcid']):\n                    holder.filewrite(pd.concat([studentRow, teacherRow]).to_frame().T)\n" }, { "alpha_fraction": 0.6935725212097168, "alphanum_fraction": 0.6980568170547485, "avg_line_length": 32.45000076293945, "blob_id": "0173a368a4f33890128271a9f2a9c0b602e317b4", "content_id": "700437e564fd87e21b5b6c86cc56f764eee45413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 84, "num_lines": 20, "path": "/engine.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "from utils.DataProcessor import DataProcessor\nfrom utils.FileReaders import Reader\nimport os\nimport holder\n\ndef runme():\n    \"\"\"\n    Sets the students and teachers files and calls the readers to load each file.\n    \"\"\"\n    student_file = os.getcwd() + '/data/students.csv'\n    reader_obj = Reader()\n    student_data = reader_obj.file_reader(student_file)\n\n    par_file = os.getcwd() + '/data/teachers.parquet'\n    data_process_obj = DataProcessor(student_data, par_file)\n    data_process_obj.processData()\n    json_file = os.getcwd() + '/student-teachers.json'\n\n    s3_connect = holder.getS3()\n    s3_connect.upload_to_aws(json_file, 'data', 'student-teachers.json')\n" }, { "alpha_fraction": 0.7089552283287048, "alphanum_fraction": 0.7313432693481445, "avg_line_length": 22.33333396911621, "blob_id": "f4ba86ee8585749ed95c62b91d0ee169d9e6a23b", "content_id": "bec421f210bb92aff49c6e0c94a1cb0ccd6488e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 268, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/Dockerfile", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "FROM python:3.6-stretch\nRUN mkdir /app\nEXPOSE 8080\nADD . 
/app\nWORKDIR /app\nENV AWS_BUCKET=\"\"\nENV AWS_ID=\"\"\nENV AWS_SECRET=\"\"\nRUN pip install -r requirements.txt\n# expose the port for the API\n# run and start the student-teacher service.\nCMD [ \"python\", \"startService.py\" ]\n" }, { "alpha_fraction": 0.7117516398429871, "alphanum_fraction": 0.7117516398429871, "avg_line_length": 22.789474487304688, "blob_id": "edfca23e93ae495ba6bd0922612b8e5cc524600e", "content_id": "d0ae36bd39b27357883f59088ebd2c4f455ee2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 50, "num_lines": 19, "path": "/utils/Exceptions.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "class AWSBucketEmpty(Exception):\n    \"\"\"Raised when the AWS Bucket is empty\"\"\"\n    pass\n\nclass AWSIdEmpty(Exception):\n    \"\"\"Raised when the AWS Id is empty\"\"\"\n    pass\n\nclass AWSSecretEmpty(Exception):\n    \"\"\"Raised when the AWS Secret is empty\"\"\"\n    pass\n\nclass StudentFileIsNotFound(Exception):\n    \"\"\"Raised when the Student File is not found\"\"\"\n    pass\n\nclass ParquetFileIsNotFound(Exception):\n    \"\"\"Raised when the Parquet File is not found\"\"\"\n    pass" }, { "alpha_fraction": 0.5917030572891235, "alphanum_fraction": 0.6062591075897217, "avg_line_length": 30.227272033691406, "blob_id": "cd48e09b46a6f9885ccb8086787662e1033b93e0", "content_id": "3ca6696221c4f14e7d2606a6943000003b0f543d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1374, "license_type": "no_license", "max_line_length": 103, "num_lines": 44, "path": "/connectors/s3Connector.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "import logging as log\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom botocore.exceptions import NoCredentialsError\n\nclass S3_connect:\n    \"\"\"\n    Creates an AWS S3 connection and uploads files into the AWS S3 bucket\n\n    Methods\n    -------\n    __create_s3_session - It creates s3 session\n    upload_to_aws - Upload file into s3\n    \"\"\"\n    def __init__(self, aws_id, aws_sec, aws_bucket, expiration_time):\n        self.aws_id = aws_id\n        self.aws_sec = aws_sec\n        self.bucket = aws_bucket\n        self.S3 = None\n        self.s3client = None\n        self.__create_s3_session()\n        self.expiration = expiration_time\n\n    def __create_s3_session(self):\n        \"\"\"\n        Creates s3 session and s3 client\n        \"\"\"\n        self.session = boto3.Session(aws_access_key_id=self.aws_id, aws_secret_access_key=self.aws_sec)\n        self.s3client = self.session.client(\"s3\")\n\n    def upload_to_aws(self, local_file, bucket, s3_file):\n        \"\"\"\n        Upload the file into aws s3 bucket\n        \"\"\"\n        try:\n            self.s3client.upload_file(local_file, bucket, s3_file)\n            print(\"Upload Successful\")\n            return True\n        except FileNotFoundError:\n            print(\"The file was not found\")\n            return False\n        except NoCredentialsError:\n            print(\"Credentials not available\")\n            return False\n" }, { "alpha_fraction": 0.7230769395828247, "alphanum_fraction": 0.7230769395828247, "avg_line_length": 31.5, "blob_id": "26657d2871c51eb6c9f33929d5c2ee7eb8963800", "content_id": "cd9940fe53aa3cddf4a01be565c21ea5434c5148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 130, "license_type": "no_license", "max_line_length": 60, "num_lines": 4, "path": "/README.md", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "## System Requirements\n\n- 
[Docker](https://docs.docker.com/install/)\n- [Docker Compose](https://docs.docker.com/compose/install/)\n" }, { "alpha_fraction": 0.5932286381721497, "alphanum_fraction": 0.5947006940841675, "avg_line_length": 27.704225540161133, "blob_id": "6725a2a9ac0ca33297687a2253d25daa134f729e", "content_id": "03830e86c30a54d191be1d156f0ea6b3118acb7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2038, "license_type": "no_license", "max_line_length": 143, "num_lines": 71, "path": "/utils/FileReaders.py", "repo_name": "krishnaramya/student-teacher", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom pyarrow.parquet import ParquetFile\nimport os\n\nclass Reader():\n    \"\"\"\n    A class for reading the files and converting them into DataFrames using generators\n\n    Attributes\n    ----------\n    chunksize : int\n        Used to return the data in chunks.\n\n    Methods\n    -------\n    file_reader(filename)\n        Read a csv file and convert to pandas data frames in chunks\n\n    parquet_reader(filename)\n        Read a parquet file and convert to a ParquetFile object\n\n    get_parquet_data(parquet_data)\n        Convert the ParquetFile object into data frames\n    \"\"\"\n\n    chunksize = 10\n    \n    def file_reader(self, filename):\n        \"\"\"\n        Load the data from a csv file in chunks and yield each chunk as a dataframe.\n        \n        Parameters:\n            filename (str): The student file name\n\n        Returns:\n            chunk (obj): The generator Object\n        \"\"\"\n\n        for chunk in pd.read_csv(filename, chunksize = Reader.chunksize,delimiter='_'): \n            yield chunk\n\n    @staticmethod\n    def parquet_reader(filename):\n        \"\"\"\n        Reader interface for a single Parquet file\n\n        Parameters:\n            filename (str): The teacher parquet file name\n\n        Returns:\n            parquet (obj): ParquetFile object\n        \"\"\"\n\n        return ParquetFile(source = filename)\n\n    @staticmethod\n    def get_parquet_data(parquet_data):\n        \"\"\"\n        Convert each parquet row group into a dataframe and yield it\n\n        Parameters:\n            parquet_data (obj): ParquetFile object\n\n        Returns:\n            res (obj): The generator Object\n        \"\"\"\n        rows = parquet_data.num_row_groups\n        cols = ['fname', 'lname', 'email', 'cid']\n        for row in range(0, rows):\n            row_df = parquet_data.read_row_group(row) \n            res = row_df.to_pandas()[cols]\n            res.rename(columns = {'fname': 'teacherfirstname', 'lname': 'teacherlastname', 'email':'teacheremail', 'cid':'tcid'}, inplace=True)\n            yield res\n" } ]
11
efren-cabrera/geoarea
https://github.com/efren-cabrera/geoarea
87b71f462592566258e4a018833c9e26cd394aab
b96776e1a900a927f319cf3cd6cc59b4a184560a
88b24ec28b2d7d2f3ebc16f96aa3015b68c833fe
refs/heads/master
2020-06-23T18:54:37.503956
2019-07-25T20:51:21
2019-07-25T20:51:21
198,723,107
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.614973247051239, "alphanum_fraction": 0.6844919919967651, "avg_line_length": 27.33333396911621, "blob_id": "571166182b5d72ff34c1226503bbe211e7eeea9d", "content_id": "f4592b71399a09dd9988ea3f0699f7b099bd95e3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "permissive", "max_line_length": 91, "num_lines": 33, "path": "/test.py", "repo_name": "efren-cabrera/geoarea", "src_encoding": "UTF-8", "text": "import json\nimport unittest\n\nfrom geoarea import area\n\n\nf = open('illinois.json')\nillinois = json.loads(f.read())\n\nlatitude_world = [-90, 90, 90, -90, -90]\nlongitude_world = [-180, -180, 180, 180, -180]\n\nILLINOIS_AREA = 145978332359.36746\nworld_area = 511207893395811.06\n\n\nclass AreaTestCase(unittest.TestCase):\n\n def test_world_area(self):\n self.assertAlmostEqual(area(latitude_world, longitude_world), world_area, places=0)\n\n def test_illinois_area(self): \n illinois_area = area(*self.get_lat_lon(illinois[\"coordinates\"][0][0]))\n self.assertAlmostEqual(illinois_area, ILLINOIS_AREA, places=3)\n\n def get_lat_lon(self, coordinate_pairs_list):\n latitude = [coordinate_pair[1] for coordinate_pair in coordinate_pairs_list]\n longitude = [coordinate_pair[0] for coordinate_pair in coordinate_pairs_list]\n return latitude, longitude\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5950704216957092, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 17.521739959716797, "blob_id": "bda9ccd482914742d85b5ce409481478bf180a99", "content_id": "6caa22d1e1fce47a2613b8a0a455adc21f43ae6e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 852, "license_type": "permissive", "max_line_length": 113, "num_lines": 46, "path": "/README.md", "repo_name": "efren-cabrera/geoarea", "src_encoding": "UTF-8", "text": "# geoarea\n\n[![travis](https://travis-ci.org/efren-cabrera/area.svg?branch=master)](https://travis-ci.org/efren-cabrera/area)\n\n\nThis is a fork from [geojson-area (python)](https://github.com/scisco/area). 
\nCalculate the area from lists of latitude and longitude coordinates.\n\nInstallation\n------------\n\n```\n $ pip install geoarea\n```\n\nUsage\n-----\n\nSimply pass lists of latitude and longitude\n\n```\n>>> from geoarea import area\n>>> latitude_world = [-90, 90, 90, -90, -90]\n>>> longitude_world = [-180, -180, 180, 180, -180] \n>>> area(latitude_world, longitude_world)\n511207893395811.06\n```\n\nTest\n----\n\n```\n $ python test.py\n```\n\nCredit\n------\n\n- [geojson-area (python)](https://github.com/scisco/area)\n- [geojson-area](https://github.com/mapbox/geojson-area)\n\n\nReferences\n----------\n\n- https://trs.jpl.nasa.gov/bitstream/handle/2014/41271/07-0286.pdf\n" }, { "alpha_fraction": 0.585605263710022, "alphanum_fraction": 0.5932388305664062, "avg_line_length": 31.75, "blob_id": "6d7107f33f660a2746810fbc902b465dcab897a8", "content_id": "b5f1540956296813a72a614a86347a1e45de2607", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 917, "license_type": "permissive", "max_line_length": 80, "num_lines": 28, "path": "/setup.py", "repo_name": "efren-cabrera/geoarea", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nwith open('README.md', encoding='utf-8') as f:\n    readme = f.read()\n\n\nsetup(name='geoarea',\n      version=\"V0.1.3\",\n      description=\"Calculate the area from latitude longitude coordinates list\",\n      long_description_content_type='text/markdown',\n      long_description=readme, \n      author=\"Efren Cabrera\",\n      author_email=\"[email protected]\",\n      url='https://github.com/efren-cabrera/geoarea',\n      license='BSD-2-Clause',\n      packages=['geoarea'],\n      include_package_data=True,\n      zip_safe=False,\n      test_suite=\"test\",\n      classifiers=[\n        # Indicate who your project is intended for\n        'Intended Audience :: Developers',\n        'Intended Audience :: Science/Research',\n        'Topic :: Scientific/Engineering :: GIS',\n        'License :: OSI Approved :: BSD License', \n        'Programming Language :: Python :: 3.7',\n      ]\n)\n" }, { "alpha_fraction": 0.5780206322669983, "alphanum_fraction": 0.6144505143165588, "avg_line_length": 32.61224365234375, "blob_id": "be0e5c620b709eeb207c56b9f8d3509abd7ea7fb", "content_id": "a06ea12a80755e4c21d64de0abdcdfea07325cee", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1647, "license_type": "permissive", "max_line_length": 108, "num_lines": 49, "path": "/geoarea/__init__.py", "repo_name": "efren-cabrera/geoarea", "src_encoding": "UTF-8", "text": "from math import pi, sin, radians\n\nWGS84_RADIUS = 6_378_137\n\n\ndef _ring_area(coordinates) -> float:\n    \"\"\"\n    Calculate the approximate area of the polygon were it projected onto\n    the earth. Note that this area will be positive if the ring is oriented\n    clockwise, otherwise it will be negative.\n\n    Reference:\n    Robert G. Chamberlain and William H. 
Duquette, \"Some Algorithms for\n Polygons on a Sphere\", JPL Publication 07-03, Jet Propulsion\n Laboratory, Pasadena, CA, June 2007 https://trs.jpl.nasa.gov/bitstream/handle/2014/41271/07-0286.pdf\n\n @Returns\n\n {float} The approximate signed geodesic _area of the polygon in square meters.\n \"\"\"\n coordinates_length = len(coordinates)\n if coordinates_length <= 2:\n return 0\n area = 0\n for i in range(0, coordinates_length):\n if i == (coordinates_length - 2):\n lower_index = coordinates_length - 2\n middle_index = coordinates_length - 1\n upper_index = 0\n elif i == (coordinates_length - 1):\n lower_index = coordinates_length - 1\n middle_index = 0\n upper_index = 1\n else:\n lower_index = i\n middle_index = i + 1\n upper_index = i + 2\n\n p1 = coordinates[lower_index]\n p2 = coordinates[middle_index]\n p3 = coordinates[upper_index]\n\n area += (radians(p3[0]) - radians(p1[0])) * sin(radians(p2[1]))\n area = area * WGS84_RADIUS * WGS84_RADIUS / 2\n return area\n\ndef area(latitude, longitude) -> float:\n coordinates = [coodinates_pair for coodinates_pair in zip(longitude, latitude)]\n return _ring_area(coordinates)\n" } ]
4
zkhadikov/jockerizer
https://github.com/zkhadikov/jockerizer
e8c798ab9f77637faff0def13a5dd921f42d77cd
2a53ec8a2dc4e2f030e58e9f18cb0ed41d3a3a3c
8a88b1f82bc34fd25a3d3143b3efcff3e3b05c64
refs/heads/master
2018-05-30T19:38:49.052892
2016-04-05T09:46:26
2016-04-05T09:46:26
51,938,400
0
0
null
2016-02-17T16:45:43
2016-02-12T17:17:05
2016-02-15T15:24:42
null
[ { "alpha_fraction": 0.7152682542800903, "alphanum_fraction": 0.716643750667572, "avg_line_length": 19.77142906188965, "blob_id": "b18006438626e510146a6ffee2b957f010a6fab4", "content_id": "b8563c384ed6125de3bd02e0134a055696b57857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 727, "license_type": "no_license", "max_line_length": 39, "num_lines": 35, "path": "/add/6.0_SR2/docker.sh", "repo_name": "zkhadikov/jockerizer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nBASEDIR=$(dirname $0)\n\nfunction stop { \n\techo \"stopping jedox\"\n\tbash /etc/init.d/jedox_olap stop\n\tbash /etc/init.d/jedox_core stop\n\tbash /tomcat/jedox_tomcat.sh stop\n\tbash /etc/init.d/httpd stop\n\texit \n } \n\ntrap stop SIGTERM SIGHUP SIGINT SIGQUIT\n \necho \"starting jedox\"\nulimit -S unlimited -c unlimited\necho \"starting olap\"\nbash /etc/init.d/jedox_olap start\necho \"starting core\"\nbash /etc/init.d/jedox_core start\necho \"starting tomcat\"\nbash /tomcat/jedox_tomcat.sh start\necho \"starting apache\"\nbash /etc/init.d/httpd start\n\nif [ ! -f /log/olap_server.log ]; then\n\t\ttouch /log/olap_server.log\nfi\n\nif [ ! -f /log/olap_server.log ]; then\n\t\ttouch /log/olap_server.log\nfi\n\ntail -f /log/*.log /log/tomcat/*.log\n" }, { "alpha_fraction": 0.5993283987045288, "alphanum_fraction": 0.606984555721283, "avg_line_length": 45.53125, "blob_id": "fad518a9d0b250b5f5c749e19cdd7f8adb55a021", "content_id": "4783eed5c25e9d8d69e2096ed537f4fd07db4b12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7445, "license_type": "no_license", "max_line_length": 158, "num_lines": 160, "path": "/jedox_auto_installer.py", "repo_name": "zkhadikov/jockerizer", "src_encoding": "UTF-8", "text": "import logging\nimport argparse\nimport wget\nimport sys\nimport tarfile\nimport os\nimport pexpect\nimport subprocess\nimport shutil\n\nclass default_logger:\n def __init__(self,name,**kwargs):\n logger=logging.getLogger(name)\n level=kwargs.get(\"level\",\"DEBUG\")\n logger.setLevel(getattr(logging,level))\n formater=logging.Formatter(\"%(asctime)15s %(name)5s %(levelname)8s %(lineno)3d %(funcName)15s %(message)s\")\n sh=logging.StreamHandler()\n sh.setFormatter(formater)\n logger.addHandler(sh)\n self.logger=logger\n\n\n\nclass jedox_installer(default_logger):\n def __init__(self,args):\n default_logger.__init__(self,\"jedox_installer\")\n self.args=args\n uncompress=False\n\n self.download_directory=self.args[\"installer_directory\"]\n self.installer_directory=self.args[\"installer_directory\"] + \"/\" + self.args[\"jedox_version\"] + \"/\"\n\n if args[\"installer_download\"]!=False:\n self.logger.info(\"Installer : download from url\")\n self.installer=self.download()\n uncompress=True\n elif args[\"installer_file\"]!=False:\n self.logger.info(\"Installer : taking the file specified :%s\" % args[\"installer_file\"])\n self.installer=args[\"installer_file\"]\n uncompress=True\n\n if uncompress==True :\n self.uncompress()\n\n if not os.path.isdir(self.installer_directory):\n self.logger.critical(\"Installer directory not found --> ABORTING %s\" % self.installer_directory)\n sys.exit(3)\n\n self.sign_eula()\n self.install()\n\n def remove_old_install(self):\n install_dir=\"/opt/jedox/ps\"\n if os.path.isdir(install_dir):\n self.logger.warn(\"older installation detected : stopping and deleting...\")\n try:\n self.stop()\n except Exception:\n self.logger.info(\"Could not stop jedox, probably not 
running\")\n shutil.rmtree(install_dir)\n\n\n def download(self):\n\n url=self.args[\"installer_download\"]\n self.logger.info(\"start downloading installer at : %s\" % url)\n\n if not os.path.isdir(self.download_directory):\n os.mkdir(self.download_directory)\n installer_file = wget.download(url,self.download_directory)\n self.logger.info(\"saved as : %s\" % installer_file)\n return installer_file\n\n def uncompress(self):\n \"\"\"\n # delete directory content if not empty\n if os.path.isdir(self.installer_directory):\n \"\"\"\n if not os.path.isdir(self.installer_directory):\n os.mkdir(self.installer_directory)\n self.logger.info(\"uncompress file %s to %s \" % (self.installer,self.installer_directory))\n os.chdir(self.installer_directory)\n tar=tarfile.open(self.installer)\n tar.extractall()\n tar.close()\n\n def install(self):\n self.remove_old_install()\n os.chdir(self.installer_directory)\n install_script=os.path.join(self.installer_directory,\"install.sh\")\n self.logger.info(\"starting install script : %s\" % install_script)\n if not os.path.isfile(install_script):\n self.logger.critical(\"installer not found, aborting\")\n sys.exit(1)\n\n else :\n answers=[\n {\"expect\":\"Default.*\", \"answer\":\"\", \"description\":\"jedox_home [/opt/jedox/ps]\"},\n {\"expect\":\"The directory.*\", \"answer\":\"y\", \"description\":\"create jedox_home directory [Y]\"},\n {\"expect\":\"Default.*\", \"answer\":\"jedoxweb\", \"description\":\"user for jedox-suite [jedoxweb]\"},\n {\"expect\":\"Default.*\", \"answer\":\"jedoxweb\", \"description\":\"group for jedox-suite [jedoxweb]\"},\n {\"expect\":\"What is this servers name ?.*\", \"answer\":\"jedox_ps\", \"description\":\"servers name\", \"timeout\":60 },\n {\"expect\":\"What is this servers IP-Address ? 
.*\", \"answer\":\"\", \"description\":\"servers IP-Address\"},\n {\"expect\":\"Who should get administrative e-mails regarding this server ?.*\", \"answer\":\"[email protected]\", \"description\":\"admin email\"},\n {\"expect\":\"Which IP-address should the OLAP server run on.*\", \"answer\":\"all\", \"description\":\"olap ip\"},\n {\"expect\":\"Which port should the OLAP server run on ?.*\", \"answer\":\"7777\", \"description\":\"olap port\"},\n {\"expect\":\"Which IP-address should the HTTP server listen on.*\", \"answer\": \"all\", \"description\":\"http ip\"},\n {\"expect\":\"Which port should the HTTP server run on ?.*\", \"answer\":\"80\", \"description\":\"http port\"},\n {\"expect\":\"Which IP-address should the Spreadsheet server run on.*\", \"answer\":\"127.0.0.1\", \"description\":\"spreadsheet server ip\"},\n {\"expect\":\"Which port should the Spreadsheet server run on ?.*\", \"answer\":\"8193\", \"description\":\"spreadsheet server port\"},\n {\"expect\":\"Which AJP-address should the Tomcat server run on ?.*\", \"answer\":\"127.0.0.1\", \"description\":\"tomcat ajp ip\"},\n {\"expect\":\"Which AJP port should the Tomcat server run on ?.*\", \"answer\":\"8010\", \"description\":\"tomcat ajp port\"},\n {\"expect\":\"Which HTTP-address should the Tomcat server run on ?.*\", \"answer\":\"127.0.0.1\", \"description\":\"tomcat http ip\"},\n {\"expect\":\"Which HTTP port should the Tomcat server run on ?.*\", \"answer\":\"7775\", \"description\":\"tomcat http port\"},\n ]\n\n\n child = pexpect.spawn(install_script,env=os.environ)\n fout_file=\"installer_fout.log\"\n fout = open(fout_file, \"ab\")\n child.logfile = fout\n i=0\n for a in answers:\n self.logger.info(\"awaiting question[%d]:%s expect=%s answer=%s timeout=%d\" % (i,a[\"description\"],a[\"expect\"],a[\"answer\"],a.get(\"timeout\",-1)))\n child.expect (a[\"expect\"],a.get(\"timeout\",-1))\n child.sendline (a[\"answer\"])\n i+=1\n child.expect(pexpect.EOF)\n fout.close()\n self.logger.info(open(fout_file).read())\n self.logger.info(\"install finished output below\")\n\n def start(self):\n subprocess.check_call([\"/opt/jedox/ps/jedox-suite.sh\",\"start\"], shell=False)\n #Starting httpd: [ OK ]\n\n def stop(self):\n subprocess.check_call([\"/opt/jedox/ps/jedox-suite.sh\",\"stop\"], shell=False)\n #Unmounting /opt/jedox/ps/sys...done.\n\n def sign_eula(self):\n\n eula_file=os.path.join(self.installer_directory,self.args[\"eula\"])\n if not os.path.isfile(eula_file):\n self.logger.info(\"EULA License was not sign, creating file %s\" % eula_file)\n with open(eula_file, 'a'):\n os.utime(eula_file, None)\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Build a jedox image for docker')\n parser.add_argument('--installer-download', help='download the installer rather than using a local one',default=False)\n parser.add_argument('--installer-directory',type=str, help='where the install files are or will be uncompressed',default=\"/opt/jedox_installation\")\n parser.add_argument('--installer-file',type=str, help='where the installer tar file is stored',default=False)\n parser.add_argument('--jedox-version', help='Jedox version to be installed ex: 6.0_SR1',default=\"6.0_SR2\")\n args = vars(parser.parse_args())\n\n installer=jedox_installer(args)\n" }, { "alpha_fraction": 0.6457038521766663, "alphanum_fraction": 0.6500914096832275, "avg_line_length": 44.58333206176758, "blob_id": "80d739253660d390077e9d1297d70bfd9d2e7f50", "content_id": "2b4867930cbd1f96e91a8b1d35fd1793db411a06", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5470, "license_type": "no_license", "max_line_length": 265, "num_lines": 120, "path": "/jedox_auto_dockerize.py", "repo_name": "zkhadikov/jockerizer", "src_encoding": "UTF-8", "text": "from jedox_auto_installer import *\nimport subprocess\nfrom docker import Client\nimport json\nimport os\nfrom time import sleep\nfrom string import Template\n\nclass dockerizer(default_logger):\n def __init__(self,args):\n default_logger.__init__(self,\"dockerizer\")\n self.args=args\n self.config=self.get_config()\n self.args[\"eula\"]=self.config[\"installer\"][\"eula\"]\n self.directory=(os.path.dirname(os.path.realpath(__file__)))\n\n self.base_image_name=args[\"base_image\"]\n self.docker=Client(base_url='unix://var/run/docker.sock')\n\n\n self.installer=jedox_installer(args)\n self.installer.start()\n sleep(15)\n self.installer.stop()\n sleep(15)\n\n self.patch()\n self.add()\n self.build_base_image(self.base_image_name)\n self.base_container=self.docker.create_container(self.base_image_name)\n self.docker.start(self.base_container)\n self.docker_exec(self.base_container,self.config[\"docker\"][\"exec\"])\n self.commit(self.args[\"docker_repository\"],self.args[\"docker_tag\"])\n #remove intermediate container\n self.logger.info(\"removing base container\")\n self.docker.remove_container(container=self.base_container,force=True)\n\n def get_config(self):\n try :\n config_file=self.args[\"config\"]\n version=self.args[\"jedox_version\"]\n\n j=json.load(open(config_file))\n return j[version]\n\n except KeyError as e:\n self.logger.exception(e)\n self.logger.error(\"Could not find the right config for version=%s in file=%s \\n Aborting...\" % (version,config_file))\n sys.exit(1)\n\n def patch(self):\n self.logger.info(\"patching files from installer\")\n self.change_working_directory(\"patch\")\n\n for p in self.config[\"patch\"]:\n target=os.path.join(self.args[\"jedox_home\"],p[\"target\"])\n description=p.get(\"description\",p[\"target\"])\n\n self.logger.info(\"patching : %s\" % description)\n subprocess.check_call(\"patch %s < %s\" % (target,p[\"source\"]),shell=True)\n\n def add(self):\n self.logger.info(\"adding additional content to installation\")\n self.change_working_directory(\"add\")\n\n for a in self.config[\"add\"]:\n target=os.path.join(self.args[\"jedox_home\"],a[\"target\"])\n self.logger.info(\"copy %s to %s\" % (a[\"source\"],target))\n shutil.copy(a[\"source\"],target)\n\n def change_working_directory(self,area):\n working_directory=os.path.join(self.directory,area,self.args[\"jedox_version\"])\n self.logger.info(\"working dir is now %s\" % working_directory)\n os.chdir(working_directory)\n\n def build_base_image(self,image_name=\"jedox/base\"):\n os.chdir(self.args[\"jedox_home\"])\n self.logger.info(\"Import Jedox Suite into intermediate docker image '%s'\" % image_name)\n subprocess.check_call(\"\"\"tar --to-stdout --numeric-owner --exclude=/proc --exclude=/sys --exclude='*.tar.gz' --exclude='*.log' -c ./ | docker import --change \"CMD while true; do ping 8.8.8.8; done\" --change \"ENV TERM=xterm\" - %s\"\"\" % image_name, shell=True)\n self.logger.info(\"successfully create basecontainer %s\" % image_name)\n\n\n def docker_exec(self,myContainer,exec_list):\n\n self.docker.timeout=300\n for e in exec_list:\n if \"description\" in e : #print description in logs if available\n self.logger.info(e[\"description\"])\n 
exec_c=self.docker.exec_create(myContainer,e[\"cmd\"],stdout=True,stderr=True)\n output=self.docker.exec_start(exec_c)\n self.logger.debug(self.docker.exec_inspect(exec_c))\n self.logger.info(output)\n\n self.logger.debug(\"all exec done\")\n\n def commit(self,repository,tag):\n tag=Template(self.args[\"docker_tag\"]).safe_substitute(jedox_version=self.args[\"jedox_version\"])\n self.logger.info(\"commiting finale image %s to %s : %s\" % (self.base_container,repository,tag))\n\n config={\"CMD\":\"/entrypoint\",\n \"EXPOSE\": \"[80,7777]\",\n }\n self.docker.commit(self.base_container,repository,tag,conf=config)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Build a jedox image for docker')\n parser.add_argument('--installer-download', help='download the installer rather than using a local one',default=False)\n parser.add_argument('--installer-directory',type=str, help='where the install files are or will be uncompressed',default=\"/opt/jedox_installation\")\n parser.add_argument('--installer-file',type=str, help='where the installer tar file is stored',default=False)\n parser.add_argument('--jedox-home',type=str, help='directory where jedox is installed default=/opt/jedox/ps',default=\"/opt/jedox/ps\")\n parser.add_argument('--jedox-version', help='Jedox version to be installed ex: 6.0_SR1',default=\"6.0_SR2\")\n parser.add_argument('--base-image', help='name of the docker base image to be created default=jedox/base',default=\"jedox/base\")\n parser.add_argument('--config', help='json config file',default=\"./config.json\")\n parser.add_argument('--docker-repository', help='docker repository where the image will be stored',default=\"jedox/base\")\n parser.add_argument('--docker-tag', help='tag used for storing final docker image',default=\"$jedox_version\")\n args = vars(parser.parse_args())\n\n installer=dockerizer(args)\n" }, { "alpha_fraction": 0.7516985535621643, "alphanum_fraction": 0.7566398978233337, "avg_line_length": 42.75675582885742, "blob_id": "d0c784306592c47faaabc45bb6ecf3f9f91570f1", "content_id": "bce4d6c34975d53cb47e91901447e4c65af29b51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1619, "license_type": "no_license", "max_line_length": 126, "num_lines": 37, "path": "/readme.md", "repo_name": "zkhadikov/jockerizer", "src_encoding": "UTF-8", "text": "#Create automatically docker images for jedox\n\nAim ist being able to create automatically jedox images. You can specify the download url from jedox,\nor provide the directory where the install tar has been extracted. 
The program then automatically install jedox, \nand create the corresponding docker image.\n\nAll was done based on the [work from Zurab Khadikov](https://github.com/zkhadikov/dockerize_jedox/)\n\n\n##Usage\npython jedox_auto_dockerize [options]\n\nYou have 3 different possibility how you can get the installer :\n - --installer-download url the installer will be downloaded from\n - --installer-directory if the installer is already uncompressed somewhere on the disk\n - --installer-file in case you have already the installer.tag on the disk, but still compressed\n \nOptions :\n\n--installer-download', help='download the installer rather than using a local one',default=False)\n\n--installer-directory',type=str, help='where the install files are or will be uncompressed',default=\"/opt/jedox_installation\")\n\n--jedox-home',type=str, help='directory where jedox is installed default=/opt/jedox/ps',default=\"/opt/jedox/ps\")\n\n--jedox-version', help='Jedox version to be installed ex: 6.0_SR2',default=\"6.0_SR2\")\n\n--base-image', help='name of the docker base image to be created default=jedox/base',default=\"jedox/base\")\n\n--config', help='json config file',default=\"./config.json\")\n\n--docker-repository', help='docker repository where the image will be stored',default=\"leanbi/jedox6\")\n\n--docker-tag', help='tag used for storing final docker image',default=\"$jedox_version\")\n\n\n[LeanBI](http://leanbi.ch/big-data/)\n" } ]
4
kk6axq/VCC_Bot
https://github.com/kk6axq/VCC_Bot
eefa0b59b515780119a578f0c8eafc35e4ab088f
ca5d1bc7e91c690a7b89647ae877b7bd87dee240
3d03099c0d12de29f8ffcdfc0039346b11ab9554
refs/heads/master
2021-01-17T12:57:38.601790
2016-06-29T00:52:47
2016-06-29T00:52:47
59,429,067
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.739234447479248, "alphanum_fraction": 0.7464115023612976, "avg_line_length": 15.038461685180664, "blob_id": "ed1c7be84d6bc46cc3405abe22e636e8519a4307", "content_id": "b7fd6d84c143b71ebddc230181d4c9faf8bbede1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 418, "license_type": "no_license", "max_line_length": 90, "num_lines": 26, "path": "/vccbot_testing/notes.txt", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "TODO \nOdometry track - done\nSpin - done\nmove dist - done\n\nMigrate vccbot_button and vccbot_display to global state srv. button - done display - done\nFinish package.xml files\nvsp - done\nvm - done\nvb - done\nvd - done\nvl - done\nvmsgs - done\n\nTest catkin_make - done\n\n\n\nLicense is GPLv3\n\nOn vccbot_button/CMakeLists.txt line 51 uncomment AdafruitBBIO when compiling on BBB\n\n\n\nNotes:\nCamera image is on /usb_cam/image_raw\n\n" }, { "alpha_fraction": 0.7328385710716248, "alphanum_fraction": 0.7411873936653137, "avg_line_length": 21, "blob_id": "f2a3b6313291f8a3bad30a1586367177ab2cdad8", "content_id": "45677942a9653f70bc41c1964e9bb9f7672e300f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "no_license", "max_line_length": 88, "num_lines": 49, "path": "/vccbot_state_publisher/src/vccbot_state_publisher.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom std_srvs.srv import Empty\nfrom vccbot_msgs.msg import GlobalState\nWAITING=0\nMOVING=1\nSPIN=2\nDISPLAY=3\nUNKNOWN=4\ncurrentState = UNKNOWN\n\ndef service_move(msg):\n\tglobal MOVING\n\tglobal currentState\n\tcurrentState = MOVING\n\treturn []\ndef service_waiting(msg):\n\tglobal WAITING\n\tglobal currentState\n\tcurrentState = WAITING\n\treturn []\n\ndef service_spin(msg):\n\tglobal SPIN\n\tglobal currentState\n\tcurrentState = SPIN\n\treturn []\n\ndef service_display(msg):\n\tglobal DISPLAY\n\tglobal currentState\n\tcurrentState = DISPLAY\n\treturn []\n\t\ndef main():\n\tglobal_state_publisher = rospy.Publisher('/global_state', GlobalState, queue_size = 10)\n\trospy.init_node('vccbot_state_publisher')\n\tmove = rospy.Service('move', Empty, service_move)\n\twaiting = rospy.Service('waiting', Empty, service_waiting)\n\tspin = rospy.Service('spin', Empty, service_spin)\n\tdisplay = rospy.Service('display', Empty, service_display)\n\trate = rospy.Rate(30)\n\twhile not rospy.is_shutdown():\n\t\tglobal_state_publisher.publish(currentState)\n\t\trate.sleep()\n\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.7421088218688965, "alphanum_fraction": 0.7499440312385559, "avg_line_length": 28.57615852355957, "blob_id": "02327116aef8e0ae43852a396fdbc910886a754b", "content_id": "b4610db2f1a061363dbecd131487d726e64cffea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4467, "license_type": "no_license", "max_line_length": 141, "num_lines": 151, "path": "/vccbot_move/src/vccbot_move.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom vccbot_msgs.msg import GlobalState\nfrom nav_msgs.msg import Odometry\nfrom std_srvs.srv import Empty\nimport time\nimport os\nos.system(\"echo \\\"\\\" > /home/ubuntu/vccbot_move_log.txt\")\n#Variables that need to be changed by the user:\nmoveSpeed = 0.05# Speed to drive at. 
Units in m/s.\nrotationSpeed = 0.5#Speed of rotation in rad/s\n\n\n#global variables\nWAITING=0\nMOVING=1\nSPIN=2\nDISPLAY=3\nUNKNOWN=4\ncurrentGlobalState=UNKNOWN\ntime_at_beginning_spin = 0\nfirstSpin = True\nmaxDist = 0.5#0.5 meters\nf = open(\"/home/ubuntu/vccbot_move_log.txt\", \"a\")#append to the log file truncated above\ndef logMessage(message):\n\tf.write(message + \"\\n\")\n\tprint \"Vccbot_move: MESSAGE: \" + message\n\n\n#Variables and function for the odometry/distance measurement system\nrotNotDone = True\ninitialOdometryDist = 0\ncurrentOdomDist = 0\nodomFirst = True\nodomOffset = 0\ndef odometryCallback(data):\n\tglobal currentGlobalState\n\tglobal currentOdomDist\n\tglobal WAITING\n\tglobal SPIN\n\tglobal DISPLAY\n\tglobal MOVING\n\tglobal UNKNOWN\n\tglobal maxDist\n\tglobal odomFirst\n\tglobal mode_change_spin\n\tglobal odomOffset\n\tif odomFirst:\n\t\todomFirst = False\n\t\todomOffset = data.pose.pose.position.x\n\n\tcurrentOdomDist = data.pose.pose.position.x - odomOffset#get current forward distance value from odometry. +X is forward. This is in meters.\n\tlogMessage(\"currentOdomDist: \" + str(currentOdomDist))\n\tif currentOdomDist > maxDist and currentGlobalState == MOVING:\n\t\t\tmode_change_spin()\n\n\n# Variables and functions for the global state system\nGSFirst = True\nstateName = UNKNOWN # Variable to hold state name to publish\nglobal global_state_publisher\n\ndef globalStateCallback(data):\n\tglobal currentGlobalState\n\tglobal GSFirst\n\tglobal stateName\n\tglobal WAITING\n\tglobal MOVING\n\tglobal SPIN\n\tglobal DISPLAY\n\tglobal UNKNOWN\t\n\tcurrentGlobalState = data.globalstate\n\trospy.loginfo(\"Updated current global state to %s\", currentGlobalState)\n\t#logMessage(\"updated global state to: \" + str(currentGlobalState))\n\t\n#Variables and functions for the Twist publisher system\n\ntwist_message = Twist() # variable to assemble the message into.\n\ndef main():\n\tglobal WAITING\n\tglobal MOVING\n\tglobal SPIN\n\tglobal DISPLAY\n\tglobal UNKNOWN\n\tglobal time_at_beginning_spin\n\tglobal firstSpin\n\tglobal rotationSpeed\n\trospy.wait_for_service('move')\n\trospy.wait_for_service('waiting')\n\trospy.wait_for_service('display')\n\trospy.wait_for_service('spin')\n\tglobal mode_change_move\n\tglobal mode_change_waiting\n\tglobal mode_change_spin\n\tglobal mode_change_display\n\tmode_change_move = rospy.ServiceProxy('move', Empty)\n\tmode_change_waiting = rospy.ServiceProxy('waiting', Empty)\n\tmode_change_spin = rospy.ServiceProxy('spin', Empty)\n\tmode_change_display = rospy.ServiceProxy('display', Empty)\n\tglobal global_state_publisher\n\tglobal twist_publisher\n\ttwist_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\t\n\trospy.init_node('vccbot_move', anonymous=False)\n\trospy.Subscriber(\"/global_state\", GlobalState, globalStateCallback)\n\trospy.Subscriber(\"/odom\", Odometry, odometryCallback)\n\trate = rospy.Rate(30)\n\twhile not rospy.is_shutdown():\n\t\tglobal moveSpeed\n\t\t#The program doesn't need to shutdown so loop.\n\t\tif currentGlobalState == MOVING: # The robot should move forward\n\t\t\t#logMessage(\"Moving\") \n\t\t\t#drive forward at a constant speed.\n\t\t\ttwist_message = Twist()#clear any previous data in the twist_message variable.\n\t\t\ttwist_message.linear.x = moveSpeed # These six lines create a Twist message that says move forward.\n\t\t\ttwist_message.linear.y = 0\n\t\t\ttwist_message.linear.z = 0\n\t\t\ttwist_message.angular.x = 0\n\t\t\ttwist_message.angular.y = 0\n\t\t\ttwist_message.angular.z = 
0\n\t\t\ttwist_publisher.publish(twist_message)\n\t\telif currentGlobalState == SPIN:# The robot needs to spin\n\t\t\tif firstSpin == True:\n\t\t\t\tlogMessage(\"FirstSpin\")\n\t\t\t\ttime_at_beginning_spin = time.time()\n\t\t\t\tfirstSpin = False\n\t\t\tif firstSpin == False and time.time() - time_at_beginning_spin > 22:\n\t\t\t\tlogMessage(\"Spin2Display\")\t\t\t\t\n\t\t\t\tmode_change_display()\n\t\t\telse:\n\t\t\t\tlogMessage(\"Spinning\")\n\t\t\t\ttwist_message = Twist()\n\t\t\t\ttwist_message.linear.x = 0\n\t\t\t\ttwist_message.linear.y = 0\n\t\t\t\ttwist_message.linear.z = 0\n\t\t\t\ttwist_message.angular.x = 0\n\t\t\t\ttwist_message.angular.y = 0\n\t\t\t\ttwist_message.angular.z = rotationSpeed#Turn at set number of rad/s\n\t\t\t\ttwist_publisher.publish(twist_message)\n\t\t\t\n\t\trate.sleep()\n\n\n\nif __name__ == '__main__':#If this is being run as the main program, run the loop.\n\ttry:\n\t\tmain()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\n" }, { "alpha_fraction": 0.7918890118598938, "alphanum_fraction": 0.7929562330245972, "avg_line_length": 70.92308044433594, "blob_id": "af8290f0892a02aa6e7a0f3a7ca723cbb17eb570", "content_id": "3dcfc992928a028c0a64499f1c89bb9283ec378a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 937, "license_type": "no_license", "max_line_length": 138, "num_lines": 13, "path": "/README.md", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "# VCC_Bot\nMy code from the Vision Centric Challenge.\n\n# Installation\nBesides the code contained here, this code depends on two packages:\n* A modified version of the `robot_upstart` package, available here: https://github.com/kk6axq/robot_upstart/tree/indigo-devel\n* The create_autonomy package, available here: https://github.com/AutonomyLab/create_autonomy\nBoth packages were cloned from the `indigo-devel` branch.\n# Robot Upstart\nFor my use of the serial ports and camera resources, I needed to access dialout and other groups that were assigned to my user.\n`robot_upstart`, perhaps for security reasons, uses `setuidgid` to run the ROS launch file as the user, the only disadvantage being that\nthe process does not inherit the group permissions of the user. My solution was to make `sudo` passwordless (VERY risky in normal circumstances) \nand to change the `robot_upstart` package to switch to the user and then run the launch file. 
\n\n" }, { "alpha_fraction": 0.6594005227088928, "alphanum_fraction": 0.6839237213134766, "avg_line_length": 13.680000305175781, "blob_id": "1c82b3e64926e5c35cc11f6451a7ee387f778d30", "content_id": "f6be40ae318190108bc26633b29f5be47d386ed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 51, "num_lines": 25, "path": "/vccbot_display/src/vccbot_display.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nimport serial\ndisp = serial.Serial(\"/dev/ttyACM0\", 9600)\n\n\ndef serialSend(data):\n\tdisp.write(str(data))\n\n\n\ndef callback(msg):\n\tserialSend(msg.data)\n\n\n\ndef main():\n\trospy.init_node('vccbot_display')\n\tsub = rospy.Subscriber('display', Int32, callback)\n\trospy.spin()\n\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.7452805042266846, "alphanum_fraction": 0.7540547847747803, "avg_line_length": 29.322580337524414, "blob_id": "c1cb5e823af447d2ddb1eefb4385fdccec08859a", "content_id": "c7c812d29633945601480b1d0d717d6fe866e843", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3761, "license_type": "no_license", "max_line_length": 128, "num_lines": 124, "path": "/vccbot_move/src/vccbot_move.py~", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom vccbot_msgs.msg import GlobalState\nfrom nav_msgs.msg import Odometry\nfrom std_srvs.srv import Empty\nimport time\n#Variables that need to be changed by the user:\nmoveSpeed = 0.05# Speed to drive at. Units in m/s.\nrotationSpeed = 0.5#Speed of rotation in rad/s\n\n\n#global variables\nWAITING=0\nMOVING=1\nSPIN=2\nDISPLAY=3\nUNKNOWN=4\n\ntime_at_beginning_spin = 0\nfirstSpin = True\n\n#Variables and function for the odometry/distance measurement system\nrotNotDone = True\ninitialOdometryDist = 0\ncurrentOdomDist = 0\ndef odometryCallback(data):\n\tglobal currentOdomDist\n\tglobal WAITING\n\tglobal MOVING\n\tglobal SPIN\n\tglobal DISPLAY\n\tglobal UNKNOWN\n\tcurrentOdomDist = data.pose.pose.position.x#get current forward distance value from odometry. +X is forward. 
This is in meters.\n\tif currentOdomDist > maxDist and currentGlobalState == MOVING:\n\t\t\tmode_change_spin()\n\n\n# Variables and functions for the global state system\nGSFirst = True\nshouldIPublish = False\nstateName = UNKNOWN # Variable to hold state name to publish\nglobal global_state_publisher\ncurrentGlobalState = UNKNOWN\ndef globalStateCallback(data):\n\tglobal currentGlobalState\n\tglobal GSFirst\n\tglobal shouldIPublish\n\tglobal stateName\n\tglobal WAITING\n\tglobal MOVING\n\tglobal SPIN\n\tglobal DISPLAY\n\tglobal UNKNOWN\t\n\tcurrentGlobalState = data.globalstate\n\trospy.loginfo(\"Updated current global state to %s\", currentGlobalState)\n\t\n#Variables and functions for the Twist publisher system\n\ntwist_message = Twist() # variable to assemble the message into.\n\ndef main():\n\tglobal WAITING\n\tglobal MOVING\n\tglobal SPIN\n\tglobal DISPLAY\n\tglobal UNKNOWN\n\tglobal time_at_beginning_spin\n\tglobal firstSpin\n\trospy.wait_for_service('move')\n\trospy.wait_for_service('waiting')\n\trospy.wait_for_service('display')\n\trospy.wait_for_service('spin')\n\tmode_change_move = rospy.ServiceProxy('move', Empty)\n\tmode_change_waiting = rospy.ServiceProxy('waiting', Empty)\n\tmode_change_spin = rospy.ServiceProxy('spin', Empty)\n\tmode_change_display = rospy.ServiceProxy('display', Empty)\n\tglobal global_state_publisher\n\tglobal twist_publisher\n\ttwist_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\tglobal_state_publisher = rospy.Publisher('/global_state', GlobalState, queue_size=10)\n\trospy.init_node('vccbot_move', anonymous=False)\n\trospy.Subscriber(\"/global_state\", GlobalState, globalStateCallback)\n\twhile not rospy.is_shutdown():\n\t\tglobal moveSpeed\n\t\t#The program doesn't need to shutdown so loop.\n\t\tif currentGlobalState == MOVING: # The robot should move forward \n\t\t\t#drive forward at a constant speed.\n\t\t\ttwist_message = Twist()#clear any previous data in the twist_message variable.\n\t\t\ttwist_message.linear.x = moveSpeed # These six lines create a Twist message that says move forward.\n\t\t\ttwist_message.linear.y = 0\n\t\t\ttwist_message.linear.z = 0\n\t\t\ttwist_message.angular.x = 0\n\t\t\ttwist_message.angular.y = 0\n\t\t\ttwist_message.angular.z = 0\n\t\t\ttwist_publisher.publish(twist_message)\n\t\telif currentGlobalState == SPIN:# The robot needs to spin\n\t\t\tif firstSpin == True:\n\t\t\t\ttime_at_beginning_spin = time.time()\n\t\t\t\tfirstSpin = False\n\t\t\tif firstSpin == False and time.time() - time_at_beginning_spin > 25.1328:\n\t\t\t\tmode_change_display()\n\t\t\telse:\n\t\t\t\ttwist_message = Twist()\n\t\t\t\ttwist_message.linear.x = 0\n\t\t\t\ttwist_message.linear.y = 0\n\t\t\t\ttwist_message.linear.z = 0\n\t\t\t\ttwist_message.angular.x = 0\n\t\t\t\ttwist_message.angular.y = 0\n\t\t\t\ttwist_message.angular.z = rotationSpeed#Turn at set number of rad/s\n\t\t\t\ttwist_publisher.publish(twist_message)\n\t\t\t\n\t\t\t\n\t\t\n\t\tif shouldIPublish:\n\t\t\tglobal_state_publisher.publish(stateName)\n\n\n\nif __name__ == '__main__':#If this is being run as the main program, run the loop.\n\ttry:\n\t\tmain()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\n" }, { "alpha_fraction": 0.7291666865348816, "alphanum_fraction": 0.78125, "avg_line_length": 30.66666603088379, "blob_id": "7fd9aa987325f5845ed9c1fd157db71bfe1d5bb7", "content_id": "715d6b43aa2707cd1c1676b1089f32fe4f889cc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 96, "license_type": "no_license", 
"max_line_length": 44, "num_lines": 3, "path": "/vccbot_launch/scripts/envSetup.sh", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nexport ROS_MASTER_URI=http://localhost:11311\nexport ROS_HOSTNAME=localhost\n\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 28, "blob_id": "cbe11ea44f5a8232b2379fd0148a595f922e7d44", "content_id": "75ed46181f2fd22a517ce041cead1824487c2ad5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 58, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/vccbot_launch/scripts/script2.sh", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nroslaunch vccbot_launch vccbot.launch\n" }, { "alpha_fraction": 0.5874999761581421, "alphanum_fraction": 0.625, "avg_line_length": 39, "blob_id": "6fbd5a4080d75a4d0681136b8d97c0dbc35dcbb9", "content_id": "a578fbc81da1853400f32ccc53f5e434d8570dd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 80, "license_type": "no_license", "max_line_length": 59, "num_lines": 2, "path": "/vccbot_launch/scripts/launchScript.sh", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nsudo -u ubuntu parallel -j0 bash :::: <(ls script{1..2}.sh)\n" }, { "alpha_fraction": 0.6901626586914062, "alphanum_fraction": 0.7110766768455505, "avg_line_length": 21.64912223815918, "blob_id": "1c492bbe8f84c74bd248848510d3f8e78bba51e8", "content_id": "8c8ae5def9ed30d824ccd39dde0b318d326b6bc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1291, "license_type": "no_license", "max_line_length": 79, "num_lines": 57, "path": "/vccbot_testing/src/ri2cv2.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#file for testing rosimage to cv2 conversion\nimport rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\nfrom array import array\nimage = 0\n\nWAITING=0\nMOVING=1\nSPIN=2\nDISPLAY=3\nUNKNOWN=4\n\ndef image_callback(data):\n\tglobal image\n\tglobal MOVING\n\tif currentGlobalState == MOVING\n\timage = CvBridge().imgmsg_to_cv2(data, desired_encoding=\"passthrough\")[:, 160]\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\ndef global_state_update(data):\n\tglobal currentGlobalState\n\tcurrentGlobalState = data.globalstate\n\ndef round_to_nearest(num, divisor):\n\treturn int(divisor * round(float(num) / divisor))\n\n\ndef checkImage(_image):\n\tprev_value = 0\n\tout = array(\"I\")\n\tfor i in range(0, 240):\n\t\tb = int(_image[i, 0])\n\t\tif not prev_value == b and int(_image[i+1, 0]) == b:\n\t\t\tout.append()\n\treturn out \n\t\t\n\t\n\ndef main():\n\tglobal MOVING\n\tglobal IMAGE\n\trospy.init_node('camera_listener', anonymous=False)\n\trospy.Subscriber(\"/usb_cam/image_raw\", Image, image_callback)\n\trospy.Subscriber(\"/global_state\", GlobalState, global_state_update) \n\twhile not rospy.is_shutdown():\n\t\tif currentGlobalState == MOVING:\n\t\t\t#Time to check the images.\n\t\t\timage[:, 0] = round_to_nearest(image[:, 0], 10)\n\t\t\tcheckImage(image)\n\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.686241626739502, "alphanum_fraction": 0.7030201554298401, "avg_line_length": 21.923076629638672, "blob_id": "35d58d0679f5ad291a77cc0b8ea3a5ebbd17081c", "content_id": 
"bbcf42e3afe698bc3eed894b17373b10da21ee10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1192, "license_type": "no_license", "max_line_length": 70, "num_lines": 52, "path": "/vccbot_testing/src/odomControl.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom math import sqrt\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\n# forward is +X\nfirstOdom = True\nspeed = 0.1 # 10cm per sec\nfirstX = 0\ncurrentDist = 0 # Distance traveled forward.\ndef odomCallback(data):\n\tglobal firstX\n\tglobal currentDist\n\tglobal firstOdom\n\tif firstOdom:\n\t\tfirstX = data.pose.pose.position.x\n\t\tfirstOdom = False\n\telse:\n\t\tcurrentDist = data.pose.pose.position.x - firstX\n\trospy.loginfo(\"Dist: %s\", currentDist)\n\n\ndef main():\n\tglobal speed\n\tcmd_vel_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\trospy.init_node('odom_control')\n\trospy.Subscriber(\"/odom\", Odometry, odomCallback)\n\trate = rospy.Rate(50) # 10hz\n\twhile not rospy.is_shutdown():\n\t\ttwist_message = Twist()\n\t\ttwist_message.linear.y = 0 \n\t\ttwist_message.linear.z = 0\n\t\ttwist_message.angular.x = 0\n\t\ttwist_message.angular.y = 0\n\t\ttwist_message.angular.z = 0\n\t\tif currentDist < 0.1:\n\t\t\ttwist_message.linear.x = speed\n\t\t\t\n\t\t\t\n\t\telse:\n\t\t\ttwist_message.linear.x = 0\n\t\t\trospy.loginfo(\"Stopping...\")\n\t\t\tf = True\n\n\t\tcmd_vel_publisher.publish(twist_message)\n\t\t\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n" }, { "alpha_fraction": 0.6191568970680237, "alphanum_fraction": 0.6323487162590027, "avg_line_length": 73.14893341064453, "blob_id": "fab5de1af32be4c7a6ff628c48f5277d77e9c8be", "content_id": "0b3965f54aad41a56f0b23eeea269a4ceaeedc01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3487, "license_type": "no_license", "max_line_length": 256, "num_lines": 47, "path": "/vccbot_matrix_analysis/src/vccbot_matrix_analysis.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nimport numpy as np\nfrom std_msgs.msg import Int32\ndef out(matrix):\n\tone = matrix[0, 0]\n\ttwo = matrix[0, 1]\n\tthree = matrix[0, 2]\n\tfour = matrix[1, 0]\n\tfive = matrix[1, 1]\n\tsix = matrix[1, 2]\n\tseven = matrix[2, 0]\n\teight = matrix[2, 1]\n\tnine = matrix[2, 2]\n\tten = matrix[3, 0]\n\televen = matrix[3, 1]\n\ttwelve = matrix[3, 2]\n\tthirteen = matrix[4, 0]\n\tfourteen = matrix[4, 1]\n\tfifteen = matrix[4, 2]\n\tout = -1\n\tif one == two and two == two and three == two and four == two and five != two and six == two and seven == two and eight != two and nine == two and ten == two and eleven != two and twelve == two and thirteen == two and fourteen == two and fifteen == two:\n\t\tout = 0\n\telif one != two and two == two and three != two and four != two and five == two and six != two and seven != two and eight == two and nine != two and ten != two and eleven == two and twelve != two and thirteen != two and fourteen == two and fifteen != two:\n\t\tout = 1\n\telif one == two and two == two and three == two and four != two and five != two and six == two and seven == two and eight == two and nine == two and ten == two and eleven != two and twelve != two and thirteen == two and fourteen == two and fifteen == two:\n\t\tout = 2\n\telif one == two and two == two and three == two and four != 
two and five != two and six == two and seven == two and eight == two and nine == two and ten != two and eleven != two and twelve == two and thirteen == two and fourteen == two and fifteen == two:\n\t\tout = 3\n\telif one == one and two != one and three != one and four == one and five != one and six == one and seven == one and eight != one and nine == one and ten == one and eleven == one and twelve == one and thirteen != one and fourteen != one and fifteen == one:\n\t\tout = 4\n\telif one == one and two != one and three != one and four == one and five != one and six != one and seven == one and eight != one and nine == one and ten == one and eleven == one and twelve == one and thirteen != one and fourteen != one and fifteen == one:\n\t\tout = 4\n\telif one == one and two == one and three == one and four == one and five != one and six != one and seven == one and eight == one and nine == one and ten != one and eleven != one and twelve == one and thirteen == one and fourteen == one and fifteen == one:\n\t\tout = 5\n\telif one == one and two == one and three == one and four == one and five != one and six != one and seven == one and eight == one and nine == one and ten == one and eleven != one and twelve == one and thirteen == one and fourteen == one and fifteen == one:\n\t\tout = 6\n\telif one == one and two == one and three == one and four != one and five != one and six == one and seven != one and eight != one and nine == one and ten != one and eleven != one and twelve == one and thirteen != one and fourteen != one and fifteen == one:\n\t\tout = 7\n\telif one == one and two == one and three == one and four == one and five != one and six == one and seven == one and eight == one and nine == one and ten == one and eleven != one and twelve == one and thirteen == one and fourteen == one and fifteen == one:\n\t\tout = 8\n\telif one == one and two == one and three == one and four == one and five != one and six == one and seven == one and eight == one and nine == one and ten != one and eleven != one and twelve == one and thirteen != one and fourteen != one and fifteen == one:\n\t\tout = 9\n\treturn out\n\ndef main():\n\tdisplay_publisher = rospy.Publisher(\"/display\", Int32, queue_size=10)\n\t\n" }, { "alpha_fraction": 0.7066246271133423, "alphanum_fraction": 0.7160883545875549, "avg_line_length": 21.64285659790039, "blob_id": "1a2ef822ce4896c346438c76f366b9d5c53fca0e", "content_id": "d7e5f5117637e78d705f9fe21869c6676b6f0312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1268, "license_type": "no_license", "max_line_length": 69, "num_lines": 56, "path": "/vccbot_button/src/vccbot_button.py", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nimport os\n\nimport Adafruit_BBIO.GPIO as GPIO\nfrom std_srvs.srv import Empty\nfrom vccbot_msgs.msg import GlobalState\n\npin = \"P8_12\" # Which pin the button is connected to.\n\nWAITING=0\nUNKNOWN=4\ncurrentGlobalState=UNKNOWN\ndef cleanupTime():\n\tGPIO.cleanup()\n\tprint \"cleanup\"\n\n\n\ndef getState():\n\tstate = False\n\tvalue = GPIO.input(pin)\n\tif value == 1:\n\t\tstate = True\n\n\t#print value\n\treturn state\n\ndef global_state_callback(data):\n\tglobal currentGlobalState\n\tcurrentGlobalState = data.globalstate\n\ndef main():\n\tglobal currentGlobalState\n\tos.system(\"sudo chmod a+rwx /sys/class/gpio/ -R\")\n\tos.system(\"sudo chmod a+rwx /dev/ -R\")#chmod the serial and video\n\tglobal WAITING\n\tmode_change_move = 
rospy.ServiceProxy('move', Empty)\n\tmode_change_waiting = rospy.ServiceProxy('waiting', Empty)\n\trospy.on_shutdown(cleanupTime)\n\tGPIO.setup(pin, GPIO.IN)\n\t#GPIO.input(pin) #returns val.\n\trospy.init_node('go')\n\trospy.Subscriber('global_state', GlobalState, global_state_callback)\n\trate = rospy.Rate(30)#loop at 30Hz\n\twhile not rospy.is_shutdown():\n\t\tif getState():\n\t\t\tif currentGlobalState == WAITING:\n\t\t\t\tmode_change_move()\n\t\t\t\trospy.sleep(1)\n\t\t\telse:\n\t\t\t\tmode_change_waiting()\n\t\t\t\trospy.sleep(1)\n\t\trate.sleep()\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.776562511920929, "alphanum_fraction": 0.7796875238418579, "avg_line_length": 21.821428298950195, "blob_id": "64da76c785fa11ef7c89b53d51ff408887e7b744", "content_id": "dd8b5b54fcb464e1b7720889d8a1725c70f8f6da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 88, "num_lines": 28, "path": "/vccbot_state_publisher/src/vccbot_state_publisher.py~", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom std_srvs.srv import Empty\nfrom vccbot_msgs import GlobalState\n\ncurrentState = GlobalState.UNKNOWN\n\ndef service_move(msg):\n\tglobal currentState\n\tcurrentState = GlobalState.MOVE\n\ndef service_waiting(msg):\n\tglobal currentState\n\tcurrentState = GlobalState.WAITING\n\ndef service_spin(msg):\n\tglobal currentState\n\tcurrentState = GlobalState.SPIN\n\ndef service_display(msg):\n\tglobal currentState\n\tcurrentState = GlobalState.DISPLAY\n\n\t\ndef main():\n\tglobal_state_publisher = rospy.Publisher('/global_state', GlobalState, queue_size = 10)\n\trospy.init_node('vccbot_state_publisher')\n\tmove = rospy.Service('move', \n" }, { "alpha_fraction": 0.6815203428268433, "alphanum_fraction": 0.6880733966827393, "avg_line_length": 20.799999237060547, "blob_id": "f8dccd96434d173d7ed9d50c08709a226dd734c7", "content_id": "430695ef94e4c29889d566f432c0888fa6ef1851", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 70, "num_lines": 35, "path": "/vccbot_testing/src/odomListener.py~", "repo_name": "kk6axq/VCC_Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom math import sqrt\nfrom nav_msgs.msg import Odometry\n\nfirst = True\nfirstX = 0\nfirstY = 0\ndeltaX = 0\ndeltaY = 0\nd = 0\ndef callback(data):\n\tglobal first\n\tglobal firstX\n\tglobal firstY\n\tglobal deltaX\n\tglobal deltaY\n\tglobal d\n\tif first:\n\t\tfirstX = data.pose.pose.position.x\n\t\tfirstY = data.pose.pose.position.y\n\t\tfirst = False\n\telse:\n\t\tdeltaX = data.pose.pose.position.x - firstX\n\t\tdeltaY = data.pose.pose.position.y - firstY\n\t\td = sqrt((deltaX*deltaX) + (deltaY*deltaY))\n\t\trospy.loginfo(rospy.get_caller_id() + \"Distance is %s\", d)\n\t#rospy.loginfo(rospy.get_caller_id() + \"I heard %s\", data.pose.pose)\t\n\ndef listener():\n\trospy.init_node('listener')\n\trospy.Subscriber(\"/odom\", Odometry, callback)\n\trospy.spin()\n\nif __name__ == '__main__':\n\tlistener()\n" } ]
15
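The vccbot_state_publisher.py~ record above is an editor backup that breaks off mid-call, so the repo's real service wiring is not recoverable from this snapshot. Below is a minimal, hypothetical sketch (not the repo's code) of how such a state publisher is typically finished, assuming the `vccbot_msgs/GlobalState` message carries the `globalstate` field the other nodes read and the message-level constants (`GlobalState.MOVE`, etc.) used in the truncated file:

```python
# Hypothetical completion sketch: each std_srvs/Empty service flips a shared
# state, which is republished on /global_state so nodes like vccbot_button
# and ri2cv2 can react to it.
import rospy
from std_srvs.srv import Empty, EmptyResponse
from vccbot_msgs.msg import GlobalState  # assumed to define .globalstate

current_state = GlobalState.UNKNOWN  # constants as used in the ~ backup file

def set_state(state):
    # Build one Empty-service handler per target state.
    def handler(req):
        global current_state
        current_state = state
        return EmptyResponse()
    return handler

def main():
    pub = rospy.Publisher('/global_state', GlobalState, queue_size=10)
    rospy.init_node('vccbot_state_publisher')
    rospy.Service('move', Empty, set_state(GlobalState.MOVE))
    rospy.Service('waiting', Empty, set_state(GlobalState.WAITING))
    rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        msg = GlobalState()
        msg.globalstate = current_state
        pub.publish(msg)
        rate.sleep()

if __name__ == '__main__':
    main()
```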
rigbysteven67/web-scraping-challenge
https://github.com/rigbysteven67/web-scraping-challenge
c58ca9ab106b4d5bd1e37dafa1d4576e7b288d9b
08fcb4997ec1ebca4bd30d381bee3c323742230d
875375f27bf74c400bdbdb3a5e921c6ec9b9be79
refs/heads/main
2023-02-12T22:58:53.847167
2021-04-03T02:19:40
2021-04-03T02:19:40
324,665,023
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6647230386734009, "alphanum_fraction": 0.6695821285247803, "avg_line_length": 21.88888931274414, "blob_id": "1746cf7bd27f4d3ceaecb029e808f59ac52a96f4", "content_id": "938d05517325242050eb05c78e3784f235f9a794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 71, "num_lines": 45, "path": "/Missions_to_Mars/app.py", "repo_name": "rigbysteven67/web-scraping-challenge", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect\nimport pymongo\n\n#Instantiate Flask app\napp = Flask(__name__)\n\n# Setup splinter\n#executable_path = {'executable_path': ChromeDriverManager().install()}\n#browser = Browser('chrome', **executable_path, headless=False)\n\n# connect to mongodb\nconn = 'mongodb://localhost:27017'\nclient = pymongo.MongoClient(conn)\n\n# connect to mars_app database\ndb = client.mars_app\n\n# connect to mars collection\nmars_coll = db.mars\n\[email protected](\"/\")\ndef index():\n mars_data = mars_coll.find_one()\n return(render_template('index.html', mars_data = mars_data))\n \[email protected](\"/scrape\")\ndef scrape():\n \n # this is the py script with all of the scraping functions\n import scrape_mars\n\n # gather document to insert\n nasa_document = scrape_mars.scrape_all()\n \n # insert\n #mars.insert_one(data_document)\n \n # upsert\n mars_coll.mars.update_one({}, {'$set': nasa_document}, upsert=True)\n \n return redirect('/')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)" }, { "alpha_fraction": 0.6353899836540222, "alphanum_fraction": 0.640512228012085, "avg_line_length": 22.06989288330078, "blob_id": "c1822a9dd0358e4252443b218a9895c2dc791921", "content_id": "ee4efaf90032604dcd5f4fe39630dcd2e2701fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4295, "license_type": "no_license", "max_line_length": 97, "num_lines": 186, "path": "/Missions_to_Mars/scrape_mars.py", "repo_name": "rigbysteven67/web-scraping-challenge", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[19]:\n\n\n#import dependencies\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nfrom splinter import Browser\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pymongo\n\n\n# In[35]:\n\n\n# Setup splinter\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\n# In[62]:\n\n\ndef scrape_all():\n news_tite, news_p = mars_news()\n featured_img_url = featured_image()\n mars_facts_html = mars_facts()\n full_img_list = hemisphere_data()\n\n nasa_document = {\n 'news_title': news_tite,\n 'news_paragraph': news_p,\n 'featured_img_url': featured_img_url,\n 'mars_facts_html': mars_facts_html,\n 'hemisphere_img_list': full_img_list\n }\n\n \n #consider closeing browser here\n browser.quit()\n \n \n return nasa_document\n\n\n# # NASA Mars News\n\n\n\n# In[47]:\n\n\ndef mars_news():\n # URL of page to be scraped\n url = ('https://mars.nasa.gov/news/')\n\n # Retrieve page with the browser module\n browser.visit(url)\n\n html = browser.html\n news_soup = BeautifulSoup(html, 'html.parser')\n\n # save the most recent article, title and date\n article = news_soup.find(\"div\", class_=\"list_text\")\n news_p = article.find(\"div\", class_=\"article_teaser_body\").text\n news_title = article.find(\"div\", class_=\"content_title\").text\n news_date = 
article.find(\"div\", class_=\"list_date\").text\n #print(news_date)\n #print(news_title)\n #print(news_p)\n \n return news_title, news_p\n\n\n# # JPL Mars Space Images - Featured Image\n\n# In[52]:\n\n\ndef featured_image():\n\n #Use splinter to navigate the site and find the image url for the current Featured Mars Image\n # URL of page to be scraped\n url = ('https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars')\n\n # Retrieve page with the browser module\n browser.visit(url)\n\n html = browser.html\n image_soup = BeautifulSoup(html, 'html.parser')\n \n #find the image url for the current Featured Mars Image\n image_url = image_soup.find(\"a\", class_=\"fancybox\")['data-fancybox-href']\n featured_image_url = 'https://www.jpl.nasa.gov' + image_url\n #print(featured_image_url)\n \n return featured_image_url\n\n\n# # Mars Facts\n\n# In[50]:\n\n\n\ndef mars_facts():\n url = 'https://space-facts.com/mars/'\n browser.visit(url)\n\n mars_facts_df = pd.read_html(url)\n mars_facts_df = mars_facts_df[0]\n mars_facts_df.columns = ['Description', 'Mars']\n mars_facts_df.set_index('Description', inplace = True)\n mars_facts_df\n\n mars_data_html = mars_facts_df.to_html(classes='table table-striped')\n \n return mars_data_html\n\n\n# # Mars Hemispheres\n\n# In[34]:\n\n\ndef hemisphere_data():\n\n #Visit the USGS Astrogeology site \n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url)\n html = browser.html\n hemisphere_soup = BeautifulSoup(html, 'html.parser')\n\n #loop thru the links to the hemispheres to find the image url to the full resolution image\n #add each 'title' and 'img_url' to a list\n results = hemisphere_soup.find_all(\"div\", class_=\"item\")\n\n hemisphere_list = []\n for result in results:\n\n title = result.find(\"h3\").text\n href = result.find(\"a\", class_= 'itemLink')['href']\n img_url = 'https://astrogeology.usgs.gov' + href\n hemisphere_list.append({'title' : title, 'img_url' : img_url})\n\n #define full img list\n full_img_list = []\n\n #loop through url's of hemisphere_list and parse through html to find full_img_url\n for hemisphere_dict in hemisphere_list:\n\n url = hemisphere_dict['img_url']\n\n browser.visit(url)\n html = browser.html\n hemisphere_soup = BeautifulSoup(html, 'html.parser')\n\n results = hemisphere_soup.find_all(\"div\", class_=\"downloads\")\n\n for result in results:\n\n hemisphere_title = hemisphere_dict['title']\n full_hemisphere_img = result.find(\"a\")['href'] \n\n full_img_list.append({'title' : hemisphere_title, 'img_url' : full_hemisphere_img})\n\n return full_img_list\n\n# # Mongo DB\n\n\n\n# In[64]:\n\n# run script\nif __name__ == '__main__':\n scrape_all()\n \n\n\n\n\n# In[ ]:\n\n\n\n\n" } ]
2
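app.py above keeps exactly one document in its mars collection and overwrites it on every /scrape request. A minimal standalone sketch of that single-document upsert pattern, reusing the app's connection string (the sample field values are placeholders):

```python
# Single-document upsert, as app.py does: an empty filter matches the one
# stored document, and upsert=True creates it on the first run.
import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')
mars_coll = client.mars_app.mars

nasa_document = {'news_title': 'placeholder', 'news_paragraph': 'placeholder'}
mars_coll.update_one({}, {'$set': nasa_document}, upsert=True)

print(mars_coll.find_one())  # always the single, most recent document
```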
whoseyourdady/httpie
https://github.com/whoseyourdady/httpie
89c809a11ff189fe79ae43c8c409d5957e741c3e
860a851a4b0ba7f6dfd3287c19d7d2ece5ec7946
e5a754fa625ebeada576e4c79a692595312f9c9d
refs/heads/master
2021-01-18T10:14:59.605482
2012-03-02T08:02:50
2012-03-02T08:02:50
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.5930232405662537, "avg_line_length": 25.27777862548828, "blob_id": "a68864fc58ed8cb83f4a08801f2aae516ea40c62", "content_id": "6e5aec6ceebf10f575e44da8459077e25e718c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 79, "num_lines": 36, "path": "/tests.py", "repo_name": "whoseyourdady/httpie", "src_encoding": "UTF-8", "text": "import unittest\nfrom StringIO import StringIO\nfrom httpie import httpie\n\n\ndef http(*args, **kwargs):\n stdout = StringIO()\n httpie.main(args=args, stdout=stdout,\n stdin_isatty=True,\n stdout_isatty=False)\n return stdout.getvalue()\n\n\n# TODO: moar!\n\nclass TestHTTPie(unittest.TestCase):\n\n def test_get(self):\n http('GET', 'http://httpbin.org/get')\n\n def test_json(self):\n response = http('POST', 'http://httpbin.org/post', 'foo=bar')\n self.assertIn('\"foo\": \"bar\"', response)\n\n def test_form(self):\n response = http('POST', '--form', 'http://httpbin.org/post', 'foo=bar')\n self.assertIn('\"foo\": \"bar\"', response)\n\n def test_headers(self):\n response = http('GET', 'http://httpbin.org/headers', 'Foo:bar')\n self.assertIn('\"User-Agent\": \"HTTPie', response)\n self.assertIn('\"Foo\": \"bar\"', response)\n\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
1
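tests.py above drives this 2012-era entry point by calling httpie.main() directly with argv-style arguments, so each test doubles as a usage example. A minimal sketch of the same invocation outside the test harness, built only from the argument shapes the tests use (Python 2, matching the era of this snapshot):

```python
# The invocation pattern from tests.py: pass CLI-style args plus explicit
# stdio/tty flags, and capture the response text from the fake stdout.
from StringIO import StringIO  # Python 2 stdlib, as in tests.py
from httpie import httpie

stdout = StringIO()
httpie.main(args=['GET', 'http://httpbin.org/headers', 'Foo:bar'],
            stdout=stdout, stdin_isatty=True, stdout_isatty=False)
print(stdout.getvalue())
```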
houseman/houseman.github.io
https://github.com/houseman/houseman.github.io
ef252c8fb7e72b762972135d616ee96a9acb43fa
85e10726a2a205539737808ab1d0bd6d6de7a0fe
a8cf298ade9c8ee9df36aebf3ebc759a8e08eb8b
refs/heads/main
2023-08-16T23:10:21.112922
2023-08-14T21:07:29
2023-08-14T21:07:29
365,364,868
0
0
null
2021-05-07T22:15:47
2023-08-14T21:07:34
2023-09-11T06:29:56
Python
[ { "alpha_fraction": 0.6910569071769714, "alphanum_fraction": 0.6910569071769714, "avg_line_length": 23.799999237060547, "blob_id": "22cf35be83223aae0116ec430bd2d5ef1e3dff0d", "content_id": "0ab407eb361a790681c0caf6ca97af62a21ac888", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 85, "num_lines": 5, "path": "/README.md", "repo_name": "houseman/houseman.github.io", "src_encoding": "UTF-8", "text": "# Scott Houseman\n---\n## Articles\n### Database\n- [A Better Way To Store Record Status](./articles/better-way-store-record-status.md)" }, { "alpha_fraction": 0.49741825461387634, "alphanum_fraction": 0.49741825461387634, "avg_line_length": 30.125, "blob_id": "3222d2f90ca21f18b2dc1dc8567702285fc6d578", "content_id": "b598b3d83b17dd0e708f613d36aded33c1236809", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1743, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/status.py", "repo_name": "houseman/houseman.github.io", "src_encoding": "UTF-8", "text": "# status.py\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass ProductStatus:\n \"\"\"A data model for product status\"\"\"\n\n is_in_stock: bool\n is_on_back_order: bool\n is_buyable: bool\n is_active: bool\n\n @classmethod\n def create(cls, status: str) -> ProductStatus:\n \"\"\"Create a `ProductStatus` instance derived from the given string\"\"\"\n\n match status.lower():\n case \"in stock\":\n return ProductStatus(\n is_in_stock=True,\n is_on_back_order=False,\n is_buyable=True,\n is_active=True,\n )\n case \"on order\":\n return ProductStatus(\n is_in_stock=False,\n is_on_back_order=True,\n is_buyable=True,\n is_active=True,\n )\n case \"unavailable\":\n return ProductStatus(\n is_in_stock=False,\n is_on_back_order=False,\n is_buyable=False,\n is_active=True,\n )\n case \"deleted\":\n return ProductStatus(\n is_in_stock=False,\n is_on_back_order=False,\n is_buyable=False,\n is_active=False,\n )\n case _:\n raise ValueError(f\"Unable to determine product status '{status}'\")\n\n\nif __name__ == \"__main__\":\n assert ProductStatus.create(\"in stock\") == ProductStatus.create(\"IN STOCK\")\n assert ProductStatus.create(\"unavailable\").is_in_stock is False\n assert ProductStatus.create(\"on order\").is_on_back_order is True\n" }, { "alpha_fraction": 0.6197763085365295, "alphanum_fraction": 0.6435794234275818, "avg_line_length": 31.870588302612305, "blob_id": "19a89c7a4c6650d5021945fbb50a3066316ab83a", "content_id": "3c479cbdb08ce870a234134c481baa06f3c11451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11175, "license_type": "no_license", "max_line_length": 327, "num_lines": 340, "path": "/articles/better-way-store-record-status.md", "repo_name": "houseman/houseman.github.io", "src_encoding": "UTF-8", "text": "# A Better Way To Store Record Status In A Relational Database\n\nRelational database records often require transitions between various statuses; for example `active`, `pending`, `deleted` etc.\n\nVarious database structures may be used to store this status.\n## A Naive Solution\nThe most naive database design would simply store this `status` field as a varchar type\n```sql\n-- Postgres\nDROP TABLE IF EXISTS product CASCADE;\nCREATE TABLE product (\n product_id SERIAL PRIMARY KEY,\n title VARCHAR(200) NOT NULL,\n sku VARCHAR(50) NOT 
NULL,\n status VARCHAR(20) NOT NULL\n);\n\n```\n\n## A Better Solution\n\nThis improved solution makes use of an `ENUM` type to define status.\n```sql\n-- Postgres\nDROP TYPE IF EXISTS product_status CASCADE;\nCREATE TYPE product_status AS ENUM ('in stock', 'on order', 'unavailable', 'deleted');\n\nDROP TABLE IF EXISTS product CASCADE;\nCREATE TABLE product (\n product_id SERIAL PRIMARY KEY,\n title VARCHAR(200) NOT NULL,\n sku VARCHAR(200) NOT NULL,\n status product_status\n);\n\n```\nThis limits the possible value of `status` to one of 'in stock', 'on order', 'unavailable' or 'deleted'.\n\nThere are several benefits of using an `enum` type over a `varchar`:\n1. **Data Integrity**: ensure that the value is always within a specific set of values. This is not possible with varchar (unless you add a `CHECK` constraint).\n2. **Performance**: `enum` values are internally sorted according to their order in the `enum` type declaration, not their lexicographical order. This can lead to more efficient sorting and indexing.\n3. **Readability**: `enum` types can make your code more readable and self-documenting by making it clear what values are allowed for a field.\n4. **Storage**: `enum` values are stored as integers, which can be more space-efficient than `varchar`.\n\n**However**, adding new values to an `enum` type requires an `ALTER TYPE` statement, which can be a heavy operation if your database is large.\n\n### Metadata\nThese enum status values have the following semantics with regards to a Product:\n\n| Value | In (warehouse) stock | On back order | Buyable | Visible in Order History |\n| - | - | - | - | - |\n| `in stock` | Yes | No | Yes | Yes |\n| `on order` | No | Yes | Yes | Yes |\n| `unavailable` | No | No | No | Yes |\n| `deleted` | No | No | No | No |\n\nThese now need to be implemented in business logic.\n\nSomething like:\n```python\n# status.py\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass ProductStatus:\n \"\"\"A data model for product status\"\"\"\n\n is_in_stock: bool\n is_on_back_order: bool\n is_buyable: bool\n is_active: bool\n\n @classmethod\n def create(cls, status: str) -> ProductStatus:\n \"\"\"Create a `ProductStatus` instance derived from the given string\"\"\"\n\n match status.lower():\n case \"in stock\":\n return ProductStatus(\n is_in_stock=True,\n is_on_back_order=False,\n is_buyable=True,\n is_active=True,\n )\n case \"on order\":\n return ProductStatus(\n is_in_stock=False,\n is_on_back_order=True,\n is_buyable=True,\n is_active=True,\n )\n case \"unavailable\":\n return ProductStatus(\n is_in_stock=False,\n is_on_back_order=False,\n is_buyable=False,\n is_active=True,\n )\n case \"deleted\":\n return ProductStatus(\n is_in_stock=False,\n is_on_back_order=False,\n is_buyable=False,\n is_active=False,\n )\n case _:\n raise ValueError(f\"Unable to determine product status '{status}'\")\n\n\n```\nThis works well enough, but it does split the domain between the database and the code base.\nIt would be better if we could represent the state better within the database.\n\n## Add state columns\nIn order to store these state values better in the database, we could add a few columns to the `product` table:\n```sql\n-- Postgres\nDROP TABLE IF EXISTS product CASCADE;\nCREATE TABLE product (\n product_id SERIAL PRIMARY KEY,\n title VARCHAR(250) NOT NULL,\n sku VARCHAR(200) NOT NULL,\n is_in_stock BOOLEAN NOT NULL,\n is_on_back_order BOOLEAN NOT NULL,\n is_buyable BOOLEAN NOT NULL,\n is_active BOOLEAN NOT 
NULL\n);\n\n```\n\nThis is an improvement, as we now have status attributes for each product record.\n\nBut, some limitations remain.\nWe cannot add any metadata to the various status flags. We also would need to add further columns if we ever needed a status that requires additional state flags. This would necessitate an `ALTER` operation on our large `product` table.\n\n## Normalise the database\nThe best solution would be to abstract product status from the `product` table.\nTo achieve this, we normalise the database structure by adding a foreign key to a `product_status` table:\n```sql\n-- Postgres\nDROP TABLE IF EXISTS product_status CASCADE;\nCREATE TABLE product_status (\n product_status_id SERIAL PRIMARY KEY,\n product_status_usid VARCHAR(50) NOT NULL UNIQUE, -- unique string identifier\n description VARCHAR(250) NULL,\n is_in_stock BOOLEAN NOT NULL,\n is_on_back_order BOOLEAN NOT NULL,\n is_buyable BOOLEAN NOT NULL,\n is_active BOOLEAN NOT NULL\n);\n\nDROP TABLE IF EXISTS product CASCADE;\nCREATE TABLE product (\n product_id SERIAL PRIMARY KEY,\n title VARCHAR(250) NOT NULL,\n sku VARCHAR(200) NOT NULL,\n product_status_id INTEGER NOT NULL,\n FOREIGN KEY (product_status_id) REFERENCES product_status (product_status_id)\n);\n\n```\nNext, let's create records for the various status values, and associated state flags.\n```sql\n-- Postgres\nINSERT INTO product_status\n (product_status_usid, description, is_in_stock, is_on_back_order, is_buyable, is_active)\nVALUES\n ('in stock', 'Product is in stock', true, false, true, true),\n ('on order', 'Product is on back order', false, true, true, true),\n ('unavailable', 'Product is unavailable', false, false, false, true),\n ('deleted', 'Product is deleted', false, false, false, false)\n;\nSELECT * FROM product_status;\n\n```\nWhich gives us:\n```\n product_status_id | product_status_usid | description | is_in_stock | is_on_back_order | is_buyable | is_active\n-------------------+---------------------+--------------------------+-------------+------------------+------------+-----------\n 1 | in stock | Product is in stock | t | f | t | t\n 2 | on order | Product is on back order | f | t | t | t\n 3 | unavailable | Product is unavailable | f | f | f | t\n 4 | deleted | Product is deleted | f | f | f | f\n(4 rows)\n\n```\nAnd add some junk product data:\n```sql\nINSERT INTO product\n (title, sku, product_status_id)\nVALUES\n ('EcoBoost Portable Charger', 'SKU-ECB-1234', 1),\n ('AquaPure Water Filter', 'SKU-AQPF-5678', 2),\n ('SolarGlow Garden Lights', 'SKU-SGL-9101', 3),\n ('FitFlex Yoga Mat', 'SKU-FFYM-1121', 4),\n ('BreezeAir Conditioner', 'SKU-BAC-3141', 1),\n ('CrispSound Bluetooth Speaker', 'SKU-CSBS-5161', 2),\n ('SmoothBlend Juicer', 'SKU-SBJ-7181', 3),\n ('QuickCook Microwave Oven', 'SKU-QCMO-9201', 4),\n ('UltraView Binoculars', 'SKU-UVB-1221', 1),\n ('ProFit Running Shoes', 'SKU-PFRS-3241', 1)\n;\n\n```\n\n### The value of a usid\nThe unique string identifier (usid) `product_status_usid` value is useful for reducing cognitive load when constructing queries.\nFor example:\n```sql\nSELECT\n product.title,\n product.sku,\n product_status.description status\nFROM\n product\nJOIN\n product_status\n ON\n product.product_status_id=product_status.product_status_id\nWHERE\n product_status_usid='in stock'\n;\n```\n```\n\n title | sku | status\n---------------------------+---------------+---------------------\n EcoBoost Portable Charger | SKU-ECB-1234 | Product is in stock\n BreezeAir Conditioner | SKU-BAC-3141 | Product is in stock\n UltraView 
Binoculars | SKU-UVB-1221 | Product is in stock\n ProFit Running Shoes | SKU-PFRS-3241 | Product is in stock\n(4 rows)\n```\nis far easier to understand at a glance, than\n```sql\nSELECT\n product.title,\n product.sku,\n product_status.description status\nFROM\n product\nJOIN\n product_status\n ON\n product.product_status_id=product_status.product_status_id\nWHERE\n product.product_status_id=1\n;\n\n```\n\n> Similarly, when referring to these foreign key records in code, we do not want to use a primary key integer value as a constant (as these are strictly-speaking *not* constant) identifier. Rather, we would want to use the usid for this.\n\n## Extensibility\n### Adding a new status\nShould we need to add a new status (for example `pre-order`) to our system, it is as simple as adding a new record to the `product_status` table. We may want to extend the structure for this as well. Fortunately altering the `product_status` table is far quicker and less risky than doing the same to the large `product` table.\n```sql\n-- Postgres\nALTER TABLE\n product_status\nADD COLUMN\n is_pre_order BOOLEAN NOT NULL DEFAULT false\n;\n\nINSERT INTO\n product_status\n(\n product_status_usid,\n description,\n is_in_stock,\n is_on_back_order,\n is_buyable,\n is_active,\n is_pre_order\n)\nVALUES\n(\n 'pre-order',\n 'Product is available for pre-order',\n false,\n false,\n true,\n true,\n true\n)\n;\n\n```\n\n### Adding a status log\nAnother benefit that this abstraction offers us, is the ability to extend our architecture fairly easily.\nFor example, to add a table to log status changes.\n```sql\n-- Postgres\nDROP TABLE IF EXISTS product_status_log CASCADE;\nCREATE TABLE product_status_log (\n product_id INTEGER NOT NULL,\n product_status_id INTEGER NOT NULL,\n logged_at TIMESTAMP WITH TIME ZONE DEFAULT now(),\n FOREIGN KEY (product_id) REFERENCES product (product_id),\n FOREIGN KEY (product_status_id) REFERENCES product_status (product_status_id)\n);\nCREATE INDEX idx_product_status ON product_status_log (product_id, product_status_id);\n\n```\nAnd we have a nice log\n```sql\nSELECT\n product_status.product_status_usid status,\n log.logged_at\nFROM product\n JOIN product_status_log log\n ON product.product_id=log.product_id\n JOIN product_status\n ON log.product_status_id=product_status.product_status_id\nWHERE\n product.sku='SKU-SGL-9101'\nORDER BY\n log.logged_at ASC\n;\n\n```\n```\n status | logged_at\n-------------+-------------------------------\n in stock | 2023-08-07 22:46:21.388738+02\n on order | 2023-08-07 22:46:57.509255+02\n in stock | 2023-08-07 22:47:01.686259+02\n on order | 2023-08-07 22:47:19.070394+02\n in stock | 2023-08-07 22:47:26.662571+02\n unavailable | 2023-08-07 22:47:31.837687+02\n deleted | 2023-08-07 22:47:37.574532+02\n(7 rows)\n\n```\n\nCheers!" } ]
3
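The article above recommends filtering through the unique string identifier `product_status_usid` rather than a bare integer key. A small self-contained check of that lookup, with sqlite3 standing in for Postgres and the article's two tables trimmed to the columns the query needs:

```python
# usid lookup against the normalised schema from the article; sqlite3
# stands in for Postgres, and the sample rows reuse the article's data.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
CREATE TABLE product_status (
    product_status_id INTEGER PRIMARY KEY,
    product_status_usid TEXT NOT NULL UNIQUE
);
CREATE TABLE product (
    product_id INTEGER PRIMARY KEY,
    title TEXT NOT NULL,
    product_status_id INTEGER NOT NULL
        REFERENCES product_status (product_status_id)
);
INSERT INTO product_status VALUES (1, 'in stock'), (4, 'deleted');
INSERT INTO product (title, product_status_id) VALUES
    ('EcoBoost Portable Charger', 1),
    ('FitFlex Yoga Mat', 4);
""")

rows = conn.execute("""
    SELECT product.title
    FROM product
    JOIN product_status USING (product_status_id)
    WHERE product_status.product_status_usid = 'in stock'
""").fetchall()
print(rows)  # [('EcoBoost Portable Charger',)]
```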
ndhamne/Trial
https://github.com/ndhamne/Trial
91d9756a5e3a51f2193e1d32269a3a7daec6818e
f03fd210fad9378931b7dcbcca743d76c9fe6bd8
b388547bd020f578510353e399f6ced5a075bc25
refs/heads/main
2023-02-17T12:02:58.142909
2021-01-15T09:32:25
2021-01-15T09:32:25
329,825,077
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 35, "blob_id": "eda297b739a9e1adfda9d8833c1997c7fdf222cd", "content_id": "46ec58317c1b455714797ce3000a7d2ccef63c05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 35, "num_lines": 1, "path": "/demo1/add.py", "repo_name": "ndhamne/Trial", "src_encoding": "UTF-8", "text": "# code for addition of two numbers:\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 9.714285850524902, "blob_id": "0a1e68ed55232c46dff39b22fcaab8da53e6568f", "content_id": "fad6eea86f555aeeb4c60490a21771c4d8f7c7bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 75, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/README.md", "repo_name": "ndhamne/Trial", "src_encoding": "UTF-8", "text": "# Demo\n\nSome description.\n\n## Subheader\n\nText added through local commands\n" } ]
2
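server.py above shows only the server side; the wscat comment at its top is the sole usage hint. A minimal aiohttp client sketch for the same /ws endpoint (hoge.com and port 8888 are the placeholders taken from the server code):

```python
# Talks to handler_websocket above: read the greeting, echo once, then ask
# the server to close by sending the literal string 'close'.
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect('wss://hoge.com:8888/ws') as ws:
            print(await ws.receive_str())   # 'To disconnect, type "close".'
            await ws.send_str('hello')
            print(await ws.receive_str())   # 'You sent hello.'
            await ws.send_str('close')      # server closes the connection

asyncio.run(main())
```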
taku-n/aiohttp
https://github.com/taku-n/aiohttp
19c598b7f93bcdf9c492f54385d16f0a63e515bd
26382666b04dfc61932070262bfe895167d471bf
f35c0264097a6da9da5c279032a53f8939b6ec94
refs/heads/main
2023-04-11T09:38:58.190792
2021-04-22T13:32:21
2021-04-22T13:32:21
360,460,544
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6002076864242554, "alphanum_fraction": 0.6064382195472717, "avg_line_length": 25.027027130126953, "blob_id": "8acb8797c1400786e7331a7ad0b1ea72b5c521bd", "content_id": "a90fe5e6346593d84391f434f4645031af6ef035", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1926, "license_type": "no_license", "max_line_length": 81, "num_lines": 74, "path": "/server.py", "repo_name": "taku-n/aiohttp", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n# wscat -c wss://hoge.com:8888/ws\n\nimport asyncio\nimport ssl\nimport weakref\n\nimport aiohttp\nfrom aiohttp import web, WSCloseCode\n\nFULL_CHAIN = '/etc/letsencrypt/live/hoge.com/fullchain.pem'\nPRIV_KEY = '/etc/letsencrypt/live/hoge.com/privkey.pem'\n\ndef main():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(start_server())\n try:\n loop.run_forever()\n finally:\n pass\n\nasync def start_server():\n tls = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n tls.load_cert_chain(FULL_CHAIN, PRIV_KEY)\n\n runner = create_runner()\n await runner.setup()\n site = web.TCPSite(runner, '0.0.0.0', 8888, ssl_context=tls)\n try:\n await site.start()\n finally:\n pass\n\ndef create_runner():\n app = web.Application()\n app.add_routes([\n web.get('/', handler_http),\n web.get('/ws', handler_websocket),\n ])\n app['websockets'] = weakref.WeakSet()\n app.on_shutdown.append(on_shutdown)\n\n return web.AppRunner(app)\n\nasync def on_shutdown(app):\n for ws in set(app['websockets']):\n await ws.close(code=WSCloseCode.GOING_AWAY, message='Server Shutdown')\n\nasync def handler_http(req):\n return web.Response(text='hello, world\\n')\n\nasync def handler_websocket(req):\n ws = web.WebSocketResponse()\n await ws.prepare(req)\n\n req.app['websockets'].add(ws)\n try:\n await ws.send_str('To disconnect, type \"close\".')\n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.TEXT:\n if msg.data == 'close':\n await ws.close()\n else:\n await ws.send_str('You sent ' + msg.data + '.')\n elif msg.type == aiohttp.WSMsgType.ERROR:\n print('ws connection closed with exception: %s' % ws.exception())\n finally:\n req.app['websockets'].discard(ws)\n\n return ws\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6724738478660583, "alphanum_fraction": 0.6864111423492432, "avg_line_length": 25.090909957885742, "blob_id": "23177a41267fa1c479e0a700dafc41483fe03894", "content_id": "511fbb306dfec7dcb4691de45c2cae96a2141911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/server-legacy.py", "repo_name": "taku-n/aiohttp", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\nimport ssl\n\nfrom aiohttp import web\n\nFULL_CHAIN = '/etc/letsencrypt/live/hoge.com/fullchain.pem'\nPRIV_KEY = '/etc/letsencrypt/live/hoge.com/privkey.pem'\n\nasync def handle(req):\n name = req.match_info.get('name', 'Anonymouns')\n text = 'Hello, ' + name + '\\n'\n\n return web.Response(text=text)\n\napp = web.Application()\napp.add_routes([web.get('/', handle), web.get('/{name}', handle)])\n\ntls = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\ntls.load_cert_chain(FULL_CHAIN, PRIV_KEY)\n\nweb.run_app(app, host='0.0.0.0', port=8888, ssl_context=tls)\n" } ]
2
Karthi13krishna/carbonfootprint-calculator
https://github.com/Karthi13krishna/carbonfootprint-calculator
21b395704d2fd5f15f4f725c88fffd3369f28eca
00c84e8b9016f1b1e253a2c1ee2985fd27f8de87
d8240154e8ac1443cf7046f8f501aacecc5e96f6
refs/heads/master
2022-12-25T07:10:09.179754
2020-10-07T09:07:32
2020-10-07T09:07:32
287,454,474
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5966196060180664, "alphanum_fraction": 0.6492139101028442, "avg_line_length": 47.9551887512207, "blob_id": "3e19f0aff42c55b990154067a8f513c5ab5d3243", "content_id": "5dcb2a3369f396c62e94829c458b6c4ea99c4a60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21182, "license_type": "no_license", "max_line_length": 199, "num_lines": 424, "path": "/Carbon Footprint Calculator.py", "repo_name": "Karthi13krishna/carbonfootprint-calculator", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nimport tkinter as tk\r\nimport tkinter.ttk as ttk\r\nfrom ttkthemes import ThemedStyle\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nimport numpy as np\r\nimport csv\r\nfrom geopy.distance import distance\r\nfrom PIL import ImageTk\r\n\r\n# Global Variables\r\na = [0,0,0,0]\r\naverage = [1100,700,800,1800]\r\nb = ['Energy','Food','Transportation', 'Flights']\r\niata_codes = []\r\ndist = 0.0\r\ndist1 = 0.0\r\nlpg_option = 14.2\r\n\r\n# Reading values from airport database\r\nwith open('airport_codes.csv', mode = 'r') as airport_codes:\r\n csvreader = csv.reader(airport_codes)\r\n for i in csvreader:\r\n iata_codes.append(i[2])\r\n\r\n# Getting the airport's coordinates\r\ndef flight_coordinates(iata):\r\n with open('airport_codes.csv', mode = 'r') as airport_codes:\r\n csvreader = csv.reader(airport_codes)\r\n for i in csvreader:\r\n if iata in i[2]:\r\n x,y = (i[3]).split(\",\")\r\n break\r\n return float(x),float(y)\r\n\r\n# Calculating flight distance \r\ndef flight_distance(iata1, iata2):\r\n try:\r\n d1 = flight_coordinates(iata1)\r\n d2 = flight_coordinates(iata2)\r\n except:\r\n d1 = 0.0,0.0\r\n d2 = 0.0,0.0\r\n return distance(d1,d2).km\r\n\r\n# Getting value from the flight combobox\r\ndef flight_func(event):\r\n try:\r\n src_option, dest_option = src_combo.get(), dest_combo.get()\r\n src_option1, dest_option1 = src_combo2.get(), dest_combo2.get()\r\n except:\r\n src_option, dest_option = 0,0\r\n src_option1, dest_option1 = 0,0\r\n global dist, dist1 \r\n dist = round(flight_distance(src_option, dest_option),2)\r\n dist1 = round(flight_distance(src_option1, dest_option1),2)\r\n\r\n# Getting value from LPG Combobox\r\ndef lpg_func(event):\r\n lpg = lpg_combo.get()\r\n global lpg_option\r\n lpg_option = float(lpg)*1.55537*1.8\r\n\r\n# Calculations to be done when the calculate button is pressed\r\ndef button_pressed():\r\n try:\r\n x = float(trip_var.get())*int(trip_spin.get())*int(passenger_spin.get())\r\n except:\r\n x = 1\r\n try:\r\n y = float(trip_var2.get())*int(trip_spin2.get())*int(passenger_spin2.get())\r\n except:\r\n y = 1\r\n \r\n duration = (duration_value[duration_list.index(duration_choice.get())])/365\r\n avg = [round(i * duration, 2) for i in average]\r\n \r\n a[0] = (calc(elec_entry.get(), 0.708, people_entry.get()) \r\n + calc(lpg_spin.get(), lpg_option, people_entry.get()))\r\n \r\n a[1] = (calc(beef_entry.get(), 27, people_entry.get())\r\n + calc(cheese_entry.get(), 13.5/1000, people_entry.get())\r\n + calc(chicken_entry.get(), 6.9, people_entry.get())\r\n + calc(fish_entry.get(), 6.1, people_entry.get()))\r\n \r\n a[2] = (calc(bus_entry.get(),0.10312,1) \r\n + calc(local_train_entry.get(),0.036939994,1) \r\n + calc(train_entry.get(),0.00497,1) \r\n + calc(dist1_entry.get(),veh1_var.get(),mile1_entry.get()) \r\n + calc(dist2_entry.get(),veh2_var.get(),mile2_entry.get()))\r\n flight_value = 
class_value[class_name.index(class_choice.get())]\r\n flight_value1 = class_value[class_name.index(class_choice2.get())]\r\n \r\n a[3] = calc(dist, 0.158*1.891*flight_value*x, 1) + calc(dist1, 0.158*1.891*flight_value1*y, 1)\r\n final_text.set(\"{:.2f}\".format(sum(a)/1000) + \" metric tons of CO2\")\r\n \r\n result_button = Button(flight, text = \"Next: Result\", command = selec4).place(relx = 0.7, rely = 0.9, anchor = 'w') \r\n flight_button = Button(result, text = \"Back: Flight\", command = selec3).place(relx = 0.3, rely = 0.9, anchor = 'e')\r\n Suggestion_button = Button(result, text = \"Next: Suggestions\", command = selec5).place(relx = 0.7, rely = 0.9, anchor = 'w')\r\n result1_button = Button(suggestion, text = \"Back: Result\", command = selec4).place(relx = 0.3, rely = 0.9, anchor = 'e')\r\n \r\n \r\n f = Figure(figsize=(7,4), dpi=100)\r\n f.patch.set_facecolor('#2D2D2D')\r\n graph = f.add_subplot(111)\r\n x = np.arange(len(b))\r\n width = 0.35\r\n rects1 = graph.bar(x - width/2, a, width, label='Your carbon footprint')\r\n rects2 = graph.bar(x + width/2, avg, width, label='Average carbon footprint')\r\n graph.spines[\"bottom\"].set_color(\"white\")\r\n graph.spines[\"left\"].set_color(\"white\")\r\n graph.spines[\"top\"].set_color(\"#2D2D2D\")\r\n graph.spines[\"right\"].set_color(\"#2D2D2D\")\r\n graph.set_facecolor('#2D2D2D')\r\n graph.set_xticks(x)\r\n graph.set_xticklabels(b)\r\n graph.tick_params(axis='x', colors='white')\r\n graph.tick_params(axis='y', colors='white')\r\n graph.set_ylabel('kg of CO2', color = 'white')\r\n graph.legend()\r\n \r\n def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n graph.annotate('{:.2f}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3),\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom', color='white')\r\n autolabel(rects1)\r\n autolabel(rects2)\r\n \r\n result_bar = FigureCanvasTkAgg(f, result)\r\n result_bar.get_tk_widget().place(relx = 0.5, rely = 0.5, anchor = 'center')\r\n result_bar.draw()\r\n \r\n toolbar = NavigationToolbar2Tk(result_bar, result)\r\n toolbar.update()\r\n toolbar.config(background='#2D2D2D')\r\n result_bar.get_tk_widget().place(relx = 0.5, rely = 0.5, anchor = 'center')\r\n \r\n maxi = b[a.index(max(a))]\r\n result_text.set(\"Your \" + maxi + \" emission is too high!. 
Try to reduce it :)\")\r\n tabControl.select(4)\r\n tabControl.add(suggestion, text = 'Suggestions')\r\n\r\n \r\n# Functions for switching tabs \r\ndef selec0():\r\n tabControl.select(0)\r\ndef selec1():\r\n tabControl.select(1)\r\ndef selec2():\r\n tabControl.select(2)\r\ndef selec3():\r\n tabControl.select(3) \r\ndef selec4():\r\n tabControl.select(4)\r\ndef selec5():\r\n tabControl.select(5)\r\n\r\n# Function for backend calculation \r\ndef calc(var, val, people):\r\n try:\r\n carbon = (float(var) * float(val)) / float(people)\r\n except:\r\n carbon = 0\r\n return round(carbon,2)\r\n\r\n# Graphical user inferface\r\nroot = tk.Tk()\r\nstyle = ThemedStyle(root)\r\nstyle.set_theme(\"plastik\")\r\nstyle.configure('TButton', background = '#C5D961', foreground = '#2D2D2D', focusthickness=3, focuscolor='none')\r\nstyle.map('TButton', background=[('active','#C5D961')])\r\nroot.title(\"CarbonFootprint Calculator\")\r\nroot.iconbitmap('D:/Carbon footprint calculator/logo.ico')\r\nroot.geometry(\"700x600\")\r\nroot['bg'] = '#2D2D2D'\r\n\r\n# Creating tabs\r\ntabControl = ttk.Notebook(root)\r\nwelcome = Frame(tabControl, bg = '#2D2D2D')\r\nhouse = Frame(tabControl, bg = '#2D2D2D') \r\ntransport = Frame(tabControl, bg = '#2D2D2D')\r\nflight = Frame(tabControl, bg = '#2D2D2D')\r\nresult = Frame(tabControl, bg = '#2D2D2D')\r\nsuggestion = Frame(tabControl, bg = '#2D2D2D')\r\n\r\n# Adding tabs\r\ntabControl.add(welcome, text ='Welcome') \r\ntabControl.add(house, text ='House') \r\ntabControl.add(transport, text ='Transport')\r\ntabControl.add(flight, text ='Flight')\r\ntabControl.add(result, text = 'Result')\r\n\r\ntabControl.hide(result)\r\nstyle.configure(\"Tab\", focuscolor=style.configure(\".\")[\"background\"])\r\ntabControl.pack(expand = 1, fill =\"both\")\r\n\r\n# Welcome tab (1st tab)\r\nwelcome_message = tk.Message(welcome, text = '''A carbon footprint is the measure of total greenhouse gas emissions, caused \r\nby an individual or an organization, expressed as carbon dioxide equivalent.\r\nThese greenhouse gases are released by our day-to-day activities including, \r\nbut not limited to, transportation, electricity usage, the dress we wear and even \r\nthe food we eat. We are taking buried carbon in the form of fossil fuels and \r\nreleasing it to the atmosphere again. An increase in greenhouse gas \r\nemissions is the main cause for climate change. Many people are ignorant of \r\nthis and keep adding more carbon to the atmosphere. To limit temperature rise\r\nto 1.5°C, we must reduce our greenhouse gas emissions 7.6% each year for \r\nthe next decade. Each individual must know how much carbon footprint they leave and know \r\nhow much their activity affect the environment, so that they can be aware of \r\ntheir actions. In this project an easy-to-use carbon footprint calculator is \r\ndeveloped. It can be used by anyone to calculate how much carbon they emit. \r\nIt also provides smart suggestions based on their carbon emission and help \r\nthem reduce their carbon footprint. 
Moving forward in technology is good, but \r\nwe also want to start taking care of our environment, smartly''', bg = '#2D2D2D', fg = 'white').pack(padx = 10, pady = 10)\r\n\r\nduration_label = tk.Label(welcome, text = 'Enter the period this calculation covers:', padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').place(relx = 0.1, rely = 0.78, anchor = 'w')\r\nduration_choice = StringVar(welcome)\r\nduration_list = ['1 Week', '1 Month', '1 Year']\r\nduration_value = [7,30,365]\r\nduration_option = OptionMenu(welcome, duration_choice, *duration_list)\r\nduration_option.config(bg = '#2D2D2D',fg = 'white', activebackground = '#2D2D2D', activeforeground = 'white')\r\nduration_option['menu'].config(bg = '#2D2D2D',fg = 'white', activeforeground = '#2D2D2D', activebackground = 'white')\r\nduration_choice.set(duration_list[2])\r\nduration_option.place(relx = 0.6, rely = 0.75)\r\n\r\n\r\nhouse1_button = Button(welcome, text = \"Next: House\", command = selec1).place(relx = 0.7, rely = 0.9, anchor = 'w')\r\n\r\n# House tab (2nd tab)\r\nhouse1 = LabelFrame(house, text=\"Energy\", bg = '#2D2D2D', fg = 'white', padx = 10, pady =5) \r\nhouse1.pack(fill=\"both\", expand=\"yes\")\r\n\r\npeople_label = tk.Label(house1, text = \"No of persons in the house:\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=0, column=0)\r\npeople_entry = tk.Spinbox(house1,from_ = 1, to = 10, width = 8)\r\npeople_entry.grid(row = 0, column=1)\r\n\r\nelec_label = tk.Label(house1, text = \"Electricity (in kWh):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=1, column=0)\r\nelec_entry = Entry(house1, width = 10)\r\nelec_entry.grid(row = 1, column=1)\r\n\r\nlpg_label = tk.Label(house1, text = \"Number of LPG cylinders used:\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=2, column=0)\r\nlpg_spin = ttk.Spinbox(house1,from_ = 1, to = 10, width = 8)\r\nlpg_spin.grid(row = 2, column=1)\r\nlpg_combo = ttk.Combobox(house1, value= [14.2,5,19,47.5], width = 8)\r\nlpg_combo.bind(\"<<ComboboxSelected>>\", lpg_func)\r\nlpg_combo.set(14.2)\r\nlpg_combo.grid(row = 2, column = 2, padx = 10)\r\n\r\nhouse2 = LabelFrame(house, text=\"Food\", bg = '#2D2D2D', fg = 'white', padx = 10, pady =5) \r\nhouse2.pack(fill=\"both\", expand=\"yes\")\r\n\r\nbeef_label = tk.Label(house2, text = \"Beef (in kg):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=0, column=0)\r\nbeef_entry = Entry(house2, width = 10)\r\nbeef_entry.grid(row = 0, column=1)\r\n\r\ncheese_label = tk.Label(house2, text = \"Cheese (in g):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=1, column=0)\r\ncheese_entry = Entry(house2, width = 10)\r\ncheese_entry.grid(row = 1, column=1)\r\n\r\nchicken_label = tk.Label(house2, text = \"Chicken (in kg):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=2, column=0)\r\nchicken_entry = Entry(house2, width = 10)\r\nchicken_entry.grid(row = 2, column=1)\r\n\r\nfish_label = tk.Label(house2, text = \"fish (in kg):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=3, column=0)\r\nfish_entry = Entry(house2, width = 10)\r\nfish_entry.grid(row = 3, column=1)\r\n\r\nwelcome_button = Button(house, text = \"Back: Welcome\", command = selec0).place(relx = 0.3, rely = 0.9, anchor = 'e')\r\nfinal_button = Button(house, text = \"Calculate Carbon Footprint\", command = button_pressed).place(relx = 0.5, rely = 0.9, anchor = 'center')\r\ntransport_button = Button(house, text = \"Next: Transport\", command = selec2).place(relx = 0.7, rely = 0.9, anchor = 'w')\r\n\r\n# 
Transport tab (3rd tab)\r\nlabelframe1 = LabelFrame(transport, text=\"Personal Vehicle\", bg = '#2D2D2D', fg = 'white', padx = 10, pady =5) \r\nlabelframe1.pack(fill=\"both\", expand=\"yes\") \r\n\r\nmile1_label = tk.Label(labelframe1, text = \"Milage (vehicle 1)\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=0, column=0)\r\nmile1_entry = Entry(labelframe1, width = 10)\r\nmile1_entry.grid(row = 0, column=1)\r\n\r\nveh1_var = DoubleVar()\r\nveh1_var.set(2.68)\r\nveh1_R1 = Radiobutton(labelframe1, text=\"Diesel\",variable = veh1_var, value=2.68, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\nveh1_R1.grid(row = 0, column = 2, padx = 10)\r\nveh1_R2 = Radiobutton(labelframe1, text=\"Petrol\",variable = veh1_var, value=2.31, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\nveh1_R2.grid(row = 0, column = 3, padx = 10)\r\n\r\ndist1_label = tk.Label(labelframe1, text = \"Distance (vehicle 1)\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=1, column=0)\r\ndist1_entry = Entry(labelframe1, width = 10)\r\ndist1_entry.grid(row = 1, column=1)\r\n\r\nmile2_label = tk.Label(labelframe1, text = \"Milage (vehicle 2)\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=2, column=0)\r\nmile2_entry = Entry(labelframe1, width = 10)\r\nmile2_entry.grid(row = 2, column=1)\r\n\r\nveh2_var = DoubleVar()\r\nveh2_var.set(2.68)\r\nveh2_R1 = Radiobutton(labelframe1, text=\"Diesel\",variable = veh2_var, value=2.68, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\nveh2_R1.grid(row = 2, column = 2, padx = 10)\r\nveh2_R2 = Radiobutton(labelframe1, text=\"Petrol\",variable = veh2_var, value=2.31, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\nveh2_R2.grid(row = 2, column = 3, padx = 10)\r\n\r\ndist2_label = tk.Label(labelframe1, text = \"Distance (vehicle 2)\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=3, column=0)\r\ndist2_entry = Entry(labelframe1, width = 10)\r\ndist2_entry.grid(row = 3, column=1)\r\n\r\nlabelframe2 = LabelFrame(transport, text=\"Public transport\", bg = '#2D2D2D', fg = 'white', padx = 10, pady =5) \r\nlabelframe2.pack(fill=\"both\", expand=\"yes\")\r\n\r\nbus_label = tk.Label(labelframe2, text = \"Bus (in km):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=0, column=0)\r\nbus_entry = Entry(labelframe2, width = 10)\r\nbus_entry.grid(row = 0, column=1)\r\n\r\nlocal_train_label = tk.Label(labelframe2, text = \"Local Train (in km):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=1, column=0)\r\nlocal_train_entry = Entry(labelframe2, width = 10)\r\nlocal_train_entry.grid(row = 1, column=1)\r\n\r\ntrain_label = tk.Label(labelframe2, text = \"Train (in km):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=2, column=0)\r\ntrain_entry = Entry(labelframe2, width = 10)\r\ntrain_entry.grid(row = 2, column=1)\r\n\r\nhouse_button = Button(transport, text = \"Back: House\", command = selec1).place(relx = 0.3, rely = 0.9, anchor = 'e')\r\nfinal_button = Button(transport, text = \"Calculate Carbon Footprint\", command = button_pressed).place(relx = 0.5, rely = 0.9, anchor = 'center')\r\nflight_button = Button(transport, text = \"Next: Flight\", command = selec3).place(relx = 0.7, rely = 0.9, anchor = 'w')\r\n\r\n# Flight tab (4th Tab)\r\nflight1 = 
LabelFrame(flight, text=\"Trip 1\", bg = '#2D2D2D', fg = 'white', padx = 10, pady =5) \r\nflight1.pack(fill=\"both\")\r\n\r\nsrc_label = tk.Label(flight1, text = \"Source (IATA Code):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=0, column=0)\r\nsrc_combo = ttk.Combobox(flight1, value = iata_codes[1:] , width = 10)\r\nsrc_combo.bind(\"<<ComboboxSelected>>\", flight_func)\r\nsrc_combo.grid(row = 0, column = 1, padx = 10, pady = 10)\r\n\r\ndest_label = tk.Label(flight1, text = \"Destination (IATA Code):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=1, column=0)\r\ndest_combo = ttk.Combobox(flight1, value = iata_codes[1:] , width = 10)\r\ndest_combo.bind(\"<<ComboboxSelected>>\", flight_func)\r\ndest_combo.grid(row = 1, column = 1, padx = 10, pady = 10)\r\n\r\nclass_choice = StringVar(flight1)\r\nclass_label = tk.Label(flight1, text = \"Class:\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=2, column=0)\r\nclass_name = ['Economy', 'Business', 'First Class']\r\nclass_value = [1,2.37,3]\r\nflight_class = OptionMenu(flight1, class_choice, *class_name)\r\nflight_class.config(bg = '#2D2D2D',fg = 'white', activebackground = '#2D2D2D', activeforeground = 'white')\r\nflight_class['menu'].config(bg = '#2D2D2D',fg = 'white', activeforeground = '#2D2D2D', activebackground = 'white')\r\nclass_choice.set(class_name[0])\r\nflight_class.grid(row = 2, column = 1, padx = 10, pady = 10)\r\n\r\ntrip_var = DoubleVar()\r\ntrip_var.set(1)\r\nreturn_trip = Radiobutton(flight1, text=\"Return trip\",variable = trip_var, value=1, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\nreturn_trip.grid(row = 2, column = 2, padx = 10)\r\none_way_trip = Radiobutton(flight1, text=\"One Way trip\",variable = trip_var, value=0.5, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\none_way_trip.grid(row = 2, column = 3, padx = 10)\r\n\r\npassenger_label = tk.Label(flight1, text = \"Number of Passengers:\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=3, column=0)\r\npassenger_spin = ttk.Spinbox(flight1,from_ = 1, to = 10, width = 10)\r\npassenger_spin.grid(row = 3, column=1)\r\n\r\ntrip_label = tk.Label(flight1, text = \"Number of trips\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=3, column=2)\r\ntrip_spin = ttk.Spinbox(flight1,from_ = 1, to = 10, width = 10)\r\ntrip_spin.grid(row = 3, column=3)\r\n\r\n\r\nflight2 = LabelFrame(flight, text=\"Trip 2\", bg = '#2D2D2D', fg = 'white', padx = 10, pady =5) \r\nflight2.pack(fill=\"both\", expand='yes')\r\n\r\nsrc_label2 = tk.Label(flight2, text = \"Source (IATA Code):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=0, column=0)\r\nsrc_combo2 = ttk.Combobox(flight2, value = iata_codes[1:] , width = 10)\r\nsrc_combo2.bind(\"<<ComboboxSelected>>\", flight_func)\r\nsrc_combo2.grid(row = 0, column = 1, padx = 10, pady = 10)\r\n\r\ndest_label2 = tk.Label(flight2, text = \"Destination (IATA Code):\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=1, column=0)\r\ndest_combo2 = ttk.Combobox(flight2, value = iata_codes[1:] , width = 10)\r\ndest_combo2.bind(\"<<ComboboxSelected>>\", flight_func)\r\ndest_combo2.grid(row = 1, column = 1, padx = 10, pady = 10)\r\n\r\nclass_choice2 = StringVar(flight2)\r\nclass_label2 = tk.Label(flight2, text = \"Class:\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=2, column=0)\r\nflight_class2 = OptionMenu(flight2, 
class_choice2, *class_name)\r\nflight_class2.config(bg = '#2D2D2D',fg = 'white', activebackground = '#2D2D2D', activeforeground = 'white')\r\nflight_class2['menu'].config(bg = '#2D2D2D',fg = 'white', activeforeground = '#2D2D2D', activebackground = 'white')\r\nclass_choice2.set(class_name[0])\r\nflight_class2.grid(row = 2, column = 1, padx = 10, pady = 10)\r\n\r\ntrip_var2 = DoubleVar()\r\ntrip_var2.set(1)\r\nreturn_trip2 = Radiobutton(flight2, text=\"Return trip\",variable = trip_var2, value=1, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\nreturn_trip2.grid(row = 2, column = 2, padx = 10)\r\none_way_trip2 = Radiobutton(flight2, text=\"One Way trip\",variable = trip_var2, value=0.5, bg = '#2D2D2D', fg = 'white', activebackground = '#2D2D2D',activeforeground= 'white',selectcolor = '#2D2D2D')\r\none_way_trip2.grid(row = 2, column = 3, padx = 10)\r\n\r\npassenger_label2 = tk.Label(flight2, text = \"Number of Passengers:\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=3, column=0)\r\npassenger_spin2 = ttk.Spinbox(flight2,from_ = 1, to = 10, width = 10)\r\npassenger_spin2.grid(row = 3, column=1)\r\n\r\ntrip_label2 = tk.Label(flight2, text = \"Number of trips\", padx = 20, pady =10, bg = '#2D2D2D', fg = 'white').grid(row=3, column=2)\r\ntrip_spin2 = ttk.Spinbox(flight2,from_ = 1, to = 10, width = 10)\r\ntrip_spin2.grid(row = 3, column=3)\r\n\r\ntransport_button_button = Button(flight, text = \"Back: Transport\", command = selec2).place(relx = 0.3, rely = 0.9, anchor = 'e')\r\nfinal_button = Button(flight, text = \"Calculate Carbon Footprint\", command = button_pressed).place(relx = 0.5, rely = 0.9, anchor = 'center')\r\n\r\n# Result tab (5th tab)\r\nresult_text = tk.StringVar()\r\nresult_text.set(\"Enter the values to calculate your Carbon footprint\")\r\nfinal_text = tk.StringVar()\r\nfinal_text.set(\"Calculate Carbon Footprint\")\r\n\r\nresult_message = tk.Label(result, textvariable = result_text, bg = '#2D2D2D', fg = 'white').place(relx = 0.5, rely = 0.05, anchor = 'center')\r\nfinal_message = tk.Label(result, textvariable = final_text, bg = '#2D2D2D', fg = 'white').place(relx = 0.5, rely = 0.1, anchor = 'center')\r\n\r\n\r\n\r\n# Suggestion tab (6th tab)\r\n'''suggestion_text = tk.StringVar()\r\nsuggestion_text.set(\"Calculate Carbon Footprint\")\r\nsuggestion_message = tk.Text(suggestion, textvariable = suggestion_text, bg = '#2D2D2D', fg = 'white').place(relx = 0.5, rely = 0.05, anchor = 'center')'''\r\n\r\nroot.mainloop()\r\n" } ]
1
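Every number the calculator above reports flows through its calc() helper: carbon = var * val / people, rounded to two decimals, with failures coerced to zero. Two worked examples using emission factors taken directly from the script (0.708 kg CO2 per kWh of electricity; 0.158 kg/km with a 1.891 uplift and a class factor for flights):

```python
# calc() as defined in the script above, with explicit exception types
# instead of a bare except clause.
def calc(var, val, people):
    try:
        carbon = (float(var) * float(val)) / float(people)
    except (TypeError, ValueError, ZeroDivisionError):
        carbon = 0
    return round(carbon, 2)

# 250 kWh of electricity shared by two people:
print(calc(250, 0.708, 2))                  # 88.5 kg CO2
# 1000 km economy return flight (class factor 1.0, one passenger, one trip):
print(calc(1000, 0.158 * 1.891 * 1.0, 1))   # 298.78 kg CO2
```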
stevengritz/street-tree-map
https://github.com/stevengritz/street-tree-map
6e5d1755ece5c85e9f817d246e13930eda0ab365
77c319c963db888b8b8d5d026a77da71a57e84a1
266f3b186028626be5fcfc5e00c479405964fd69
refs/heads/master
2020-03-28T22:13:14.920477
2018-09-18T03:05:21
2018-09-18T03:05:21
149,215,189
0
1
null
2018-09-18T02:11:03
2018-09-18T02:11:05
2018-09-18T03:05:22
JavaScript
[ { "alpha_fraction": 0.6445395946502686, "alphanum_fraction": 0.6659528613090515, "avg_line_length": 28.1875, "blob_id": "3aff023652b159967578716079fa4b18fccb9949", "content_id": "304f6f368066629e533046865b2f8f356ffc269c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 79, "num_lines": 16, "path": "/load_data.py", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import sqlite3\nimport pandas as pd\n\nsm_tree_data_url = 'https://data.smgov.net/resource/w8ue-6cnd.csv?$limit=50000'\nconn = sqlite3.connect('database.sqlite3')\ndownload = False \n\nif download:\n import requests\n data = requests.get(sm_tree_data_url)\n with open('trees.csv', 'w') as f:\n f.write(data.text)\n\ndf = pd.read_csv('./trees.csv')\ndf = df.rename(lambda x: x.replace(' ', '_').lower(), axis='columns')\ndf.to_sql('trees', conn, if_exists='replace')\n" }, { "alpha_fraction": 0.6117647290229797, "alphanum_fraction": 0.6117647290229797, "avg_line_length": 28.864864349365234, "blob_id": "78b43ca5b5f43208d3b5c8752870523a238b0312", "content_id": "d6420c48d92e67d669281b2fc1a6d90dc4a03c2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1105, "license_type": "no_license", "max_line_length": 64, "num_lines": 37, "path": "/street_tree_map/schema.py", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import graphene\nfrom graphene import relay\nfrom graphene_sqlalchemy import SQLAlchemyObjectType\nfrom .models import db_session, Tree as TreeModel\n\n\nclass Tree(SQLAlchemyObjectType):\n class Meta:\n model = TreeModel\n\n\nclass Query(graphene.ObjectType):\n all_trees = graphene.List(Tree,\n maxLat=graphene.Float(),\n minLat=graphene.Float(),\n maxLon=graphene.Float(),\n minLon=graphene.Float()\n )\n\n def resolve_all_trees(self, info, **kwargs):\n query = Tree.get_query(info)\n max_lat = kwargs.get('maxLat')\n min_lat = kwargs.get('minLat')\n max_lon = kwargs.get('maxLon')\n min_lon = kwargs.get('minLon')\n if max_lat:\n query = query.filter(TreeModel.latitude <= max_lat)\n if min_lat:\n query = query.filter(TreeModel.latitude >= min_lat)\n if max_lon:\n query = query.filter(TreeModel.longitude <= max_lon)\n if min_lon:\n query = query.filter(TreeModel.longitude >= min_lon)\n\n return query.all()\n\nschema = graphene.Schema(query=Query)\n" }, { "alpha_fraction": 0.5772265791893005, "alphanum_fraction": 0.5851183533668518, "avg_line_length": 30.122806549072266, "blob_id": "28794b14d85e96954b97ce9ffed166b98c80310b", "content_id": "18e5370379df137761872c0633a8c8cf23e86af5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1774, "license_type": "no_license", "max_line_length": 66, "num_lines": 57, "path": "/src/StatsContainer.js", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport './StatsContainer.css';\nimport { Row, Column } from 'react-foundation';\n\nexport default class StatsContainer extends Component {\n constructor(props) {\n super(props);\n this.state = {\n species: 'CA SYCAMORE',\n speciesTotal: 324,\n urbanForest: 'SM URBAN FOREST',\n perTotal: '1%',\n family: 'PLANE TREES',\n familyTotal: 1145,\n treePerTotal: '4%',\n treeCount: 14500,\n };\n }\n\n //first view is total trees in view\n //% of total native\n //# of species in view\n //% of species native\n //# 
of families in view\n //onClick to specific tree will show above stats for tree type\n\n render() {\n return (\n <Row className=\"StatContainer\">\n <Column className=\"box\">\n <div className=\"stat\">{this.state.treeCount}</div>\n <div className=\"statInfo\">TOTAL TREES IN VIEW</div>\n <div />\n </Column>\n <Column className=\"box\">\n <div className=\"stat\">{this.state.speciesTotal}</div>\n <div className=\"statInfo\">{this.state.species}</div>\n <div className=\"KPCOFGS\">Species</div>\n </Column>\n <Column className=\"box\">\n <div className=\"stat\">{this.state.perTotal}</div>\n <div className=\"statInfo\">% OF TOTAL NATIVE</div>\n </Column>\n <Column className=\"box\">\n <div className=\"stat\">{this.state.familyTotal}</div>\n <div className=\"statInfo\">{this.state.family}</div>\n <div className=\"KPCOFGS\">Family</div>\n </Column>\n <Column className=\"box\">\n <div className=\"stat\">{this.state.treePerTotal}</div>\n <div className=\"statInfo\">{this.state.urbanForest}</div>\n <div />\n </Column>\n </Row>\n );\n }\n}\n" }, { "alpha_fraction": 0.7541290521621704, "alphanum_fraction": 0.7745504975318909, "avg_line_length": 78.2967758178711, "blob_id": "0913935af4c790b9c0c33b6b433c40691bc9199a", "content_id": "72e23a3cd4e59a70c3b0f57ad18c36f81074b21f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12299, "license_type": "no_license", "max_line_length": 318, "num_lines": 155, "path": "/README.md", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "# street-tree-map\n\nStreet Tree Map uses open datasets to document publicly owned street trees + landmark trees in Santa Monica, California\n\n## Protocol for pull requests + code review\n\n- Please review open issues and link your pull request to the relevant issue.\n- Please create a new branch __within this repo__!\n- In your pull request, please list and explain all proposed changes to the code base (additions, deletions). If you reuse code from elsewhere, please make sure you've attributed it.\n- Please apply all relevant labels to your pull request.\n- Please request a review (either from a specific person or from the slack channel).\n- Reviewers: please review all proposed changes, write comments and questions in line notes. Please review all updates made at your request.\n- Reviewer and requester: please confirm with each other that the PR is ready to merge. Please make sure that the PR branch name documents the new changes. Please squash and merge.\n\n## Running locally\n\nRunning this requires Python 3 and nodejs.\n\n1. Install the Python requirements:\n\n```bash\npip install -r requirements.txt\n```\n\n2. Install the Javascript requirements:\n\n```bash\nnpm install\n```\n\n3. Build the project and install it into the flask app:\n\n```bash\nnpm run flask:build\n```\n\n4. Start the application:\n\n```bash\npython run.py\n```\n\n5. 
Navigate to `localhost:5000` in your browser.\n\n## Data Sources\n\n- [Biodiversity Heritage Library - API documentation](https://www.biodiversitylibrary.org/api2/docs/docs.html)\n- [Calflora](http://www.calflora.org/)\n- [California Invasive Plant Council](http://www.cal-ipc.org/plants/inventory/)\n- [Canopy Tree Library](https://canopy.org/tree-info/canopy-tree-library/)\n- [Encyclopedia of Life (EOL) - API documentation](http://eol.org/api)\n- [Google Street View - API documentation](https://developers.google.com/maps/documentation/streetview/)\n- [iNaturalist - API documentation](https://www.inaturalist.org/pages/api+reference)\n- [Implementing and managing urban forests: A much needed conservation strategy to increase ecosystem services and urban wellbeing - open access PDF](https://www.sciencedirect.com/science/article/pii/S0304380017300960?via%3Dihub)\n- [IUCN Red List of Threatened Species - API documentation](http://apiv3.iucnredlist.org/)\n- [The Jepson Herbarium eFlora](http://ucjeps.berkeley.edu/eflora/)\n- [LA City - Urban Forestry Division - Street Tree Selection Guide (Bureau of Street Services)](http://bss.lacity.org/urbanforestry/streettreeselectionguide.htm)\n- [LA Times' Neighborhood Profile](http://maps.latimes.com/neighborhoods/neighborhood/santa-monica/)\n- [Missouri Botanical Garden - Plant Finder](http://www.missouribotanicalgarden.org/plantfinder/plantfindersearch.aspx)\n- [The Plant List](http://www.theplantlist.org/)\n- [Santa Monica - Open Data - Neighborhood Organization Boundaries](https://data.smgov.net/Public-Assets/Neighborhood-Organization-Boundaries/juzu-tcbz/data)\n- [Santa Monica - Open Data - Trees](https://data.smgov.net/Public-Assets/Trees/ekya-mi9c)\n- [Santa Monica - Open Data - Trees Inventory](https://data.smgov.net/Public-Assets/Trees-Inventory/w8ue-6cnd)\n- [Santa Monica - Urban Forest - Heritage Trees](https://www.smgov.net/Portals/UrbanForest/content.aspx?id=53687092939)\n- [Santa Monica - Urban Forest - Landmark Trees](https://www.smgov.net/Portals/UrbanForest/content.aspx?id=53687091867)\n- [Santa Monica - Urban Forest - Watering Frequencies for Mature Trees PDF (pp9-13)](https://www.smgov.net/uploadedFiles/Portals/UrbanForest/FINAL%20Trees%20Watering%20Guidelines.pdf)\n- [Santa Monica - Urban Forest - Watering Street Trees in Santa Monica - Water Requirements by Species PDF (pp13-19)](https://www.smgov.net/uploadedFiles/Portals/UrbanForest/Maintenance/WateringStreetTrees.pdf)\n- [SelecTree - CalPoly](https://selectree.calpoly.edu/)\n- [Theodore Payne Foundation - California Native Plant Database](http://www.theodorepayne.org/mediawiki/index.php?title=California_Native_Plant_Library)\n\n## Tree attributes and current sources\n\n- [List of attribute fields and views for our application - gist](https://gist.github.com/Reltre/6554dfc430986803553d84742f1b88a9)\n- Initial views (desktop):\n - 1 - no tree selected (home/map view)\n - 2 - native CFP tree species view\n - 3 - non-native tree species view\n - 4 - Washingtonia filifera (only native CFP palm species) view\n - 5 - non-native palm species view\n - 6 - tree family view\n- Species Imagery - [Encyclopedia of Life](http://eol.org/api)\n- CA Native status - [Calflora.org](www.calflora.org) and [Theodore Payne Foundation](http://www.theodorepayne.org/mediawiki/index.php?title=California_Native_Plant_Library)\n- Nearest Address, GPS Coordinates, Height Range, Trunk Diameter (DBH) Range, Tree ID - [Trees Inventory - Santa Monica Open 
Data](https://data.smgov.net/Public-Assets/Trees-Inventory/w8ue-6cnd)\n- Geographic Range description (countries occurrence), IUCN Red List Status - [IUCN Red List API - v3](http://apiv3.iucnredlist.org/)\n- Recommended Watering Frequency - [City of Santa Monica Public Works Department PDF (pp9-13)](https://www.smgov.net/uploadedFiles/Portals/UrbanForest/FINAL%20Trees%20Watering%20Guidelines.pdf)\n- Species Growth, Shade Production, Shedability, Spread, Trunk clearance - [Canopy Tree Library](https://canopy.org/tree-info/canopy-tree-library/)\n- Street View Imagery - [Google Street View](https://developers.google.com/maps/documentation/streetview/)\n- For CA native species:\n- Species Height by Width, Native Distribution, Native Habitat - [Theodore Payne Foundation](http://www.theodorepayne.org/mediawiki/index.php?title=California_Native_Plant_Library)\n- For non-native tree species:\n- Invasive Status - [California Invasive Plant Council](https://www.cal-ipc.org/plants/inventory/)\n- Our public [google drive folder](https://drive.google.com/drive/u/1/folders/1PfSpH5yuydJEK-sD-PPTXcj9jHA6QLi4)\n\n## Related projects/inspiration\n\n- [A Very Detailed, Interactive Map of Chicago’s Tree Canopy (Atlas Obscura)](https://www.atlasobscura.com/articles/chicago-tree-canopy-map-2017)\n- [Arnold Arboretum map explorer](https://arboretum.harvard.edu/explorer/?utm_source=topnav&utm_medium=nav&utm_campaign=top-menu-map)\n- [Canopy](https://github.com/seeread/canopy) and descriptive [blog post](http://www.datakind.org/projects/out-on-a-limb-for-data) from DataKind\n- [The effects of urban trees on air quality - USDA 2002 PDF](https://www.nrs.fs.fed.us/units/urban/local-resources/downloads/Tree_Air_Qual.pdf)\n- [i-Tree](https://www.itreetools.org/)\n- [Increased home size and hardscape decreases urban forest cover in Los Angeles County’s single-family residential neighborhoods PDF](http://johnwilson.usc.edu/wp-content/uploads/2018/03/Increased-home-size-and-hardscape-decreases-urban-forest-cover-in-Los-Angeles-Countys-single-family-residential-neighborhoods.pdf)\n- [Jill Hubley's NYC street tree map](https://github.com/jhubley/street-trees)\n- [Melbourne - Urban Forest Visual](http://melbourneurbanforestvisual.com.au/)\n- [Minimum Requirements for an Arborist Report - City of Atlanta PDF](https://www.atlantaga.gov/home/showdocument?id=20151)\n- [NYC Parks' New York City Street Tree Map](https://tree-map.nycgovparks.org/)\n- [The Need to Standardize At-planting Data PDF](https://urbanforestry.indiana.edu/doc/publications/2015-need-to-standardize.pdf)\n- [Pasadena Beautiful Foundation's Endangered Trees List](http://www.pasadenabeautiful.org/green-links/endangered-trees-list/)\n- [Rancho Santa Ana Botanic Garden - app (Guru LLC)](https://itunes.apple.com/us/app/rancho-santa-ana-botanic-garde/id1389785599?mt=8)\n- [RegisTree](http://www.vision.caltech.edu/registree/)\n- [Santa Monica's Top 15 Tree Species PDF (2010)](http://csmgisweb.smgov.net/docs/mapcatalog/trees.pdf)\n- [TreeMapLA](https://www.opentreemap.org/latreemap/map/)\n- [Urban Tree Growth & Longevity (UTGL) Working Group - Urban Tree Monitoring Protocols Field Guide](http://www.urbantreegrowth.org/field-guide.html)\n- [West Coast Arborists (WCA)](https://westcoastarborists.com/services) - see the description of ArborAccess, WCA's GPS tree inventory service for their clients\n- [We calculated how much money trees save for your city - The Conversation](http://theconversation.com/we-calculated-how-much-money-trees-save-for-your-city-95198)\n
" }, { "alpha_fraction": 0.6385836601257324, "alphanum_fraction": 0.6398046612739563, "avg_line_length": 34.60869598388672, "blob_id": "a1f743c05bcffce345de4ef09da24ff7bbc73d5b", "content_id": "922c0d7b5a8da531ff961f593d6b22ca8392686c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/street_tree_map/models.py", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "from sqlalchemy import *\nfrom sqlalchemy.orm import (scoped_session, sessionmaker, relationship,\n                            backref)\nfrom sqlalchemy.ext.declarative import declarative_base\n\nengine = create_engine('sqlite:///database.sqlite3', convert_unicode=True)\ndb_session = scoped_session(sessionmaker(autocommit=False,\n                                         autoflush=False,\n                                         bind=engine))\n\nBase = declarative_base()\n# We will need this for querying\nBase.query = db_session.query_property()\n\nclass Tree(Base):\n    __tablename__ = 'trees'\n    tree_id = Column(Integer, primary_key=True)\n    name_botanical = Column(String)\n    name_common = Column(String)\n    latitude = Column(Float)\n    longitude = Column(Float)\n    address = Column(String)\n    street = Column(String)\n" }, { "alpha_fraction": 0.654275119304657, "alphanum_fraction": 0.654275119304657, "avg_line_length": 17.55172348022461, "blob_id": "910e64cd0a0ac37c11e4d653e94f4acc57afca11", "content_id": "5b400976d7184779f93d391698a6c31b3b5bbd2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 57, "num_lines": 29, "path": "/street_tree_map/app.py", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import flask\nfrom flask_graphql import GraphQLView\n\nfrom .models import db_session\nfrom .schema import schema\n\napp = flask.Flask(__name__)\napp.debug = True\n\napp.add_url_rule(\n    '/graphql',\n    view_func=GraphQLView.as_view(\n        'graphql',\n        schema=schema,\n        graphiql=True # for having the GraphiQL interface\n    )\n)\n\[email protected]('/')\ndef index():\n    return flask.send_file('./index.html')\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n    db_session.remove()\n\nif __name__ == '__main__':\n    app.run()\n" }, { "alpha_fraction": 0.7361111044883728, "alphanum_fraction": 0.7361111044883728, "avg_line_length": 17, "blob_id": "95981c1387124149bb0488946198af76f65f2b29", "content_id": "16c5a914c986f2a5ca52940d303fbf1b1f5fa2d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/run.py", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport street_tree_map\nstreet_tree_map.app.run()\n" }, { "alpha_fraction": 0.5691669583320618, "alphanum_fraction": 0.5767056345939636, "avg_line_length": 25.757282257080078, "blob_id": "13376be98d23912c93928a5408c823e8eca2c013", 
"content_id": "b3a038ee6cf45f3e80006d5646212e594ca5dd24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2653, "license_type": "no_license", "max_line_length": 189, "num_lines": 103, "path": "/src/Leaflet.js", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport { Map, TileLayer, Marker } from 'react-leaflet';\nimport './Leaflet.css';\n\nconst GRAPHQL_URL = 'graphql';\nconst QUERY =\n '{ allTrees(maxLat: {{maxLat}}, minLat: {{minLat}}, minLon: {{minLon}}, maxLon: {{maxLon}}) { latitude longitude nameCommon address street } }';\n\nconst TILE_URL = 'https://{s}.tile.openstreetmap.se/hydda/full/{z}/{x}/{y}.png';\nconst MAX_ZOOM = 18;\nconst ATTRIBUTION =\n 'Tiles courtesy of <a href=\"http://openstreetmap.se/\" target=\"_blank\">OpenStreetMap Sweden</a> &mdash; Map data &copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a>';\n\nexport default class LeafletWrapper extends Component {\n constructor() {\n super();\n this.state = {\n view: {\n center: [34.02, -118.48],\n zoom: 14,\n },\n markers: [],\n };\n this._debouncer = undefined;\n }\n\n componentDidMount() {\n this._fetchData(this.state.view);\n }\n\n componentWillUnmount() {\n if (this._debouncer) {\n window.clearTimeout(this._debouncer);\n }\n }\n\n _fetchData(viewport) {\n const bounds = this.refs.map.leafletElement.getBounds();\n const sw = bounds.getSouthWest();\n const ne = bounds.getNorthEast();\n const query = QUERY.split('{{minLon}}')\n .join(sw.lng)\n .split('{{maxLon}}')\n .join(ne.lng)\n .split('{{minLat}}')\n .join(sw.lat)\n .split('{{maxLat}}')\n .join(ne.lat);\n const headers = new Headers();\n headers.append('content-type', 'application/json');\n fetch(GRAPHQL_URL, {\n body: JSON.stringify({ query }),\n method: 'POST',\n cache: 'no-cache',\n headers,\n credentials: 'include',\n }).then(response => {\n response.json().then(json => this._setData(json.data));\n });\n }\n\n _setData(data) {\n const trees = data.allTrees;\n const markers = trees\n .map(t => [t.latitude, t.longitude])\n .filter((t, i) => i % 100 === 0);\n const state = {\n ...this.state,\n markers,\n };\n this.setState(state);\n }\n\n onViewportChanged(viewport) {\n if (this._debouncer) {\n window.clearTimeout(this._debouncer);\n }\n this._debouncer = window.setTimeout(() => {\n this._fetchData(viewport);\n }, 300);\n }\n\n render() {\n const markerList = this.state.markers.map(m => {\n return <Marker position={m} />;\n });\n\n return (\n <Map\n ref=\"map\"\n viewport={this.state.view}\n onViewportChanged={v => this.onViewportChanged(v)}\n >\n <TileLayer\n attribution={ATTRIBUTION}\n url={TILE_URL}\n maxZoom={MAX_ZOOM}\n />\n {markerList}\n </Map>\n );\n }\n}\n" }, { "alpha_fraction": 0.5520833134651184, "alphanum_fraction": 0.71875, "avg_line_length": 18.200000762939453, "blob_id": "9e51477c1288ec5dc61fcf526b539aead25a3203", "content_id": "8840d2f03327195c14c745893c5492b0112983b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/requirements.txt", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "flask==1.0.2\nsqlalchemy==1.2.10\nflask-graphql==2.0.0\ngraphene==2.1.3\ngraphene_sqlalchemy==2.1.0\n" }, { "alpha_fraction": 0.496724009513855, "alphanum_fraction": 0.496724009513855, "avg_line_length": 43.418182373046875, "blob_id": 
"879121a7faa3c6d549f2d5ee0ce3b4da9d12763c", "content_id": "e53a46f32b42bc47118037fec04bf9e0cfafefa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2442, "license_type": "no_license", "max_line_length": 110, "num_lines": 55, "path": "/src/NavContainer.js", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport './NavContainer.css';\nimport { Button, ButtonGroup } from 'react-foundation';\n\n\nexport default class NavContainer extends Component {\n constructor(props) {\n super(props);\n this.state = {};\n }\n\n render() {\n return (\n <div className=\"container\">\n <div className=\"navGroup\">\n <div>Heatmap</div>\n <ButtonGroup isExpanded className=\"buttonGroup\">\n <Button className=\"button\">Canopy Cover</Button>\n <Button className=\"button\">Height</Button>\n </ButtonGroup>\n </div>\n <div className=\"navGroup\">\n <div>Filter</div>\n <ButtonGroup isExpanded className=\"buttonGroup\">\n <Button className=\"button\">CA Native</Button>\n <Button className=\"button\">Endangered</Button>\n <Button className=\"button\">Heritage</Button>\n <Button className=\"button\">Invasive</Button>\n </ButtonGroup>\n </div>\n <div className=\"navGroup\">\n <div >Meet the Locals</div>\n <ButtonGroup isExpanded className=\"buttonGroup\">\n <Button className=\"localsButton\">One</Button>\n <Button className=\"localsButton\">One</Button>\n <Button className=\"localsButton\">One</Button>\n </ButtonGroup>\n <ButtonGroup isExpanded className=\"buttonGroup\">\n <Button className=\"localsButton\">One</Button>\n <Button className=\"localsButton\">One</Button>\n <Button className=\"localsButton\">One</Button>\n </ButtonGroup>\n <ButtonGroup isExpanded className=\"buttonGroup\">\n <Button className=\"localsButton\">One</Button>\n <Button className=\"localsButton\">One</Button>\n <Button className=\"localsButton\">One</Button>\n </ButtonGroup>\n </div>\n <form className=\"searchBar\">\n <input type='text' name='searchNavBar' placeholder='Search all Species' className=\"bar\" />\n </form>\n </div>\n )\n }\n}" }, { "alpha_fraction": 0.5967302322387695, "alphanum_fraction": 0.6021798253059387, "avg_line_length": 20.58823585510254, "blob_id": "6e78a41dff4e46f38282eb089a178df5c0ca0101", "content_id": "882f33ccf4cc2da8478b7a2f4a5ea7c61a515fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 367, "license_type": "no_license", "max_line_length": 58, "num_lines": 17, "path": "/src/Info.js", "repo_name": "stevengritz/street-tree-map", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport logo from './logo.png';\nimport './Info.css';\n\nexport default class InfoPanel extends Component {\n render() {\n return (\n <div className=\"InfoPanel\">\n\t<h3>This is a tree name</h3>\n <img src={logo} className=\"App-logo\" alt=\"logo\" />\n\t<p>\n\t Here is some neat information\n\t</p>\n </div>\n );\n }\n}\n" } ]
11
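The street-tree-map record above wires a SQLAlchemy model into a graphene schema served at `/graphql`. Below is a minimal sketch of hitting that endpoint from Python, assuming the Flask app is running locally on port 5000 (the README's `localhost:5000`); the bounding-box coordinates are hypothetical and not taken from the repo:

```python
# Minimal sketch: query street-tree-map's GraphQL endpoint from Python.
# Assumes `python run.py` is serving on localhost:5000; the bounding-box
# values below are hypothetical Santa Monica coordinates.
import requests

QUERY = """
{ allTrees(maxLat: 34.05, minLat: 34.00, minLon: -118.52, maxLon: -118.44) {
    latitude longitude nameCommon address street } }
"""

resp = requests.post("http://localhost:5000/graphql", json={"query": QUERY})
resp.raise_for_status()
for tree in resp.json()["data"]["allTrees"][:5]:
    print(tree["nameCommon"], tree["latitude"], tree["longitude"])
```

graphene exposes the snake_case model columns as camelCase fields, which is why the Leaflet client above asks for `nameCommon` rather than `name_common`.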
Daniel-1234567/fitness-learning
https://github.com/Daniel-1234567/fitness-learning
aa1cc3b3dc7b62d2c9e0cecdb8e71c434b6d8d9e
a5b995944dfa6a6eee7f63bf705a442faa9874e3
8c4a6891be481e67da7d83b341c836f7bc71bbd4
refs/heads/master
2022-07-03T05:24:16.672916
2018-05-20T22:41:17
2018-05-20T22:41:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6468461155891418, "alphanum_fraction": 0.6484531760215759, "avg_line_length": 25.76344108581543, "blob_id": "7a46f676df34f025a70d46e35318723a3179133e", "content_id": "580162be335930e7f7898bf7c6a77e01e4ab361a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 2491, "license_type": "permissive", "max_line_length": 160, "num_lines": 93, "path": "/MotionTracking-master/MotionTracking WatchKit Extension/InterfaceController.swift", "repo_name": "Daniel-1234567/fitness-learning", "src_encoding": "UTF-8", "text": "/*\n Copyright (C) 2016 Apple Inc. All Rights Reserved.\n See LICENSE.txt for this sample’s licensing information\n \n Abstract:\n This class is responsible for managing interactions with the interface.\n */\n\nimport WatchKit\nimport Foundation\nimport Dispatch\n\nclass InterfaceController: WKInterfaceController, WorkoutManagerDelegate {\n // MARK: Properties\n\n let workoutManager = WorkoutManager()\n var active = false\n \n var workoutStr = \"\"\n var gravityStr = \"\"\n var attitudeStr = \"\"\n var userAccelStr = \"\"\n var rotationRateStr = \"\"\n\n // MARK: Interface Properties\n \n @IBOutlet weak var titleLabel: WKInterfaceLabel!\n @IBOutlet weak var gravityLabel: WKInterfaceLabel!\n @IBOutlet weak var userAccelLabel: WKInterfaceLabel!\n @IBOutlet weak var rotationLabel: WKInterfaceLabel!\n @IBOutlet weak var attitudeLabel: WKInterfaceLabel!\n @IBOutlet weak var workoutLabel: WKInterfaceLabel!\n \n // MARK: Initialization\n \n override init() {\n super.init()\n \n workoutManager.delegate = self\n }\n\n // MARK: WKInterfaceController\n \n override func willActivate() {\n super.willActivate()\n active = true\n\n // On re-activation, update with the cached values.\n updateLabels()\n }\n\n override func didDeactivate() {\n super.didDeactivate()\n active = false\n }\n\n // MARK: Interface Bindings\n \n @IBAction func start() {\n titleLabel.setText(\"Tracking...\")\n workoutManager.startWorkout()\n }\n\n @IBAction func stop() {\n titleLabel.setText(\"Stopped Recording\")\n workoutManager.stopWorkout()\n }\n\n // MARK: WorkoutManagerDelegate\n func didUpdateMotion(_ manager: WorkoutManager, gravityStr: String, rotationRateStr: String, userAccelStr: String, attitudeStr: String, prediction:String) {\n DispatchQueue.main.async {\n \n self.gravityStr = gravityStr\n self.userAccelStr = userAccelStr\n self.rotationRateStr = rotationRateStr\n self.attitudeStr = attitudeStr\n self.workoutStr = prediction\n self.updateLabels();\n }\n }\n\n // MARK: Convenience\n func updateLabels() {\n if active {\n gravityLabel.setText(gravityStr)\n userAccelLabel.setText(userAccelStr)\n rotationLabel.setText(rotationRateStr)\n attitudeLabel.setText(attitudeStr)\n workoutLabel.setText(workoutStr)\n }\n }\n\n}\n" }, { "alpha_fraction": 0.5644413828849792, "alphanum_fraction": 0.574566662311554, "avg_line_length": 34.27878952026367, "blob_id": "123e5c22b505c4a470c9155523696b54a33fe5ec", "content_id": "92c34fd8b68e03f4af6cc28875a75449204c0d93", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5827, "license_type": "permissive", "max_line_length": 100, "num_lines": 165, "path": "/XMLParser/parser.py", "repo_name": "Daniel-1234567/fitness-learning", "src_encoding": "UTF-8", "text": "# parser.py --- \n# \n# Filename: parser.py\n# Description: \n# Parser class for loading and extracting data\n# Author: Yu Lu\n# Email: [email protected]\n# Github: 
https://github.com/SuperYuLu \n# \n# Created: Sun May 20 13:16:00 2018 (-0500)\n# Version: \n# Last-Updated: Sun May 20 14:32:11 2018 (-0500)\n# By: yulu\n# Update #: 44\n# \nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\n\n\nclass Parser:\n    \"\"\"\n    Class Parser\n    -------------\n    Read in an XML file of watch workout data from the *PowerSense* app, extract the wanted workout types\n    and export them as csv files or plot the result\n    \n    Input\n    ------------\n    fileName: filename (including path) of the xml file exported from the apple watch app *PowerSense*\n    startDate: Specify start date of the data wanted, format example: 2018-12-01\n    \n    Properties\n    ------------\n    * listTypes: list available workout types recorded in the xml file \n\n    Methods\n    ------------\n    * loadWorkOutSummary: list summary report on the workout data in the xml file\n    * loadTypeData: load the data of a specific type of workout and return a pandas dataframe\n    * to_csv: save loaded data to csv \n\n    \"\"\"\n\n    factors = {\n        'ActiveEnergyBurned':'HKQuantityTypeIdentifierActiveEnergyBurned' ,\n        'BasalEnergyBurned': 'HKQuantityTypeIdentifierBasalEnergyBurned',\n        'DistanceWalkingRunning': 'HKQuantityTypeIdentifierDistanceWalkingRunning',\n        'HeartRate':'HKQuantityTypeIdentifierHeartRate',\n        'StepCount': 'HKQuantityTypeIdentifierStepCount',\n        }\n    \n    def __init__(self, fileName, startDate):\n        self.fileName = fileName\n        self.tree_root = Parser._gen_element_tree_root(fileName)\n        self.startDate = startDate\n    \n    @staticmethod\n    def _gen_element_tree_root(fileName):\n        \"\"\"\n        initialize xml elementary tree root obj\n        \"\"\"\n        tree = ET.parse(fileName)\n        tree_root = tree.getroot()\n        return tree_root\n\n    @property\n    def listTypes(self):\n        \"\"\"\n        List available workout type data in the xml file\n        \"\"\"\n        types = []\n        for r in self.tree_root.findall('Record'):\n            date = r.attrib['startDate'][:10]\n            if date == self.startDate:\n                type = r.attrib['type']\n                types.append(type)\n        return np.unique(types)\n\n\n    def loadTypeData(self, typeName, plot = False, as_csv = False):\n        \"\"\"\n        load data for a specific type of workout\n        \"\"\"\n        startTime, endTime, units, values = [], [], [], []\n        factors = self.factors\n        for r in self.tree_root.findall('Record'):\n            startDate = r.attrib['startDate']\n            endDate = r.attrib['endDate']\n            date = startDate[:10]\n            if date == self.startDate and r.attrib['type'] == factors[typeName]:\n                unit = r.attrib['unit']\n                value = r.attrib['value']\n                startTime.append(startDate)\n                endTime.append(endDate)\n                values.append(value)\n                units.append(unit)\n            else:\n                pass\n        labels = ['StartTime', 'EndTime', typeName, 'units']\n        tempData = pd.DataFrame(dict(zip(labels, [startTime, endTime, values, units])))\n        data = tempData.copy()\n        data.iloc[:,0] = tempData.StartTime.astype(str).str[:-6] #strip out time zone \n        data.iloc[:,1] = tempData.EndTime.astype(str).str[:-6]\n        data.iloc[:,2] = tempData[typeName]\n        data.iloc[:,3] = tempData['units']\n        data.columns = labels\n\n        if plot: Parser._plot_workout(data, typeName)\n        if as_csv: Parser.to_csv(data, self.fileName.split('.')[0] + typeName + '.csv')\n        \n        return data\n    \n    def loadWorkOutSummary(self):\n        \"\"\"\n        Generate a summary report on the workout, including workout type, starting time \n        stop time, etc\n        \"\"\"\n        startTimes, endTimes, duration, durationUnit, activityType = [], [], [], [], []\n        for r in self.tree_root.findall('Workout'):\n            startTime = r.attrib['startDate']\n            endTime = r.attrib['endDate']\n            if startTime[:10] == self.startDate:\n                
duration.append(r.attrib['duration'])\n                durationUnit.append(r.attrib['durationUnit'])\n                acttype = r.attrib['workoutActivityType'][21:]\n                # Correct wrong logging\n                # if acttype == 'StairClimbing':\n                #     acttype = 'Elliptical'\n                # elif acttype == 'Yoga':\n                #     acttype = 'TraditionalStrengthTraining'\n                # elif acttype == 'Bowling':\n                #     acttype = 'Walking'\n                # Keep these appends inside the branch so all four lists\n                # stay the same length for the DataFrame below.\n                activityType.append(acttype)\n                startTimes.append(startTime)\n                endTimes.append(endTime)\n        labels = ['StartTime', 'EndTime', 'Duration', 'ActivityType']\n        data = pd.DataFrame(dict(list(zip(labels, [startTimes, endTimes, duration, activityType]))))\n        data.StartTime = data.StartTime.astype(str).str[:-6]\n        data.EndTime = data.EndTime.astype(str).str[:-6]\n        return data\n\n    @staticmethod\n    def to_csv(data, fileName):\n        \"\"\"\n        save as csv\n        \"\"\"\n        if type(data) == pd.core.frame.DataFrame:\n            data.to_csv(fileName, index = False)\n        else:\n            raise TypeError(\"Data has to be type pandas DataFrame\")\n    \n    @staticmethod\n    def _plot_workout(dataframe, typeName):\n        \"\"\"\n        plot single workout data\n        \"\"\"\n        dataframe.StartTime = pd.to_datetime(dataframe.StartTime)\n        dataframe.EndTime = pd.to_datetime(dataframe.EndTime)\n        # values are parsed from the XML as strings; coerce before plotting\n        dataframe[typeName] = pd.to_numeric(dataframe[typeName], errors='coerce')\n        dataframe.plot(y = typeName, x = 'StartTime')\n\n    \n" }, { "alpha_fraction": 0.6971019506454468, "alphanum_fraction": 0.7302226424217224, "avg_line_length": 39.62272644042969, "blob_id": "48136f04e15f029a02eb982a41a2bd36e706baff", "content_id": "45521a6e54697594c3c2b64e04aabe39a34474be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8937, "license_type": "permissive", "max_line_length": 611, "num_lines": 220, "path": "/README.md", "repo_name": "Daniel-1234567/fitness-learning", "src_encoding": "UTF-8", "text": "# Fitness Learning\n----\n## Table of Contents\n- [About](#about)\n- [Data](#data)\n- [Logistic Classifier](#logistic-classifier)\n- [Apple Watch App](#apple-watch-app)\n- [Running the App](#running-the-app)\n- [Tools](#tools)\n- [Authors](#authors)\n- [License](#license)\n- [Acknowledgements](#acknowledgements)\n\n## About \nA lightweight Apple Watch app with real-time recognition of workout categories, powered by a pretrained machine learning algorithm.\n\n## Motivation \nWhile the Apple Watch provides fitness data monitoring and analysis, it relies on users to manually input the workout category to track. It would be much more user-friendly if the Apple Watch could automatically detect and categorize the type of workout in real time.\n\n\n# Data \n\n## Data Acquisition \nData was collected directly from the Apple Watch with the assistance of a third-party application called [PowerSense](https://itunes.apple.com/us/app/powersense-motion-sensor-data-logging-tool/id1050491381?mt=8), which is a free and powerful motion sensor logging tool that can track and record various sensor data on iPhone and Apple Watch. The main reason we use it is that it provides a high sampling rate. To get more details of the workout, we suggest exporting the data as XML files, then using the XMLParser to extract the workout types of interest and export them as csv files for later analysis.\n\nGetting workout data can be simple by using *PowerSense*: \n```\nSetup sampling rate (e.g. 
50 Hz) --> click start --> start workout --> click stop --> export files \n```\n\n## Data Parsing \nData of interest is parsed from the raw xml files exported from *PowerSense*, using our own python module XMLParser:\n\nDependencies \n+ pandas \n+ xml\n+ numpy \n+ matplotlib \n\n\nInitialize: \n```python\nfrom XMLParser import Parser \npar = Parser(\"/path/to/file.xml\", startDate = \"format like 2018-12-01\") \n```\n\nList recorded workout types: \n```python\n# In: \npar.listTypes \n# Out:\narray(['HKCategoryTypeIdentifierAppleStandHour',\n       'HKQuantityTypeIdentifierActiveEnergyBurned',\n       'HKQuantityTypeIdentifierAppleExerciseTime',\n       'HKQuantityTypeIdentifierBasalEnergyBurned',\n       'HKQuantityTypeIdentifierDistanceWalkingRunning',\n       'HKQuantityTypeIdentifierHeartRate',\n       'HKQuantityTypeIdentifierHeartRateVariabilitySDNN',\n       'HKQuantityTypeIdentifierRestingHeartRate',\n       'HKQuantityTypeIdentifierStepCount',\n       'HKQuantityTypeIdentifierWalkingHeartRateAverage'], dtype='<U48')\n```\n\nExtract workout summary: \n\n```\n# In: \npar.loadWorkOutSummary() \n# Out:\n                  ActivityType           Duration              EndTime  \\\n0                   Elliptical  5.665340749422709  2018-03-31 09:49:03   \n1                       Rowing  5.387559982140859  2018-03-31 09:58:07   \n2  TraditionalStrengthTraining  3.668238099416097  2018-03-31 10:04:51   \n3                      Walking  5.929301750659943  2018-03-31 10:14:32   \n\n             StartTime  \n0  2018-03-31 09:43:23  \n1  2018-03-31 09:52:44  \n2  2018-03-31 10:01:11  \n3  2018-03-31 10:08:36  \n\n```\n\nLoad the data for a specific workout type: \n```python\n# In:\npar.loadTypeData('HeartRate', plot = False, as_csv = False)\n# Out:\n#pandas dataframe with columns\nIndex(['StartTime', 'EndTime', 'HeartRate', 'units'], dtype='object')\n```\n## Feature Selection \nFeatures saved by the Apple Watch include 'HeartRate', 'ActiveEnergyBurned', 'BasalEnergyBurned', 'DistanceWalkingRunning', and accelerometer measurements (X, Y, Z axis). Exploratory data analysis (see the Jupyter notebook) shows that the 'HeartRate', 'ActiveEnergyBurned', 'BasalEnergyBurned' and 'DistanceWalkingRunning' data are very noisy and not easily distinguishable, so we focus on the accelerometer data along the 3 directions as our selected features and model input. The idea is that the accelerometer motion of the Apple Watch should contain enough information for most workouts with arm movement. \n\n## Logistic Classifier\n\nA Logistic Regression classifier was trained and tested using the accelerometer data collected to predict the type of activity being performed by the user. The classifier was built in MATLAB and is composed of the following files:\n\n* **logistic.m**: The main script. Processes data, trains and tests a logistic regression model, and outputs a coefficients matrix.\n\n* **dataprocess.m**: A script that compiles accelerometer data from the .csv files found in \\\\data and splits it into an `nxm` matrix of features, called X, and an `nx1` labels matrix, Y. The matrices are also shuffled before being exported into their corresponding .mat file.\n\n* **splitData.m**: A function to split matrices X and Y into a training set and a test set, based on the passed 'training ratio'.\n\n* **oneVsAll.m**: Trains multiple logistic regression classifiers and returns all the classifiers in a matrix all_theta, where the i-th row of all_theta corresponds to the classifier for label i.\n\n* **predictOneVsAll.m**: Predict the label for a trained one-vs-all classifier. 
The labels are in the range 1..K, where `K = size(all_theta, 1)`\n\n* **fmincg.m**: Function that allows us to find the minimum point in our cost function.\n\n* **lrCostFunction.m**: Computes the cost and gradient for logistic regression with\nregularization.\n\n* **sigmod.m**: Evaluates the sigmoid function at a given point.\n\n### Features and Labels\n\nFor our model, we used 13 features, derived from the [raw accelerometer events](https://developer.apple.com/documentation/coremotion/getting_raw_accelerometer_events) logged by the Apple Watch:\n - attitude_roll [radians]\n - attitude_pitch [radians]\n - attitude_yaw [radians]\n - rotation_rate (x, y, z) [radians/s]\n - gravity (x, y, z) [G]\n - user_acc (x, y, z) [G]\n - acceleration magnitude [G], computed in-app from user_acc + gravity\n\nOur data consisted of 4 types of activity: rowing, elliptical, pushups, and treadmill, so our outcome labels were classified as follows:\n - Elliptical=1, Pushups=2, Rowing=3, Treadmill=4\n\n### Accuracy\n\nAfter splitting the input data into a 70% training set and a 30% test set, our classifier achieved an accuracy of **94.12%**.\n\n## Apple Watch App \n\nWe built a lightweight Apple Watch app with real-time recognition of workout categories using a pre-trained machine learning model. It allows exercise aficionados to track their workout without having to enter the exercise set manually.\n\nThe app uses Apple's Core Motion framework to collect data from the Apple Watch accelerometer and gyroscope, plus a machine learning model that can automatically categorize and log an exercise from the collected motion data. As of now, the app can identify the following traditional workouts: Elliptical, rowing machine, treadmill and pushups.\n\n### Screenshots\n\nThe following are screenshots that show every watch face the user will see before and during the workout.\n\n<table width=\"500\" cellspacing=\"0\" cellpadding=\"0\" style=\"border: none;\">\n<tr>\n<td align=\"center\" valign=\"center\">\n<img src=\"/res/loading.jpg\" alt=\"Loading Screen\" width=\"200\" height=\"250\"></img>\n<br />\nMyFitnessPal loading screen\n</td>\n<td align=\"center\" valign=\"center\">\n<img src=\"/res/notrecording.jpg\" alt=\"Home Screen\" width=\"200\" height=\"250\"></img>\n<br />\nView before user starts workout\n</td>\n<td align=\"center\" valign=\"center\">\n<img src=\"/res/start_stop.jpg\" alt=\"Start or Stop Workout\" width=\"200\" height=\"250\"></img>\n<br />\nStart and stop workout button options\n</td>\n</tr>\n<tr>\n<td align=\"center\" valign=\"center\">\n<img src=\"/res/pushups.jpg\" alt=\"Pushups Example\" width=\"200\" height=\"250\"></img>\n<br />\nPushups workout view\n</td>\n<td align=\"center\" valign=\"center\">\n<img src=\"/res/elliptical.jpg\" alt=\"Elliptical Example\" width=\"200\" height=\"250\"></img>\n<br />\nElliptical workout view\n</td>\n<td align=\"center\" valign=\"center\">\n<img src=\"/res/rowing.jpg\" alt=\"Rowing Example\" width=\"200\" height=\"250\"></img>\n<br />\nRowing workout view\n</td>\n</tr>\n</table>\n\n## Tools\n\nThis project made use of the following tools:\n\n* **Apple Watch Development**\n  - Xcode\n  - Swift\n\n* **Data Parsing**\n  - Python\n  - Jupyter\n  - Matplotlib\n  - NumPy\n\n* **Logistic Classifier**\n  - MATLAB\n\n## Authors\n\n* **Carlos Trejo**\n  - [GitHub](https://github.com/cdt876)\n  - [LinkedIn](https://www.linkedin.com/in/carlostrejomtz/)\n  - [Home](https://cdt876.github.io)\n\n* **Juan Trejo**\n  - [GitHub](https://github.com/jtrejo13)\n  - [LinkedIn](https://www.linkedin.com/in/jtrejo13/)\n  - [Home](https://jtrejo13.github.io/)\n\n* **Yu 
Lu**\n  - [GitHub](https://github.com/SuperYuLu)\n  - [LinkedIn](https://www.linkedin.com/in/yu-lu-12b123a6/)\n  - [Home](https://superyulu.github.io/)\n\n## License\n\nThis project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details.\n\n## Acknowledgements\n* [PowerSense](https://itunes.apple.com/us/app/powersense-motion-sensor-data-logging-tool/id1050491381?mt=8) to collect raw accelerometer events from the Apple Watch\n* [Eric Hsiao](https://github.com/hsiaoer) for providing a [template](https://github.com/hsiaoer/MotionTracking) that served as the base for our Apple Watch app\n" }, { "alpha_fraction": 0.6319546103477478, "alphanum_fraction": 0.6451317667961121, "avg_line_length": 27.74210548400879, "blob_id": "bb4aff829b27dfdccfe6785c0f7378a764fde603", "content_id": "365a5cc5bdb4fd2d38de72da82c7d76e59c98555", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5464, "license_type": "permissive", "max_line_length": 111, "num_lines": 190, "path": "/DataExtract.py", "repo_name": "Daniel-1234567/fitness-learning", "src_encoding": "UTF-8", "text": "\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\n\n\n# # 1. XML Data Parser\n\n# In[2]:\n\n\nimport xml.etree.ElementTree as ET\n\n\n# In[3]:\n\n\npath = '/home/yulu/FreeTailHack/data/apple_health_export/'\nfile1 = 'export.xml'\nfile2 = 'export_cda.xml'\n\n\n# In[4]:\n\n\ndef listTypes(root):\n    types = []\n    for r in root.findall('Record'):\n        date = r.attrib['startDate'][:10]\n        if date == '2018-03-31':\n            type = r.attrib['type']\n            types.append(type)\n    return(np.unique(types))\n\n\n# In[5]:\n\n\ndef loadTypeData(root, keyType):\n    startTime = []\n    endTime = []\n    units = []\n    values = []\n    global factors\n    for r in root.findall('Record'):\n        startDate = r.attrib['startDate']\n        endDate = r.attrib['endDate']\n        date = startDate[:10]\n        if date == '2018-03-31' and r.attrib['type'] == factors[keyType]:\n            unit = r.attrib['unit']\n            value = r.attrib['value']\n            startTime.append(startDate)\n            endTime.append(endDate)\n            values.append(value)\n            units.append(unit)\n    labels = ['StartTime', 'EndTime', keyType, 'units']\n    tempData = pd.DataFrame(dict(zip(labels, [startTime, endTime, values, units])))\n    data = tempData.copy()\n    data.iloc[:,0] = tempData.StartTime.astype(str).str[:-6] #strip out time zone \n    data.iloc[:,1] = tempData.EndTime.astype(str).str[:-6]\n    #df['datetime'] = df['datetime'].astype(str).str[:-6]\n    data.iloc[:,2] = tempData[keyType]\n    data.iloc[:,3] = tempData['units']\n    data.columns = labels\n    return data\n    \n\n\n# In[14]:\n\n\ndef loadWorkOut(root):\n    startTimes = []\n    endTimes = []\n    duration = []\n    durationUnit = []\n    activityType = []\n    for r in root.findall('Workout'):\n        startTime = r.attrib['startDate']\n        endTime = r.attrib['endDate']\n        if startTime[:10] == '2018-03-31':\n            duration.append(r.attrib['duration'])\n            durationUnit.append(r.attrib['durationUnit'])\n            acttype = r.attrib['workoutActivityType'][21:]\n            # Correct wrong logging\n            if acttype == 'StairClimbing':\n                acttype = 'Elliptical'\n            elif acttype == 'Yoga':\n                acttype = 'TraditionalStrengthTraining'\n            elif acttype == 'Bowling':\n                acttype ='Walking'\n            activityType.append(acttype)\n            startTimes.append(startTime)\n            endTimes.append(endTime)\n    labels = ['StartTime', 'EndTime', 'Duration', 'ActivityType']\n    data = pd.DataFrame(dict(list(zip(labels, [startTimes, endTimes, duration, activityType]))))\n    data.StartTime = data.StartTime.astype(str).str[:-6]\n    data.EndTime = 
data.EndTime.astype(str).str[:-6]\n return data\n\n\n# # 2. export\n\n# ## 2.1 All three people data\n\n# In[15]:\n\n\npath = '/home/yulu/FreeTailHack/'\nsubpath = ['Juan_data', 'Tori_data', 'Carlos_data']\nfilename = 'export.xml'\n\n\n# In[16]:\n\n\nfactors = {\n 'ActiveEnergyBurned':'HKQuantityTypeIdentifierActiveEnergyBurned' ,\n 'BasalEnergyBurned': 'HKQuantityTypeIdentifierBasalEnergyBurned',\n 'DistanceWalkingRunning': 'HKQuantityTypeIdentifierDistanceWalkingRunning',\n 'HeartRate':'HKQuantityTypeIdentifierHeartRate', # sub yes \n 'StepCount': 'HKQuantityTypeIdentifierStepCount',\n \n}\n\n\n# In[17]:\n\n\nfor sp in subpath:\n file = path + sp + '/' + filename\n tree = ET.parse(file)\n root = tree.getroot()\n savepath = './' + sp + '/'\n loadTypeData(root, 'BasalEnergyBurned').to_csv(savepath + 'BasalEnergyBurned.csv', index = False)\n loadTypeData(root, 'ActiveEnergyBurned').to_csv(savepath + 'ActiveEnergyBurned.csv', index = False)\n loadTypeData(root, 'DistanceWalkingRunning').to_csv(savepath + 'DistanceWalkingRunning.csv', index = False)\n loadTypeData(root, 'HeartRate').to_csv(savepath + 'HeartRate.csv', index = False)\n loadTypeData(root, 'StepCount').to_csv(savepath + 'StepCount.csv', index = False)\n loadWorkOut(root,).to_csv(savepath + 'WorkOutCategory.csv', index = False)\n \n\n\n# ## 2.2 Single Juan data\n\n# In[471]:\n\n\ntree = ET.parse(path + subpath[0] + '/' + filename)\nroot = tree.getroot()\n#listTypes(root)\n\n\n# In[461]:\n\n\nloadTypeData(root, 'BasalEnergyBurned').to_csv('BasalEnergyBurned.csv', index = False)\nloadTypeData(root, 'ActiveEnergyBurned').to_csv('ActiveEnergyBurned.csv', index = False)\nloadTypeData(root, 'DistanceWalkingRunning').to_csv('DistanceWalkingRunning.csv', index = False)\nloadTypeData(root, 'HeartRate').to_csv('HeartRate.csv', index = False)\nloadTypeData(root, 'StepCount').to_csv('StepCount.csv', index = False)\nloadWorkOut(root,).to_csv('WorkOutCategory.csv', index = False)\n\n\n# # 3. Analysis\n\n# In[432]:\n\n\nhr = pd.read_csv('HeartRate.csv', parse_dates=True)\nhr.StartTime = pd.to_datetime(hr.StartTime)\nhr.EndTime = pd.to_datetime(hr.EndTime)\nhr.plot(y = 'HeartRate', x = 'StartTime')\n\naeb = pd.read_csv('ActiveEnergyBurned.csv')\naeb.StartTime = pd.to_datetime(aeb.StartTime)\naeb.EndTime = pd.to_datetime(aeb.EndTime)\naeb.plot(y = 'ActiveEnergyBurned', x = 'StartTime')\n\nbeb = pd.read_csv('BasalEnergyBurned.csv', parse_dates = True)\nbeb.StartTime = pd.to_datetime(beb.StartTime)\nbeb.EndTime= pd.to_datetime(beb.EndTime)\nbeb.plot(y = 'BasalEnergyBurned', x = 'StartTime')\n\n\ndwr = pd.read_csv('DistanceWalkingRunning.csv', parse_dates = True)\ndwr.StartTime = pd.to_datetime(dwr.StartTime)\ndwr.EndTime= pd.to_datetime(dwr.EndTime)\ndwr.plot(y = 'DistanceWalkingRunning', x = 'StartTime')\n\nplt.show()\n\n" }, { "alpha_fraction": 0.5139891505241394, "alphanum_fraction": 0.5719013214111328, "avg_line_length": 35.3278694152832, "blob_id": "036ddd8df59915393b1f24c0b43e145f9173e71c", "content_id": "9300807048f8b887f28478e83789b21dc0d2f1d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 6650, "license_type": "permissive", "max_line_length": 173, "num_lines": 183, "path": "/MotionTracking-master/MotionTracking WatchKit Extension/MotionManager.swift", "repo_name": "Daniel-1234567/fitness-learning", "src_encoding": "UTF-8", "text": "/*\n Copyright (C) 2016 Apple Inc. 
All Rights Reserved.\n See LICENSE.txt for this sample’s licensing information\n \n Abstract:\n This class manages the CoreMotion interactions and \n provides a delegate to indicate changes in data.\n */\n\nimport Foundation\nimport CoreMotion\nimport WatchKit\nimport os.log\n/**\n `MotionManagerDelegate` exists to inform delegates of motion changes.\n These contexts can be used to enable application specific behavior.\n */\nprotocol MotionManagerDelegate: class {\n func didUpdateMotion(_ manager: MotionManager, gravityStr: String, rotationRateStr: String, userAccelStr: String, attitudeStr: String, prediction:String)\n}\n\nextension Date {\n var millisecondsSince1970:Int64 {\n return Int64((self.timeIntervalSince1970 * 1000.0).rounded())\n }\n}\n\nclass MotionManager {\n // MARK: Properties\n \n let theta1 = [11.149, -1.6496, -2.5578, 0.18047, -0.63865, -1.0305, -0.82277, -1.8907,\n -2.0255, -1.7771, -2.0681, -1.5767, -1.9441, -1.8621]\n let theta2 = [2.4541, 8.3452, -7.8079, -11.527, 0.31066, 0.44647, -0.040977, 4.9584, 3.4946,\n 4.0185, -1.0828, -1.1043, -2.1308, 0.52694]\n let theta3 = [-5.4841, -1.8485, 0.22756, -0.47925, 0.039934, -0.074275, -0.12836, 1.0105,\n 1.1816, -2.7029, 0.54086, -0.32585, 1.7187, 1.3845]\n let theta4 = [-25.715, 0.28693, 2.4225, 0.072567, 0.72679, 0.20328, 0.31253, 0.63696,\n 2.75, 10.144, 1.1484, 1.9702, -1.3331, -3.7885]\n \n let motionManager = CMMotionManager()\n let queue = OperationQueue()\n let wristLocationIsLeft = WKInterfaceDevice.current().wristLocation == .left\n\n // MARK: Application Specific Constants\n \n // The app is using 50hz data and the buffer is going to hold 1s worth of data.\n let sampleInterval = 1.0 / 50\n let rateAlongGravityBuffer = RunningBuffer(size: 50)\n \n weak var delegate: MotionManagerDelegate?\n \n var gravityStr = \"\"\n var rotationRateStr = \"\"\n var userAccelStr = \"\"\n var attitudeStr = \"\"\n var prediction = \"\"\n var data: [Double] = []\n var accelx = 0.0\n var accely = 0.0\n var accelz = 0.0\n var accel = 0.0\n\n var recentDetection = false\n\n // MARK: Initialization\n \n init() {\n // Serial queue for sample handling and calculations.\n queue.maxConcurrentOperationCount = 1\n queue.name = \"MotionManagerQueue\"\n }\n\n // MARK: Motion Manager\n\n func startUpdates() {\n if !motionManager.isDeviceMotionAvailable {\n print(\"Device Motion is not available.\")\n return\n }\n \n os_log(\"Start Updates\");\n\n motionManager.deviceMotionUpdateInterval = sampleInterval\n motionManager.startDeviceMotionUpdates(to: queue) { (deviceMotion: CMDeviceMotion?, error: Error?) 
in\n if error != nil {\n print(\"Encountered error: \\(error!)\")\n }\n\n if deviceMotion != nil {\n self.processDeviceMotion(deviceMotion!)\n }\n }\n }\n\n func stopUpdates() {\n if motionManager.isDeviceMotionAvailable {\n motionManager.stopDeviceMotionUpdates()\n }\n }\n\n // MARK: Motion Processing\n \n func processDeviceMotion(_ deviceMotion: CMDeviceMotion) {\n gravityStr = String(format: \"X: %.1f Y: %.1f Z: %.1f\" ,\n deviceMotion.gravity.x,\n deviceMotion.gravity.y,\n deviceMotion.gravity.z)\n userAccelStr = String(format: \"X: %.1f Y: %.1f Z: %.1f\" ,\n deviceMotion.userAcceleration.x,\n deviceMotion.userAcceleration.y,\n deviceMotion.userAcceleration.z)\n rotationRateStr = String(format: \"X: %.1f Y: %.1f Z: %.1f\" ,\n deviceMotion.rotationRate.x,\n deviceMotion.rotationRate.y,\n deviceMotion.rotationRate.z)\n attitudeStr = String(format: \"r: %.1f p: %.1f y: %.1f\" ,\n deviceMotion.attitude.roll,\n deviceMotion.attitude.pitch,\n deviceMotion.attitude.yaw)\n \n accelx = deviceMotion.userAcceleration.x + deviceMotion.gravity.x\n accely = deviceMotion.userAcceleration.y + deviceMotion.gravity.y\n accelz = deviceMotion.userAcceleration.z + deviceMotion.gravity.z\n \n accel = sqrt(accelx * accelx + accely * accely + accelz * accelz)\n \n data = [deviceMotion.attitude.roll, deviceMotion.attitude.pitch, deviceMotion.attitude.yaw,\n deviceMotion.rotationRate.x, deviceMotion.rotationRate.y, deviceMotion.rotationRate.z,\n deviceMotion.gravity.x, deviceMotion.gravity.y, deviceMotion.gravity.z,\n deviceMotion.userAcceleration.x, deviceMotion.userAcceleration.y, deviceMotion.userAcceleration.z,\n accel]\n \n prediction = predictWorkout(data);\n \n// let timestamp = Date().millisecondsSince1970\n \n os_log(\"Prediction: %@ \", String(prediction))\n \n updateMetricsDelegate();\n }\n \n func predictWorkout(_ data: [Double]) -> String {\n var predictions: [Double] = [0, 0, 0, 0]\n \n predictions[0] = hfun(theta1, data)\n predictions[1] = hfun(theta2, data)\n predictions[2] = hfun(theta3, data)\n predictions[3] = hfun(theta4, data)\n \n let max_val = predictions.max()\n \n if max_val == predictions[0] {\n return \"Elliptical\"\n } else if max_val == predictions[1] {\n return \"Pushups\"\n } else if max_val == predictions[2] {\n return \"Rowing\"\n } else if max_val == predictions[3] {\n return \"Treadmill\"\n } else {\n return \"\"\n }\n }\n \n func hfun(_ theta:[Double], _ data:[Double]) -> Double {\n var sum:Double = theta[0]\n for i in 0..<data.count {\n sum = sum + theta[i+1] * data[i]\n }\n return sum\n }\n \n func sigmoid(_ value: Double) -> Double {\n return 1.0 / (1.0 + exp(-1 * value))\n }\n \n\n // MARK: Data and Delegate Management\n \n func updateMetricsDelegate() {\n delegate?.didUpdateMotion(self,gravityStr:gravityStr, rotationRateStr: rotationRateStr, userAccelStr: userAccelStr, attitudeStr: attitudeStr, prediction: prediction)\n }\n}\n" }, { "alpha_fraction": 0.4776119291782379, "alphanum_fraction": 0.5761194229125977, "avg_line_length": 19.9375, "blob_id": "1d2455e0db8057aeba17c90a299ba55ecdb3fe00", "content_id": "5f2b96764ba080697f92dc2d13d6e6d3dc1d25f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "permissive", "max_line_length": 48, "num_lines": 16, "path": "/XMLParser/__init__.py", "repo_name": "Daniel-1234567/fitness-learning", "src_encoding": "UTF-8", "text": "# __init__.py --- \n# \n# Filename: __init__.py\n# Description: \n# \n# Author: Yu Lu\n# Email: [email protected]\n# Github: 
https://github.com/SuperYuLu \n# \n# Created: Sun May 20 13:14:37 2018 (-0500)\n# Version: \n# Last-Updated: Sun May 20 14:31:59 2018 (-0500)\n# By: yulu\n# Update #: 2\n# \nfrom XMLParser.parser import Parser\n" } ]
6
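Tying the fitness-learning record above together, here is a condensed usage sketch for its `XMLParser.Parser` class; the export path and date are placeholders for your own PowerSense/Health export:

```python
# Condensed sketch of driving XMLParser.Parser end to end.
# "export.xml" and the date below are placeholders, not repo files.
from XMLParser import Parser

par = Parser("export.xml", startDate="2018-03-31")
print(par.listTypes)                 # workout record types on that date
summary = par.loadWorkOutSummary()   # one row per logged workout
hr = par.loadTypeData("HeartRate", plot=False, as_csv=True)
print(summary.head())
print(hr.head())
```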
carysmills/McData
https://github.com/carysmills/McData
7fec51948ea30ae67d43f866a819232f2c7cd7dd
07279ec6adeb037d185d1b9fadf8d71a8cce02e3
0c3b994698e112d14c1cbda9766d10e0c56ce135
refs/heads/master
2021-05-09T04:46:52.694080
2018-01-28T18:57:11
2018-01-28T18:57:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8636363744735718, "alphanum_fraction": 0.8636363744735718, "avg_line_length": 7.800000190734863, "blob_id": "c4101b988e69628c74c5b0ca958fc1b58f194f5d", "content_id": "a997d9808701345d3b8c73310bdf041da13bc321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "no_license", "max_line_length": 11, "num_lines": 5, "path": "/requirements.txt", "repo_name": "carysmills/McData", "src_encoding": "UTF-8", "text": "requests\nclick\npandas\ntabula-py\nwaybackpack\n" }, { "alpha_fraction": 0.570290744304657, "alphanum_fraction": 0.5722819566726685, "avg_line_length": 27.522727966308594, "blob_id": "3fbaec0e39091be7c16dacd21a40fb47e8d6d64a", "content_id": "011ba2583624595ee9456d42326b4219357acfe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2515, "license_type": "no_license", "max_line_length": 109, "num_lines": 88, "path": "/parse_pdfs.py", "repo_name": "carysmills/McData", "src_encoding": "UTF-8", "text": "# test script\nfrom tabula import read_pdf\nfrom subprocess import CalledProcessError\nimport pandas as pd\nfrom glob import glob\nimport click\n\nCOLUMNS = [\n \"Item\",\n \"Serving Size\",\n \"Calories\",\n \"Calories From Fat\",\n \"Total Fat (g)\",\n \"Total Fat % Daily Value**\",\n \"Saturated Fat (g)\",\n \"Saturated Fat % Daily Value**\",\n \"Trans Fat (g)\",\n \"Cholesterol (mg)\",\n \"Cholesterol % Daily Value**\",\n \"Sodium (mg)\",\n \"Sodium % Daily Value**\",\n \"Carbohydrates (g)\",\n \"Carbohydrates % Daily Value**\",\n \"Dietary Fiber (g)\",\n \"Dietary Fiber % Daily Value**\",\n \"Sugars (g)\",\n \"Protein (g)\",\n \"Vitamin A\",\n \"Vitamin C\",\n \"Calcium\",\n \"Iron\"]\n\n\ndef extract_timestamp(path):\n _, _, timestamp_pdf = path.split(\"/\")\n timestamp_as_string = timestamp_pdf.strip(\".pdf\")\n return pd.to_datetime(timestamp_as_string)\n\n\ndef extract_data_from_page(path, page):\n df = read_pdf(path, pages=page, lattice=True, guess=False, pandas_options={'skiprows': [0]}, silent=True)\n if df is None:\n return None\n df.columns = COLUMNS\n df['Item'] = df['Item'].str.replace(\"\\r\", \" \")\n df['Item'] = df['Item'].str.replace('†', \"\")\n df['Item'] = df['Item'].str.replace('+', \"\")\n df['Item'] = df['Item'].str.replace('®', \"\")\n df['Item'] = df['Item'].str.replace('§', \"\")\n df['Item'] = df['Item'].str.replace('*', \"\")\n df['Item'] = df['Item'].str.strip()\n df['Serving Size'] = df['Serving Size'].str.replace(\"\\r\", \" \")\n df['extracted_at'] = extract_timestamp(path)\n to_drop = pd.isnull(df['Calories'])\n df = df.loc[~to_drop]\n return df\n\n\ndef extract_data_from_pdf(path):\n df = pd.DataFrame(columns=COLUMNS)\n page = 1\n while True:\n try:\n print(\" page: %d\" % page)\n _df = extract_data_from_page(path, page)\n except CalledProcessError:\n break\n if _df is not None:\n df = df.append(_df)\n page += 1\n return df\n\n\[email protected]()\[email protected]('--input-path', help='Directory that contains the PDFs.')\[email protected]('--output', help='The name of the csv file to save to.')\ndef main(input_path, output):\n df = pd.DataFrame(columns=COLUMNS)\n for pdf_count, pdf_path in enumerate(sorted(glob(input_path + \"/*.pdf\")), start=1):\n print(\" PDF: %d\" % pdf_count)\n df = df.append(extract_data_from_pdf(pdf_path))\n if pdf_count == 2:\n break\n df.to_csv(output, index=False)\n return\n\nif __name__ == '__main__':\n main()\n\n" } ]
2
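The parse_pdfs.py file above recovers a timestamp from a snapshot path such as snapshots/pdfs/<timestamp>.pdf. A minimal sketch of that step, written here for illustration only (the sample path and the explicit %Y%m%d%H%M%S format are assumptions about the Wayback archive layout, not code from the repository):

```python
# Illustrative sketch only -- not from the McData repo.
# Snapshot names are assumed to be 14-digit Wayback timestamps.
import os
import pandas as pd

def extract_timestamp(path):
    # splitext removes ".pdf" as a suffix, unlike str.strip(".pdf"),
    # which removes *characters* from both ends of the string.
    stem, _ = os.path.splitext(os.path.basename(path))
    return pd.to_datetime(stem, format="%Y%m%d%H%M%S")

print(extract_timestamp("snapshots/pdfs/20180128185711.pdf"))
# -> 2018-01-28 18:57:11
```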
OrHavraPerry/NanyCam
https://github.com/OrHavraPerry/NanyCam
de778ffe920930016c137f9ee3d6b6e7f1120b59
8cb12c040c3b344d20b08350c34f9520f48bb27d
d8e9af5f1fdeea3d2e1790432cd1ff36cbc617e7
refs/heads/master
2021-04-26T22:49:30.726109
2018-03-06T23:56:38
2018-03-06T23:56:38
124,151,457
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7614840865135193, "alphanum_fraction": 0.7667844295501709, "avg_line_length": 30.44444465637207, "blob_id": "e6f0b5885d932dd1dc09eb2a3a05762e38138116", "content_id": "608389008b391d00f02fa926d7557e29f0caa6c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 566, "license_type": "no_license", "max_line_length": 112, "num_lines": 18, "path": "/README.md", "repo_name": "OrHavraPerry/NanyCam", "src_encoding": "UTF-8", "text": "# NanyCam\nAI Nanny for 3D Printers\n\nImagine not having to be a nanny for a 3D printer.\n\nWe live in times when AI is not a dream, but a reality.\n\n\n# What will it do?\nUsing a camera, NanyCam will analyze the data and warn you about faults.\n\nThe data will be sent to a server which is accessible from everywhere.\n\n# The Goal\nTo make a web interface and web service that will enable 3D printers to work without the need to watch the printer.\n\n# We just need some data to make it work\nIf you want to help, you can send us cam data (using the CamCupture.py script).\n" }, { "alpha_fraction": 0.5425434708595276, "alphanum_fraction": 0.5937786102294922, "avg_line_length": 21.306121826171875, "blob_id": "0e7fa79cf5b44796fc08d9b8bbfd3e8cc930924e", "content_id": "788470460bda43bc6b4bea92b767331f98e19447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1093, "license_type": "no_license", "max_line_length": 79, "num_lines": 49, "path": "/CamCupture.py", "repo_name": "OrHavraPerry/NanyCam", "src_encoding": "UTF-8", "text": "import cv2\nimport datetime as td\nimport time\nimport os\n\n# Change path if needed\npath = 'Capture'\nif not os.path.exists(path):\n    os.mkdir(path)\n\n# Open Cams\ncap1 = cv2.VideoCapture(0)\ncap2 = cv2.VideoCapture(1)\n\n# Check if cams are ok\nif not (cap1.isOpened() and cap2.isOpened()):\n    print('Error: Opening Cams')\n    print('cam1: ', cap1.isOpened())\n    print('cam2: ', cap2.isOpened())\n\n# Save data and show it\nwhile cap1.isOpened() and cap2.isOpened():\n    # delay\n    time.sleep(2)\n\n    # get time\n    dt = str(td.datetime.now()).replace(\" \",\"_\").replace(\":\",\"-\").split(\".\")[0]\n\n    # capture\n    _, image1 = cap1.read()\n    _, image2 = cap2.read()\n\n    # Show images\n    cv2.imshow('Cam1', image1)\n    cv2.imshow('Cam2', image2)\n\n    # resize\n    im1 = cv2.resize(image1, (100, 100))\n    im2 = cv2.resize(image2, (100, 100))\n\n    # exit if key is q\n    if cv2.waitKey(1) == ord('q'):\n        cap1.release()\n        cap2.release()\n        cv2.destroyAllWindows()\n        break\n\n    cv2.imwrite('{}\\\\Cam1_{}.png'.format(path,dt), im1)\n    cv2.imwrite('{}\\\\Cam2_{}.png'.format(path,dt), im2)\n" } ]
2
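The capture loop in CamCupture.py above builds its timestamped filename with a chain of replace calls. A minimal sketch of the same step done with strftime (illustrative only, not code from the repository; the folder and camera names are placeholders):

```python
# Illustrative sketch only -- not from the NanyCam repo.
import os
import datetime as td

def frame_path(folder, cam_name):
    # strftime produces the same "YYYY-mm-dd_HH-MM-SS" stamp the script
    # assembles with str.replace(), in one step.
    stamp = td.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    # os.path.join keeps the path portable instead of hard-coding "\\".
    return os.path.join(folder, "{}_{}.png".format(cam_name, stamp))

print(frame_path("Capture", "Cam1"))  # e.g. Capture/Cam1_2018-03-06_23-56-38.png
```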
fmount/nopaste
https://github.com/fmount/nopaste
242329e6efa70a2036c9d8f2c2a5c4f9db874955
be72e3c4f1aacd961bacf960ef02c617a064dae5
15ba2b8eb9a0550256b6a3b42961f15cb6b42ab0
refs/heads/master
2021-08-31T08:51:15.283853
2017-12-20T20:44:00
2017-12-20T20:44:00
113,416,824
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.5247108340263367, "alphanum_fraction": 0.5410094857215881, "avg_line_length": 26.565217971801758, "blob_id": "3462bbf9e48b26c197a73ad7051fe905995198d3", "content_id": "ddebf9c8a9b88cbcc8c7bc9a02fc421e6d949e8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1902, "license_type": "permissive", "max_line_length": 84, "num_lines": 69, "path": "/nopaste/utils/short_url.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\nimport uuid\n\nimport os\nimport sys\nimport json\nimport string\nfrom math import floor\n\n\nclass Shorturl(object):\n def __init__(self):\n pass\n\n # Base62 Encoder and Decoder\n @classmethod\n def toBase62(self, num, b=62):\n if b <= 0 or b > 62:\n print(\"Exiting\")\n return 0\n base = string.digits + string.lowercase + string.uppercase\n r = num % b\n res = base[r]\n q = floor(num / b)\n while q:\n r = q % b\n q = floor(q / b)\n res = base[int(r)] + res\n return res\n\n @classmethod\n def toBase10(self, msg, b=62):\n basecode = string.digits + string.lowercase + string.uppercase\n limit = len(msg)\n res = 0\n for i in range(0, limit):\n res = b * res + basecode.find(msg[i], 0)\n return res\n \n\n#Make some tests\n#for i in range(0, pow(10, 7)):\n# print(Shorturl.toBase62(i))\n#h = uuid.uuid4().int\n#print(h)\n#a = Shorturl.toBase62(h)\n#print(Shorturl.toBase10(a))\n" }, { "alpha_fraction": 0.626386821269989, "alphanum_fraction": 0.6311843991279602, "avg_line_length": 30.761905670166016, "blob_id": "7c2b05f946745a115b28d5b54705f7797a4ec621", "content_id": "68a143408b817f77650f3ad47c9a75d6dc6012d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3335, "license_type": "permissive", "max_line_length": 86, "num_lines": 105, "path": "/nopaste/lib/resources/userapi.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. 
You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import backref, mapper, relation, sessionmaker\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nfrom flask import Flask, abort, jsonify, request, url_for\nimport logging\nimport json\nimport sqlite_middleware\nfrom model import User\nfrom config import CONF\n\nfrom flask_httpauth import HTTPBasicAuth\n\nLOG = logging.getLogger(__name__)\n\nauth = HTTPBasicAuth()\n\n\nclass UsersAPI(Resource):\n\n\n def get(self, uuid):\n pass\n\n\n def post(self):\n username = request.json.get('username')\n password = request.json.get('password')\n\n\n if username is None or password is None:\n abort(400) # missing arguments\n\n engine = create_engine(CONF.database.sql_engine_prefix + CONF.database.dbname)\n Session = sessionmaker(bind=engine)\n\n if sqlite_middleware._get_user(username, Session()) is not None:\n abort(400) # existing user\n\n cur_id = sqlite_middleware._get_last_object(Session(), User) + 1\n user = User(cur_id, username, password)\n \n \n sqlite_middleware._insert_link(user, Session())\n return jsonify(\"user\", user._tojson())\n\n def delete(self):\n pass\n\n\nclass UserAPI(Resource):\n\n\n @auth.login_required\n def get(self, uuid):\n engine = create_engine(CONF.database.sql_engine_prefix + CONF.database.dbname)\n Session = sessionmaker(bind=engine)\n item = sqlite_middleware._get_object(uuid, Session(), User)\n if item is not None:\n return item._tojson()\n abort(404)\n\n\n @auth.login_required\n def delete(self, uuid):\n engine = create_engine(CONF.database.sql_engine_prefix + CONF.database.dbname)\n Session = sessionmaker(bind=engine)\n cur_user = sqlite_middleware._get_object(uuid, Session(), User)\n if sqlite_middleware._delete_object(uuid, Session(), User) is None:\n abort(400)\n return jsonify(\"user\", cur_user._tojson())\n\n\n @auth.verify_password\n def verify_password(username, password):\n engine = create_engine(CONF.database.sql_engine_prefix + CONF.database.dbname)\n Session = sessionmaker(bind=engine)\n user = sqlite_middleware._get_user(username, Session())\n if not user or not user.verify_password(password) or not user.isadmin():\n return False\n return True\n" }, { "alpha_fraction": 0.6501901149749756, "alphanum_fraction": 0.6519449949264526, "avg_line_length": 26.572580337524414, "blob_id": "afc4c939ab003db810db17c6ca4d6bb35055fcb8", "content_id": "e5433ee26596e091dfb3714a3bceede986fa0552", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3419, "license_type": "permissive", "max_line_length": 99, "num_lines": 124, "path": "/nopaste/sqlite_middleware.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the 
\"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\n\nimport os\nimport sys\nimport json\nimport sqlite3 as lite\nfrom sqlite3 import OperationalError\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import MetaData, Column, Table, ForeignKey\nfrom sqlalchemy import Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import backref, mapper, relation, sessionmaker\nfrom sqlalchemy.orm.exc import UnmappedInstanceError\nfrom model import Link, User\n\nfrom config import CONF\nimport logging\n\n\nLOG = logging.getLogger(__name__)\n\nengine = create_engine(CONF.database.sql_engine_prefix + CONF.database.dbname)\n\n\ndef _init_engine(Base):\n Base.metadata.bind = engine\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n if _get_user(CONF.default.admin_user, Session()) is None:\n cur_id = _get_last_object(Session(), User)\n _insert_user(User(cur_id, CONF.default.admin_user, CONF.default.admin_password), Session())\n return engine\n\n\ndef _insert_link(link, session):\n session.add(link)\n session.commit()\n\n\ndef _insert_user(user, session):\n session.add(user)\n session.commit()\n\n\ndef _find_objects(uid, session):\n result = session.query(Link).filter_by(uuid=uid).all()\n return result\n\n\ndef _get_object(uid, session, Obj):\n result = session.query(Obj).filter_by(uuid=uid).first()\n return result\n\n\ndef _get_user(username, session):\n result = session.query(User).filter_by(username=username).first()\n return result\n\n\ndef _get_all_users(session):\n result = session.query(User).all()\n return result\n\n\ndef _get_last_object(session, Obj):\n result = session.query(Obj).order_by(Obj.uuid.desc()).first()\n if result is None:\n return 0\n return result.uuid\n\n\ndef _get_all_objects(session, Obj):\n try:\n result = session.query(Obj).all()\n return result\n except UnmappedInstanceError as e:\n LOG.err(\"[ERROR] Item not found\")\n return None\n\n\ndef _delete_object(uid, session, Obj):\n try:\n obj = session.query(Obj).filter_by(uuid=uid).first()\n session.delete(obj)\n session.commit()\n return obj\n except UnmappedInstanceError as e:\n LOG.err(\"[ERROR] Item not found\")\n return None\n\n\ndef _clear_table_by_name(Base, session, tname):\n for table in reversed(Base.metadata.sorted_tables):\n if table.name == tname:\n session.execute(table.delete())\n session.commit()\n\n\ndef _clear_database(Base):\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n" }, { "alpha_fraction": 0.5553877353668213, "alphanum_fraction": 0.596676766872406, "avg_line_length": 29.55384635925293, "blob_id": "46df8f5f1066fdd61488edf0bb483280b5698cc9", "content_id": "d7c21bfa0035d61294ca12dc587366c494aabc3f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1986, "license_type": "permissive", "max_line_length": 84, "num_lines": 65, "path": 
"/nopaste/utils/helper.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\nfrom flask import render_template, make_response, Response\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime as dt, timedelta\nimport logging\n\nLOG = logging.getLogger(__name__)\n\n\ndef render(resource, ua):\n\n content = open(resource).read()\n\n if \"curl\" not in str(ua):\n LOG.info(\"[UA] Request rendering from %s \" % str(ua))\n if bool(BeautifulSoup(content, \"html.parser\").find()):\n return Response(content, mimetype=\"text/html\")\n\n return content\n\n\ndef is_expired(t1, expire_time):\n \"\"\"\n Define the mode of setting link expired\n \"\"\"\n LOG.info(\"Timedelta seconds: %d\\n\" % int(expire_time))\n now = dt.now()\n link_dt = dt.strptime(t1, \"%Y-%m-%d %H:%M:%S\")\n\n if abs(now - link_dt) > timedelta(seconds=expire_time):\n LOG.info(\"[EXPIRED] - TRUE\")\n return True\n\n LOG.info(\"[EXPIRED] - FALSE\")\n return False\n\n\n# JUST FOR TEST PURPOSES ...\n#print(is_expired(\"2017-12-01 20:32:00\", 86400))\n#print(is_expired(\"2017-12-06 15:32:00\", 86400))\n#print(is_expired(\"2017-12-07 20:32:00\", 86400))\n#print(is_expired(\"2017-12-05 20:32:00\", 86400))\n" }, { "alpha_fraction": 0.6633663177490234, "alphanum_fraction": 0.6881188154220581, "avg_line_length": 13.428571701049805, "blob_id": "f6354bc57a276e1f89806840d9c3b08cfe96c68c", "content_id": "ac283c55093515ab8b1f322334ef0b494b87437c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 202, "license_type": "permissive", "max_line_length": 51, "num_lines": 14, "path": "/tox.ini", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "# content of: tox.ini , put in same dir as setup.py\n[tox]\nenvlist = py27,py36\nskipsdist = true\n\n#[testenv]\n#deps=pytest\n#commands=pytest\n[testenv]\ndeps =\n nose\n unittest2\ncommands =\n nosetests\n" }, { "alpha_fraction": 0.601270318031311, "alphanum_fraction": 0.6043283939361572, "avg_line_length": 28.93661880493164, "blob_id": "8924c9f408aaaf01880d7499845007baca3dff58", "content_id": "fd84be41c8266f83ddbb002bc03aa4c6b71c07f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4251, "license_type": "permissive", "max_line_length": 115, "num_lines": 142, "path": "/nopaste/lib/resources/linksapi.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. 
You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\nfrom flask import Flask, jsonify, abort, make_response, request\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nfrom werkzeug import secure_filename, FileStorage\nfrom time import gmtime, strftime\nfrom utils.short_url import Shorturl\nfrom model import Link, Base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nimport sqlite_middleware\nfrom config import CONF\nfrom flask_httpauth import HTTPBasicAuth\n\nimport logging\nimport json\nimport os\n\n\"\"\"\nDatamodel is made like this:\n\n links: [\n {\n id: [ID],\n url: [URL],\n timestamp: [timestamp]\n },\n {\n id: [ID],\n url: [URL],\n timestamp: [timestamp]\n } ]\n\n\"\"\"\n\nLOG = logging.getLogger(__name__)\nauth = HTTPBasicAuth()\n\nengine = create_engine(CONF.database.sql_engine_prefix + CONF.database.dbname)\n\n\nclass Links(Resource):\n \n\n # Make an authorization model to print all the links\n # present on the database, else return the helper\n\n @auth.login_required\n def get(self):\n Session = sessionmaker(bind=engine)\n items = sqlite_middleware._get_all_objects(Session(), Link)\n return jsonify(\"links\", str(items))\n\n\n def put(self):\n pass\n\n\n def post(self):\n f = request.files['file']\n Session = sessionmaker(bind=engine)\n try:\n cur_id = sqlite_middleware._get_last_object(Session(), Link) + 1\n es = Shorturl.toBase62(cur_id)\n f.save(CONF.default.upload_folder + \"/\" + secure_filename(es))\n\n l = Link(cur_id, CONF.default.uri + \"/\" + secure_filename(es), strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n sqlite_middleware._insert_link(l, Session())\n LOG.info(\"[POST] - Generating tinyUrl: %s\\n\" % (CONF.default.uri + es))\n\n return l._tojson()\n except:\n return \"DB Operational Error\"\n\n # Drop the entire db and delete all the created files\n\n @auth.login_required\n def delete(self):\n Session = sessionmaker(bind=engine)\n sqlite_middleware._clear_table_by_name(Base, Session(), \"links\")\n filelist = [f for f in os.listdir(CONF.default.upload_folder)]\n\n for f in filelist:\n LOG.info(\"[DELETE] - %s\" % str(CONF.default.upload_folder + \"/\" + f))\n os.remove(CONF.default.upload_folder + \"/\" + f)\n\n return \"Cleaning db..\"\n\n\n @auth.verify_password\n def verify_password(username, password):\n Session = sessionmaker(bind=engine)\n user = sqlite_middleware._get_user(username, Session())\n if not user or not user.verify_password(password):\n return False\n return True\n\n\nclass LinkAPI(Resource):\n\n\n def get(self, uuid):\n Session = sessionmaker(bind=engine)\n item = sqlite_middleware._get_object(uuid, Session(), Link)\n if item is not None:\n LOG.info(\"[GET] - %s\\n\" % str(item._tojson()))\n return item._tojson()\n abort(404)\n\n def put(self):\n pass\n\n\n @auth.login_required\n def delete(self, uuid):\n Session = sessionmaker(bind=engine)\n link = sqlite_middleware._delete_object(uuid, Session(), Link)\n LOG.info(\"[DELETE] - %s\" % str(CONF.default.upload_folder + 
\"/\" + Shorturl.toBase62(uuid)))\n os.remove(CONF.default.upload_folder + \"/\" + Shorturl.toBase62(uuid))\n return link._tojson()\n" }, { "alpha_fraction": 0.620620608329773, "alphanum_fraction": 0.623289942741394, "avg_line_length": 27.00934600830078, "blob_id": "d30fb10909aa5ad2a2cba1ae13609a4bf40d2f21", "content_id": "698cbd99e99c9f7c30642c930dec4cd8e51485d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2997, "license_type": "permissive", "max_line_length": 103, "num_lines": 107, "path": "/nopaste/model.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import backref, mapper, relation, sessionmaker\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nfrom flask import Flask, abort, jsonify\nfrom passlib.apps import custom_app_context as pwd_context\nfrom config import CONF\nimport logging\nimport json\nimport sqlite_middleware\n\nBase = declarative_base()\n\nLOG = logging.getLogger(__name__)\n\n\nclass Link(Base):\n\n __tablename__ = \"links\"\n\n uuid = Column(Integer, primary_key=True, autoincrement=True, unique=True)\n url = Column(String)\n timestamp = Column(String)\n\n\n def __init__(self, uuid, url, timestamp):\n self.uuid = uuid\n self.url = url\n self.timestamp = timestamp\n\n\n def __repr__(self):\n return str(dict({\"uuid\": self.uuid, \"url\": self.url, \"timestamp\": self.timestamp}))\n\n\n def __str__(self):\n return str({\"uuid\": self.uuid, \"url\": self.url, \"timestamp\": self.timestamp})\n\n\n def _tojson(self):\n return jsonify(\"link\", dict({\"uuid\": self.uuid, \"url\": self.url, \"timestamp\": self.timestamp}))\n\n\nclass User(Base):\n\n __tablename__ = \"users\"\n\n uuid = Column(Integer, autoincrement=True, primary_key=True)\n username = Column(String(32), index=True)\n password_hash = Column(String(128))\n\n def __init__(self, uuid, username, password):\n self.uuid = uuid\n self.username = username\n self.password_hash = self.hash_password(password)\n\n\n def hash_password(self, password):\n return pwd_context.encrypt(password)\n\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\n def isadmin(self):\n if self.username == CONF.default.admin_user and \\\n self.verify_password(CONF.default.admin_password):\n return True\n return False\n\n\n def __repr__(self):\n pass\n\n\n def __str__(self):\n pass\n\n\n def _tojson(self):\n return dict({\"uuid\": self.uuid, \"name\": self.username})\n" }, { 
"alpha_fraction": 0.6604866981506348, "alphanum_fraction": 0.6865585446357727, "avg_line_length": 22.630136489868164, "blob_id": "eb9d497b6fb505e713b5cc83bc3b36e401545aa0", "content_id": "8faf73949b7af1221a2db9a2cb628711bb98d50d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1726, "license_type": "permissive", "max_line_length": 138, "num_lines": 73, "path": "/README.md", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "LINKS API\n====\nA pastebin service to upload/share in a simple and fast way files, pieces of code/configuration,\netc..\nThe service is presented as a restful set of APIs written using the python Flask restful framework.\nAdd to your bashrc/zshrc the magic _nopaste_ alias to start sharing your snippets:\n\n alias nopaste = \"curl -F c=@- https://IP:PORT/api/links\"\n\nThe main features of this little webserver are the following:\n\n\n* Upload a file\n\n cat $FILE | curl -F file=@- http://localhost:5000/api/links\n\n* Show an uploaded file:\n\n curl -i http://localhost:5000/<URL>\n\n* Show all the links present on the server (admin purposes: require authentication)\n\n curl -i -u user:password -X GET http://localhost:5000/api/links\n\n* Show the helper (with curl or in a web browser)\n\n curl -i http://localhost:5000\n\n* Show file metadata \n\n curl -i http://localhost:5000/api/link/<ID>\n\n* Delete a file\n\n curl -i -u fmount:fmount -X DELETE http://localhost:5000/api/link/<ID>\n\n\n* Drop all links (delete files and clear the db table) **REQUIRE AUTH**\n\n curl -i -u fmount:fmount -X DELETE http://localhost:5000/api/links\n\n\nUSER API\n===\n\n* Create a user\n\n curl -i -X POST -H \"Content-Type: application/json\" -d '{\"username\":\"fmount\",\"password\":\"fmount\"}' http://localhost:5000/api/users\n\n* Get a user (admin only)\n\n curl -i -u admin:admin -X GET http://localhost:5000/api/user/<ID>\n\n* Delete a user (admin only)\n\n curl -i -u admin:admin -X DELETE http://localhost:5000/api/user/<ID>\n\n\n\nTESTS\n===\n\n* Massive file upload\n\n while true; do for i in $(ls); do cat $i | curl -F file=@- http://localhost:5000/api/links; sleep 1; done; done\n\nKNOWN ISSUES\n===\n\n\nTODO\n===\n* Put db in /var/lib/nopaste by default\n\n" }, { "alpha_fraction": 0.6381579041481018, "alphanum_fraction": 0.6463815569877625, "avg_line_length": 28.18400001525879, "blob_id": "1343bd15f6b3f8f049574114faa3fed40e5f62fd", "content_id": "97b47e2636949aad91eb6ec2e19e4c5ef722f621", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3648, "license_type": "permissive", "max_line_length": 90, "num_lines": 125, "path": "/nopaste/basicpastev2.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. 
You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\nfrom flask import Flask, Response, jsonify, abort, render_template, make_response, request\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nimport logging\nimport os\nimport sqlite_middleware\nfrom lib.resources.linksapi import Links, LinkAPI\nfrom model import Link, User, Base\nfrom lib.resources.userapi import UsersAPI, UserAPI\nimport jinja2\nimport json\nimport uuid\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom utils.short_url import Shorturl\nfrom utils import helper\nfrom config import CONF\n\n\"\"\"\nDatamodel is made like this:\n\n links: [\n {\n id: [ID],\n url: [URL],\n timestamp: [timestamp]\n },\n {\n id: [ID],\n url: [URL],\n timestamp: [timestamp]\n } ]\n\nThe main flask resource is implemented on the linksv2 and remaind to\nthe more generic model called Link (you can find it on the model.py)\n\n\"\"\"\n\nLOG = logging.getLogger(__name__)\n#logging.basicConfig(filename='/tmp/nopaste.log', level=logging.DEBUG)\n\napp = Flask(__name__, static_folder=CONF.default.upload_folder, static_url_path=\"\")\napi = Api(app)\n\n\nmy_loader = jinja2.ChoiceLoader([\n app.jinja_loader,\n jinja2.FileSystemLoader(CONF.default.templates_folder),\n])\n\napp.jinja_loader = my_loader\nengine = sqlite_middleware._init_engine(Base)\n\n\[email protected](400)\ndef bad_request(error):\n LOG.warn(jsonify({'error': 'Bad request'}))\n return make_response(jsonify({'error': 'Bad request'}), 400)\n\n\[email protected](404)\ndef not_found(error):\n LOG.warn(jsonify({'error': 'Not found'}))\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected]('/', methods=['GET'])\ndef home():\n LOG.info(\"request for home.html\")\n return make_response(render_template('home.html'))\n\n\[email protected]('/<url>')\ndef show_me_thefile(url):\n identifier = Shorturl.toBase10(url)\n LOG.info(\"Resolved identifier: %s\\n\" % str(identifier))\n \n Session = sessionmaker(bind=engine)\n if sqlite_middleware._get_object(identifier, Session(), Link) is None or not \\\n os.path.exists(CONF.default.upload_folder + \"/\" + url) or \\\n helper.is_expired(sqlite_middleware._get_object(\\\n identifier, Session(), Link).timestamp, CONF.default.expire_time):\n abort(404)\n\n LOG.info(\"[Rendering] %s\\n\" % str(CONF.default.upload_folder + \"/\" + url))\n return helper.render((CONF.default.upload_folder + \"/\" + url), request.user_agent)\n\n\napi.add_resource(Links, \"/api/links\")\napi.add_resource(LinkAPI, \"/api/link/<int:uuid>\", endpoint=\"link\")\napi.add_resource(UsersAPI, \"/api/users\")\napi.add_resource(UserAPI, \"/api/user/<int:uuid>\", endpoint=\"user\")\n\n\ndef run():\n app.run(host='127.0.0.1', debug=CONF.default.debug)\n\n\nif __name__ == '__main__':\n run()\n" }, { "alpha_fraction": 0.6003577709197998, "alphanum_fraction": 0.6057245135307312, "avg_line_length": 30.404495239257812, "blob_id": "166bdc7251863ff2ee5deb72c44daa6b200a27db", "content_id": 
"fa9808c061fbfadbbed626c28c3c5a09a7e92faa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2795, "license_type": "permissive", "max_line_length": 84, "num_lines": 89, "path": "/nopaste/config.py", "repo_name": "fmount/nopaste", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n############################################################################\n#\n# Licensed under the MIT License (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# in the LICENSE file or at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# author: fmount <[email protected]>\n# version: 0.1\n# company: --\n#\n#############################################################################\n\nfrom oslo_config import cfg\n\n\n# Register the stanza\nopt_default_group = cfg.OptGroup(name='default', \\\n title='Default Options')\n\nopt_database_group = cfg.OptGroup(name='database', \\\n title='Database options')\n\n# Register the options\n\ndefault_opts = [\n\n cfg.StrOpt('upload_folder', default='uploads',\n help=('Where store/retrieve files')),\n \n cfg.StrOpt('templates_folder', default='templates',\n help=('Templates\\' location')),\n \n cfg.StrOpt('uri', default='http://localhost:5000',\n help=('Endpoint of the flask webserver')),\n \n cfg.BoolOpt('debug', default=False, \\\n help=('True enables, False disables debug mode')),\n\n cfg.IntOpt('expire_time', default=86400, \\\n help=('Expiring time of a link')),\n \n cfg.StrOpt('admin_user', default='admin', \\\n help=('admin user to manage private resources')),\n\n cfg.StrOpt('admin_password', default='password', \\\n help=('Admin password related to the admin user'))\n]\n\ndatabase_opts = [\n\n cfg.StrOpt('dbname', default='weburl.db',\n help=('The sqlalchemy database name')),\n\n cfg.StrOpt('sql_engine_prefix', default='sqlite:///',\n help=('Prefix of the connection stub for the db')),\n]\n\n\nCONF = cfg.CONF\nCONF(default_config_files=['config/nopaste.conf'])\nCONF.register_group(opt_default_group)\nCONF.register_opts(default_opts, opt_default_group)\n\nCONF.register_group(opt_database_group)\nCONF.register_opts(database_opts, opt_database_group)\n\nCONF.default.host = CONF.default.uri.split(\":\")[1].split(\"//\")[1]\nCONF.default.port = CONF.default.uri.split(\":\")[2]\n\n\n#if __name__ == '__main__':\n#print(CONF.default.upload_folder)\n#print(CONF.default.upload_folder)\n#print(CONF.default.uri)\n#print(CONF.default.debug)\n#print(CONF.database.dbname)\n#print(CONF.database.sql_engine_prefix)\n" } ]
10
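The short link names in the nopaste record above come from the base62 codec in utils/short_url.py, which depends on Python 2's string.lowercase. A Python 3 sketch of the same round trip (a port written for illustration, not the repository's code):

```python
# Illustrative Python 3 sketch of the base62 round trip -- not the repo's code.
import string

ALPHABET = string.digits + string.ascii_lowercase + string.ascii_uppercase  # 62 symbols

def to_base62(num):
    if num == 0:
        return ALPHABET[0]
    digits = []
    while num:
        num, r = divmod(num, 62)
        digits.append(ALPHABET[r])
    return "".join(reversed(digits))

def to_base10(msg):
    res = 0
    for ch in msg:
        res = res * 62 + ALPHABET.index(ch)
    return res

assert to_base10(to_base62(123456789)) == 123456789
```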
Coderangshu/Vigenere-cipher-XOR-based
https://github.com/Coderangshu/Vigenere-cipher-XOR-based
09f1ba38471ac6441a977b184eb3b6aace09ec34
ddb92cabb84ace1ef762ab292548efe43c6f8684
1a2a4818932f85ef2bba5820126b8acab420b074
refs/heads/master
2023-02-13T06:17:59.048654
2021-01-13T09:16:52
2021-01-13T09:16:52
269,578,953
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8008849620819092, "alphanum_fraction": 0.8008849620819092, "avg_line_length": 74.33333587646484, "blob_id": "5c6b235f910992fcb7646b5fc60bc9397b368ed7", "content_id": "0d33cdda149f9f57c5cb74cfcecf623f1b9fdcf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 226, "license_type": "no_license", "max_line_length": 182, "num_lines": 3, "path": "/README.md", "repo_name": "Coderangshu/Vigenere-cipher-XOR-based", "src_encoding": "UTF-8", "text": "# Vigenere-cipher-XOR-based\n[Cryptography]\n<br>Using the most common letter, namely 'E', to find the definitive pattern in the given sequence, the letters and words are guessed to decipher the given text sequence.\n" }, { "alpha_fraction": 0.5210620164871216, "alphanum_fraction": 0.6949195861816406, "avg_line_length": 49.21794891357422, "blob_id": "5d0bf0c0691fb1bd5e19cf7e9f58afb6a03bdf5f", "content_id": "711fbdc45b945df248fea21a7958706c343c68cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3917, "license_type": "no_license", "max_line_length": 949, "num_lines": 78, "path": "/decoder.py", "repo_name": "Coderangshu/Vigenere-cipher-XOR-based", "src_encoding": "UTF-8", "text": "#The actual cypher given in the question\ncypher=\"F96DE8C227A259C87EE1DA2AED57C93FE5DA36ED4EC87EF2C63AAE5B9A7EFFD673BE4ACF7BE8923CAB1ECE7AF2DA3DA44FCF7AE29235A24C963FF0DF3CA3599A70E5DA36BF1ECE77F8DC34BE129A6CF4D126BF5B9A7CFEDF3EB850D37CF0C63AA2509A76FF9227A55B9A6FE3D720A850D97AB1DD35ED5FCE6BF0D138A84CC931B1F121B44ECE70F6C032BD56C33FF9D320ED5CDF7AFF9226BE5BDE3FF7DD21ED56CF71F5C036A94D963FF8D473A351CE3FE5DA3CB84DDB71F5C17FED51DC3FE8D732BF4D963FF3C727ED4AC87EF5DB27A451D47EFD9230BF47CA6BFEC12ABE4ADF72E29224A84CDF3FF5D720A459D47AF59232A35A9A7AE7D33FB85FCE7AF5923AA31EDB3FF7D33ABF52C33FF0D673A551D93FFCD33DA35BC831B1F43CBF1EDF67F0DF23A15B963FE5DA36ED68D378F4DC36BF5B9A7AFFD121B44ECE76FEDC73BE5DD27AFCD773BA5FC93FE5DA3CB859D26BB1C63CED5CDF3FE2D730B84CDF3FF7DD21ED5ADF7CF0D636BE1EDB79E5D721ED57CE3FE6D320ED57D469F4DC27A85A963FF3C727ED49DF3FFFDD24ED55D470E69E73AC50DE3FE5DA3ABE1EDF67F4C030A44DDF3FF5D73EA250C96BE3D327A84D963FE5DA32B91ED36BB1D132A31ED87AB1D021A255DF71B1C436BF479A7AF0C13AA14794\"\nprint(\"The actual cypher code is: \\n{}\\n\".format(cypher))\nhex_to_dec=[]\nfor i in range(0, len(cypher), 2):\n    hex_to_dec.append(int(str('0x'+cypher[i:i+2]),0)) #Base 0 lets int() infer the base from the '0x' prefix\n#print(hex_to_dec)\n#print(len(hex_to_dec))\n\n#Frequency of characters in English literature\nfreq={'A':0.08497,'B':0.01492,'C':0.02202,'D':0.04253,'E':0.11162,'F':0.02228,'G':0.02015,'H':0.06094,'I':0.07546,'J':0.00153,'K':0.01292,'L':0.04025,'M':0.02406,'N':0.06749,'O':0.07507,'P':0.01929,'Q':0.00095,'R':0.07587,'S':0.06327,'T':0.09356,'U':0.02758,'V':0.00978,'W':0.02560,'X':0.00150,'Y':0.01994,'Z':0.0077}\n#print(freq)\n\ndef frequency_list_sum(list_of_elements):\n    counts={}\n    l=0\n    for k in list_of_elements:\n        l+=1\n        counts[k]=counts.get(k,0)+1\n    values=list(map(lambda x:x/l,list(counts.values())))\n    sos=sum(map(lambda x: x ** 2,values))\n    return sos\n\n#To find the key length of the cypher\nimport numpy as np\nmax_dist=[]\nfor i in range(13): #The key lengths we try are 1 to 13 (as given in the question)\n    t=[hex_to_dec[j] for j in range(0,len(hex_to_dec),i+1)]\n    max_dist.append(frequency_list_sum(t))\nmax_dist=np.array(max_dist)\nkey_length=np.argmax(max_dist)+1\nprint(\"We try the cypher 
with key lengths ranging from 1 to 13.\\nThe sum of squared letter frequencies is greatest for the {} key length.\\nTherefore the key length is {}\\n\".format(key_length,key_length))\n\ndef get_key(my_dict,val):\n    for key, value in my_dict.items():\n        if val == value:\n            return key\n\ndef get_key_byte(n):\n    t=[hex_to_dec[i] for i in range(n,len(hex_to_dec),key_length)]\n    b={}\n    for i in range(256):\n        l=[ac^i for ac in t]\n        if not any(char<32 or char>127 for char in l):\n            l=list(map(lambda x:x.upper(),[chr(i) for i in l if 97<=i<=122])) #Keep only the lowercase letters from each candidate decryption and convert them to uppercase to match the keys of the freq table of English letter frequencies\n            q,summation={},0\n            for k in l:\n                q[k]=q.get(k,0)+1\n            for kq,vq in q.items():\n                q[kq]=vq/26\n                for kp,vp in freq.items():\n                    if kp==kq:\n                        summation+=vp*vq\n            b[i]=summation\n    return get_key(b,max(list(b.values())))\n\nkey=[]\nfor i in range(key_length):\n    key.append(get_key_byte(i))\nprint(\"The key is:\")\nprint(key)\nprint()\n\nmessage=[chr(hex_to_dec[i]^key[i%key_length]) for i in range(len(hex_to_dec))]\np=[]\nw=[]\nfor i in message:\n    if i==' ':\n        p.append(\"\".join(w))\n        w=[]\n    else:\n        w.append(i)\np.append(\"\".join(w))\nprint(\"The resultant message is:\")\nfor i in range(len(p)):\n    print(p[i],end=\" \")\n    if i%20==0 and i!=0:\n        print()\n" } ]
2
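The key-length test in decoder.py above scores each candidate stride by the sum of squared symbol frequencies: the correct stride groups bytes encrypted with the same key byte, so their distribution is the most skewed. A compact restatement of that test (illustrative only, not the repository's code):

```python
# Illustrative sketch of the key-length test -- not the repo's code.
from collections import Counter

def squared_freq_sum(stream):
    # Sum of squared relative frequencies; higher means less uniform.
    counts = Counter(stream)
    total = float(len(stream))
    return sum((c / total) ** 2 for c in counts.values())

def likely_key_length(data, max_len=13):
    # Score every n-th byte (the first coset, as decoder.py does) for each
    # candidate length; keep the stride with the most skewed distribution.
    scores = [squared_freq_sum(data[::n]) for n in range(1, max_len + 1)]
    return scores.index(max(scores)) + 1
```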
ManolescuSebastian/Droid-Status
https://github.com/ManolescuSebastian/Droid-Status
0dd4fd73f43ab588a922f767172c901dc0fb1745
cc01998783af686bc086553e2f04261001c4c7cb
21dc0217c63da2fa87f82bedb3135ad05b5bf906
refs/heads/master
2022-12-31T22:14:17.901183
2020-10-26T01:15:48
2020-10-26T01:15:48
266,215,412
0
0
Apache-2.0
2020-05-22T21:58:06
2020-05-26T19:09:58
2020-05-26T19:29:26
C++
[ { "alpha_fraction": 0.7068965435028076, "alphanum_fraction": 0.7413793206214905, "avg_line_length": 15.571428298950195, "blob_id": "992cd6e7fb72c5c1668a3109f54ba2184ea41f34", "content_id": "fb22cf517f3648e69f56034e71b445ce8ffe52e5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 116, "license_type": "permissive", "max_line_length": 24, "num_lines": 7, "path": "/mac_os_app/DroidStatus/Podfile", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "platform :macos, '10.15'\nuse_frameworks!\n\ntarget 'DroidStatus' do\n project 'DroidStatus'\n \tpod \"ORSSerialPort\"\nend\n" }, { "alpha_fraction": 0.6111273765563965, "alphanum_fraction": 0.6474377512931824, "avg_line_length": 16.683937072753906, "blob_id": "021a037cf0bdc5e92bdeae9a3fb2079b6e62a9c2", "content_id": "9f08cf6fd7165b6daedbe88d2d5c10f66d47849f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3415, "license_type": "permissive", "max_line_length": 63, "num_lines": 193, "path": "/arduino/ESP8266/droid_status.ino", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": " /**\n   Android status signal\n   Description: This application should control two servo motors\n*/\n\n#include <Servo.h>\n\n#define SERVO_PIN_LEFT 10\n#define SERVO_PIN_RIGHT 11\n\n#define LEFT_PUSH 2\n#define RIGHT_PUSH 3\n\n#define LED_L 7\n#define LED_R 8\n\nServo leftHandServo;\nServo rightHandServo;\n\nint leftButtonState = 0;\nint rightButtonState = 0;\n\nboolean disableSteppers = false;\nboolean disablePushButtons = false;\n\nvoid setup() {\n  Serial.begin(9600);\n  Serial.print(\"Setup started\");\n\n  pinMode(LEFT_PUSH, INPUT);\n  pinMode(RIGHT_PUSH, INPUT);\n\n  pinMode(LED_L, OUTPUT);\n  pinMode(LED_R, OUTPUT);\n\n\n  if (disableSteppers) {\n    return;\n  }\n\n  leftHandServo.attach(SERVO_PIN_LEFT);\n  leftHandServo.write(90);\n  rightHandServo.attach(SERVO_PIN_RIGHT);\n  rightHandServo.write(90);\n\n  delay(1000);\n\n  leftHandServo.detach();\n  rightHandServo.detach();\n\n  Serial.print(\"Setup ended\");\n}\n\nvoid loop() {\n\n  pushButtonDetect();\n\n  while (Serial.available() > 0)\n  {\n    char received = Serial.read();\n\n    if (received == '0') {\n      leftSideActiveOnly();\n      disablePushButtons = true;\n      leftButtonState = 1;\n      rightButtonState = 1;\n    }\n\n    if (received == '1') {\n      rightSideActiveOnly();\n      disablePushButtons = false;\n      leftButtonState = 0;\n      rightButtonState = 0;\n    }\n\n  }\n}\n\nvoid pushButtonDetect() {\n  if (!disablePushButtons) {\n    leftButtonState = digitalRead(LEFT_PUSH);\n    rightButtonState = digitalRead(RIGHT_PUSH);\n  }\n\n  if (leftButtonState == HIGH) {\n    digitalWrite(LED_L, HIGH);\n    leftSideActiveOnly();\n  } else {\n    digitalWrite(LED_L, LOW);\n  }\n\n  if (rightButtonState == HIGH) {\n    digitalWrite(LED_R, HIGH);\n    rightSideActiveOnly();\n  } else {\n    digitalWrite(LED_R, LOW);\n  }\n\n}\n\n/**\n   When left side is active, right side is inactive\n*/\nvoid leftSideActiveOnly() {\n  if (disableSteppers) {\n    return;\n  }\n\n  rightHandServo.attach(SERVO_PIN_RIGHT);\n  rightHandServo.write(20);\n  delay(1000);\n  rightHandServo.detach();\n\n  leftHandServo.attach(SERVO_PIN_LEFT);\n  delay(200);\n\n  for (int pos = 120; pos >= 20; pos --) {\n    leftHandServo.write(pos);\n    delay(10);\n  }\n\n  delay(200);\n  leftHandServo.detach();\n}\n\n/**\n   When right side is active, left side is inactive\n*/\nvoid rightSideActiveOnly() {\n  if (disableSteppers) {\n    return;\n  }\n
leftHandServo.attach(SERVO_PIN_LEFT);\n leftHandServo.write(110);\n delay(1000);\n leftHandServo.detach();\n\n rightHandServo.attach(SERVO_PIN_RIGHT);\n delay(200);\n\n for (int pos = 20; pos <= 110; pos ++) {\n rightHandServo.write(pos);\n delay(20);\n }\n delay(200);\n rightHandServo.detach();\n}\n\n\nvoid moveLeftHand() {\n\n for (int pos = 120; pos >= 20; pos --) {\n leftHandServo.write(pos);\n delay(10);\n }\n\n leftHandServo.detach();\n delay(3000);\n leftHandServo.attach(SERVO_PIN_LEFT);\n delay(200);\n\n for (int pos = 20; pos <= 120; pos ++) {\n leftHandServo.write(pos);\n delay(20);\n }\n\n leftHandServo.detach();\n delay(3000);\n leftHandServo.attach(SERVO_PIN_LEFT);\n delay(200);\n}\n\nvoid moveRightHand() {\n for (int pos = 20; pos <= 110; pos ++) {\n rightHandServo.write(pos);\n delay(20);\n }\n\n rightHandServo.detach();\n delay(3000);\n rightHandServo.attach(SERVO_PIN_RIGHT);\n delay(200);\n\n for (int pos = 110; pos >= 20; pos --) {\n rightHandServo.write(pos);\n delay(10);\n }\n\n rightHandServo.detach();\n delay(3000);\n rightHandServo.attach(SERVO_PIN_RIGHT);\n delay(200);\n}\n" }, { "alpha_fraction": 0.7008129954338074, "alphanum_fraction": 0.7430894374847412, "avg_line_length": 23.479999542236328, "blob_id": "e750bb98cd9e87b4873c1b6b8c6a05a8da4d77c7", "content_id": "a69a31eaf62c4894dfdf933f8a13cf19c17d4d86", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 615, "license_type": "permissive", "max_line_length": 163, "num_lines": 25, "path": "/mac_os_app/README.md", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "# Droid-Status\n\nControl droid servo motors and lights using menu bar application\n\n<img align=\"center\" src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/mac_os_app/images/Screenshot%202020-05-26%20at%2022.01.44.png\" width=\"20%\">\n\nLibraries\n-----\n\nIn order to communicate with the Arduino nano I've used **ORSSerial** library. 
\n\nMore details related to ORSSerial - [github source](https://github.com/armadsen/ORSSerialPort/wiki/Installing-ORSSerialPort)\n\nPodfile \n\n```\nplatform :macos, '10.15'\nuse_frameworks!\n\ntarget 'DroidStatus' do\n project 'DroidStatus'\n \tpod \"ORSSerialPort\"\nend\n\n```\n \n" }, { "alpha_fraction": 0.5827767848968506, "alphanum_fraction": 0.608435869216919, "avg_line_length": 21.571428298950195, "blob_id": "9a7f85b517e50cb1f4e8141673e9f8fc18c1b170", "content_id": "076314dae3e754bd86615ad8c74c0c75505a6ba9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2845, "license_type": "permissive", "max_line_length": 77, "num_lines": 126, "path": "/raspberry_pi_droid/droid_status.py", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\nfrom time import sleep\n\n#====================\n# LED setup\n#====================\n\nled_R = 2\nled_L = 3\n\nGPIO.setup(led_R, GPIO.OUT)\nGPIO.setup(led_L, GPIO.OUT)\n\n#====================\n# Push Button setup\n#====================\nleftPushButton = 17\nrightPushButton = 27\n\nGPIO.setup(leftPushButton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(rightPushButton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n#====================\n# Servo motors setup\n#====================\narmLFPin = 12\narmRTPin = 13\n\nGPIO.setup(armLFPin, GPIO.OUT)\nGPIO.setup(armRTPin, GPIO.OUT)\n\npwmArmLF= GPIO.PWM(armLFPin, 50)\npwmArmRT= GPIO.PWM(armRTPin, 50)\n\npwmArmLF.start(0)\npwmArmRT.start(0)\n\nsleep(0.5)\n\npwmArmLF.ChangeDutyCycle(7)\npwmArmRT.ChangeDutyCycle(7)\nsleep(0.5)\n\npwmArmLF.start(0)\npwmArmRT.start(0)\nsleep(0.5)\n\n#leftArm.stop(0)\n#rightArm.stop(0)\n\ndef leftArmUp():\n    startVal: float = 2\n    for x in range (10):\n        pwmArmLF.ChangeDutyCycle(startVal + 0.5)\n        sleep(0.5)\n        pwmArmLF.start(0)\n\ndef leftArmDown():\n    for x in range (7, 12):\n        pwmArmLF.ChangeDutyCycle(7.5)\n        sleep(0.2)\n        pwmArmLF.start(0)\n\ndef rightArmUp():\n    startVal: float = 2\n    for x in range (10):\n        pwmArmRT.ChangeDutyCycle(startVal + 0.5)\n        sleep(0.5)\n        pwmArmRT.start(0)\n\ndef rightArmDown():\n    for x in range (7, 12):\n        pwmArmRT.ChangeDutyCycle(7.5)\n        sleep(0.2)\n        pwmArmRT.start(0)\n\n\nwhile True:\n    inputValueLeft = GPIO.input(leftPushButton)\n    inputValueRight = GPIO.input(rightPushButton)\n    if (inputValueLeft == True):\n        print('Left button pressed')\n        GPIO.output(led_R, GPIO.LOW)\n        GPIO.output(led_L, GPIO.HIGH)\n        sleep(0.2)\n        pwmArmRT.ChangeDutyCycle(7.5)\n        sleep(0.5)\n        #leftArmUp()\n        #rightArmDown()\n\n    elif (inputValueRight == True):\n        #leftArmRotateOposite()\n        print('Right button pressed')\n        GPIO.output(led_R, GPIO.HIGH)\n        GPIO.output(led_L, GPIO.LOW)\n        sleep(0.2)\n        pwmArmRT.ChangeDutyCycle(3)\n        sleep(0.5)\n        #leftArmDown()\n        #rightArmUp()\n\n\n\n\n#def leftSideAction(channel):\n#    leftArmRotate()\n#    print('Left button pressed')\n#    GPIO.output(led_R, GPIO.LOW)\n#    GPIO.output(led_L, GPIO.HIGH)\n#    sleep(1)\n\n#def rightSideAction(channel):\n#    leftArmRotateOposite()\n#    print('Right button pressed')\n#    GPIO.output(led_R, GPIO.HIGH)\n#    GPIO.output(led_L, GPIO.LOW)\n#    sleep(1)\n\n#GPIO.add_event_detect(leftPushButton,GPIO.RISING,callback=leftSideAction)\n#GPIO.add_event_detect(rightPushButton,GPIO.RISING,callback=rightSideAction)\n\n#message = input(\"Press enter to quit\\n\\n\") # Run until someone presses enter\n#GPIO.cleanup() # Clean up\n\n" }, { "alpha_fraction": 0.6365478038787842, 
"alphanum_fraction": 0.6726093888282776, "avg_line_length": 22.711538314819336, "blob_id": "fc3de68d89fdf938799779574f7ee34ad661a2e0", "content_id": "50a60779bcef045cec97285dfd66ef4ccd012ed1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2468, "license_type": "permissive", "max_line_length": 137, "num_lines": 104, "path": "/README.md", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/anroid_status_banner.jpg\" width=\"65%\"></img>\n</p>\n\nDemo\n-----\n<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/simple_droid_status_demo.gif\" width=\"80%\"></img>\n</p>\n\nYoutube Demo\n-----\nhttps://www.youtube.com/watch?v=08_v_BuvqyA\n\nThingiverse\n-----\nAll the 3D models are available here:\nhttps://www.thingiverse.com/thing:4561049\n\n\nHardware requirements\n-----\n- Arduino Nano OR Wemos D1 OR Raspberry Pi Zero\n- 2x Servo Motor (SG90)\n- 2x push buttton\n- 2x LED\n- 2x 10k resistor\n- 2x 220k resistor\n- wires\n\n\nPin and board connection\n-----\n\n### Arduino NANO \n\nNano | Components\n------------ | -------------\nD12 | Servo Motor 1\nD11 | Servo Motor 2\nD8 | LED 1\nD7 | LED 2\nD3 | Push Button 1\nD2 | Push Button 2\n\n\n<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/droid_status_breadboard.png\" width=\"80%\"></img>\n</p>\n\n\n### ESP8266 (Wemos D1 mini)\n\nWemos D1 mini | Components\n------------ | -------------\nD6 | Servo Motor 1\nD5 | Servo Motor 2\nD8 | LED 1\nD7 | LED 2\nD2 | Push Button 1\nD1 | Push Button 2 \n\n<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/wemosd1_droid_status_schetch_bb.jpg\" width=\"80%\"></img>\n</p>\n\n### Raspberry pi \n\nRaspberry pi | Components\n------------ | -------------\nGPIO23 | Servo Motor 1\nGPIO24 | Servo Motor 2\nGPIO17 | Push Button 1\nGPIO27 | Push Button 2\nGPIO2 | LED 1\nGPIO3 | LED 2\n\n\n<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/raspberry_droid_status_schetch_bb.jpg\" width=\"80%\"></img>\n</p>\n\n\nBoard\n-----\n<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/board_img_1.jpg\" width=\"80%\"></img>\n</p>\n\nLicense\n------\n \n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\n" }, { "alpha_fraction": 0.6677384972572327, "alphanum_fraction": 0.6913183331489563, "avg_line_length": 28.15625, "blob_id": "491b2499ea8ac171fc9c1ec4766f7d57be075541", "content_id": "a0f86d072434ee60e06531d6a0314c9042410e50", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 933, "license_type": "permissive", "max_line_length": 137, "num_lines": 32, "path": "/raspberry_pi_droid/README.md", "repo_name": 
"ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "# Raspberry pi \n\nNano | Components\n------------ | -------------\nD12 | Push Button 1\nD13 | Push Button 2\nGPIO17 | Push Button 1\nGPIO27 | Push Button 2\nGPIO2 | LED 1\nGPIO3 | LED 2\n\n\n<p align=\"center\">\n <img src=\"https://github.com/ManolescuSebastian/Droid-Status/blob/master/res/raspberry_droid_status_schetch_bb.jpg\" width=\"80%\"></img>\n</p>\n\n\nLicense\n------\n \n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 14, "blob_id": "e66d01b56db595a4b40a300ff42f9be807628a57", "content_id": "ddbe425c9731ff14c6beac0760d61bff3e7a9553", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17, "license_type": "permissive", "max_line_length": 14, "num_lines": 1, "path": "/arduino/ESP8266/README.md", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "# Droid-Status \n\n" }, { "alpha_fraction": 0.6295118927955627, "alphanum_fraction": 0.633439302444458, "avg_line_length": 31.210844039916992, "blob_id": "bac2ff62ae46e4e8cbf647424367d37931ee5114", "content_id": "44e4dfd77b6eb397100e7c53491e801955adff7d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 5351, "license_type": "permissive", "max_line_length": 101, "num_lines": 166, "path": "/mac_os_app/DroidStatus/DroidStatus/AppDelegate.swift", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "//\n// AppDelegate.swift\n// AndroidStatus\n//\n// Created by MSI on 12/01/2020.\n// Copyright © 2020 MSI. 
All rights reserved.\n\nimport Foundation\nimport Cocoa\nimport SwiftUI\nimport ORSSerial\n\nenum ApplicationState {\n case initializationState\n case waitingForPortSelectionState([ORSSerialPort])\n case waitingForBaudRateInputState\n case waitingForUserInputState\n}\n\n@NSApplicationMain\nclass AppDelegate: NSObject, NSApplicationDelegate, ORSSerialPortDelegate {\n \n var window: NSWindow!\n var statusBarItem: NSStatusItem!\n \n var currentSelectedPort : String = \"\"\n \n func applicationDidFinishLaunching(_ aNotification: Notification) {\n \n let statusBar = NSStatusBar.system\n statusBarItem = statusBar.statusItem(withLength: NSStatusItem.squareLength)\n statusBarItem.button?.title = \"🚦\"\n \n let statusBarMenu = NSMenu(title: \"Show status\")\n statusBarItem.menu = statusBarMenu\n \n statusBarMenu.addItem(\n withTitle: \"Test Failed\",\n action: #selector(AppDelegate.displayAvailableStatus),\n keyEquivalent: \"\")\n \n statusBarMenu.addItem(\n withTitle: \"Test Passed\",\n action: #selector(AppDelegate.displayBusyStatus),\n keyEquivalent: \"\")\n \n let submenu = NSMenu()\n let mainDropdown = NSMenuItem(title: \"USB ports\", action: nil, keyEquivalent: \"\")\n statusBarMenu.addItem(mainDropdown)\n statusBarMenu.setSubmenu(submenu, for: mainDropdown)\n \n let availablePorts = ORSSerialPortManager.shared().availablePorts\n \n for port in availablePorts {\n let aSelector = #selector(AppDelegate.portSelector(_:))\n let submenuItem = NSMenuItem(title: port.path, action: aSelector, keyEquivalent: \"\")\n submenu.addItem(submenuItem)\n }\n \n statusBarMenu.addItem(\n withTitle: \"Quit\",\n action: #selector(AppDelegate.quitApplication),\n keyEquivalent: \"\")\n }\n \n @objc func displayBusyStatus() {\n print(\"Busy sign must be displayed\")\n setPortState()\n let value = Data(\"1\".utf8)\n self.handleUserInput(value)\n }\n \n @objc func displayAvailableStatus() {\n print(\"Available sign must be displayed\")\n setPortState()\n let value = Data(\"2\".utf8)\n self.handleUserInput(value)\n }\n \n @objc func portSelector(_ sender: NSMenuItem){\n print(sender.title)\n currentSelectedPort = sender.title\n }\n \n @objc func quitApplication(){\n NSApplication.shared.terminate(self)\n }\n \n \n // MARK: USB communication\n var currentState = ApplicationState.initializationState\n \n var serialPort: ORSSerialPort? 
{\n didSet {\n serialPort?.delegate = self;\n serialPort?.open()\n }\n }\n \n func setPortState(){\n let availablePorts = ORSSerialPortManager.shared().availablePorts\n currentState = .waitingForPortSelectionState(availablePorts)\n }\n \n // MARK: Port Settings\n func setupAndOpenPortWithSelectionString() -> Bool {\n let serialPort = ORSSerialPort(path: currentSelectedPort)\n print(\"Current port name: \", currentSelectedPort)\n self.serialPort = serialPort\n return true\n }\n \n // MARK: BaudRate Settings\n func setBaudRateOnPortWithString(_ selectionString: String) -> Bool {\n var selectionString = selectionString\n selectionString = selectionString.trimmingCharacters(in: CharacterSet.whitespacesAndNewlines)\n if let baudRate = Int(selectionString) {\n self.serialPort?.baudRate = NSNumber(value: baudRate)\n print(\"Baud rate set to \\(baudRate)\", terminator: \"\")\n return true\n } else {\n return false\n }\n }\n \n // MARK: Data Processing\n func handleUserInput(_ dataFromUser: Data) {\n \n if !setupAndOpenPortWithSelectionString() {\n print(\"\\nError: Invalid port selection.\")\n return\n }\n //case .waitingForBaudRateInputState:\n if !setBaudRateOnPortWithString(\"9600\") {\n print(\"\\nError: Invalid baud rate. Baud rate should consist only of numeric digits.\")\n return;\n }\n currentState = .waitingForUserInputState\n self.serialPort?.send(dataFromUser)\n }\n \n // ORSSerialPortDelegate\n func serialPort(_ serialPort: ORSSerialPort, didReceive data: Data) {\n if let string = NSString(data: data, encoding: String.Encoding.utf8.rawValue) {\n print(\"\\nReceived: \\\"\\(string)\\\" \\(data)\", terminator: \"\")\n }\n }\n \n func serialPortWasRemovedFromSystem(_ serialPort: ORSSerialPort) {\n self.serialPort = nil\n }\n \n func serialPort(_ serialPort: ORSSerialPort, didEncounterError error: Error) {\n print(\"Serial port (\\(serialPort)) encountered error: \\(error)\")\n }\n \n func serialPortWasOpened(_ serialPort: ORSSerialPort) {\n print(\"Serial port \\(serialPort) was opened\", terminator: \"\")\n currentState = .waitingForBaudRateInputState\n }\n \n func applicationWillTerminate(_ aNotification: Notification) {\n // Insert code here to tear down your application\n serialPort?.close()\n }\n}\n" }, { "alpha_fraction": 0.5763888955116272, "alphanum_fraction": 0.6597222089767456, "avg_line_length": 15, "blob_id": "d789c384f14974e27dee0877049eb29d2d6bdf2c", "content_id": "1684ab5c004bb63af101aa157de36dde5f92abfe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 145, "license_type": "permissive", "max_line_length": 46, "num_lines": 9, "path": "/mac_os_app/DroidStatus/DroidStatus/ContentView.swift", "repo_name": "ManolescuSebastian/Droid-Status", "src_encoding": "UTF-8", "text": "//\n// ContentView.swift\n// DroidStatus\n//\n// Created by MSI on 26/05/2020.\n// Copyright © 2020 MSI. All rights reserved.\n//\n\nimport SwiftUI\n" } ]
9
washreve/hyp3-rtc-gamma
https://github.com/washreve/hyp3-rtc-gamma
f8f1d51966a5cff1aae049cd2991b51a5afd13a3
522a53d3733029a1dabf5666d3bb30619e14fcc4
aa13ecded26194e5c13c57d9fb518e4981fc43d8
refs/heads/master
2023-04-09T13:59:21.212634
2020-08-01T00:17:11
2020-08-01T00:17:11
284,743,367
0
0
null
2020-08-03T15:57:47
2020-08-01T01:25:06
2020-08-01T01:25:05
null
[ { "alpha_fraction": 0.6298524737358093, "alphanum_fraction": 0.641598641872406, "avg_line_length": 31.31944465637207, "blob_id": "7badb7be1d528010a055b464374b84425c03c421", "content_id": "c773ba7617a7037109c2647bb18f3b6ac4a4dc89", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6981, "license_type": "permissive", "max_line_length": 101, "num_lines": 216, "path": "/hyp3_rtc_gamma/__main__.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "\"\"\"\nrtc_gamma processing for HyP3\n\"\"\"\nimport glob\nimport logging\nimport os\nimport re\nimport sys\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\nfrom mimetypes import guess_type\nfrom shutil import make_archive\n\nimport boto3\nfrom PIL import Image\nfrom hyp3lib import GranuleError\nfrom hyp3lib.fetch import download_file\nfrom hyp3proclib import (\n extra_arg_is,\n failure,\n get_extra_arg,\n success,\n upload_product,\n)\nfrom hyp3proclib.db import get_db_connection\nfrom hyp3proclib.file_system import cleanup_workdir\nfrom hyp3proclib.logger import log\nfrom hyp3proclib.proc_base import Processor\nfrom pkg_resources import load_entry_point\n\nimport hyp3_rtc_gamma\nfrom hyp3_rtc_gamma.rtc_sentinel import rtc_sentinel_gamma\n\n# v2 constants\nSENTINEL_DISTRIBUTION_URL = 'https://d2jcx4uuy4zbnt.cloudfront.net'\nEARTHDATA_LOGIN_DOMAIN = 'urs.earthdata.nasa.gov'\nS3_CLIENT = boto3.client('s3')\n\n\ndef entry():\n parser = ArgumentParser(prefix_chars='+', formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '++entrypoint', choices=['hyp3_rtc_gamma', 'hyp3_rtc_gamma_v2'], default='hyp3_rtc_gamma',\n help='Select the HyP3 entrypoint version to use'\n )\n args, unknowns = parser.parse_known_args()\n\n sys.argv = [args.entrypoint, *unknowns]\n sys.exit(\n load_entry_point('hyp3_rtc_gamma', 'console_scripts', args.entrypoint)()\n )\n\n\n# v2 functions\ndef write_netrc_file(username, password):\n netrc_file = os.path.join(os.environ['HOME'], '.netrc')\n if os.path.isfile(netrc_file):\n logging.warning(f'Using existing .netrc file: {netrc_file}')\n else:\n with open(netrc_file, 'w') as f:\n f.write(f'machine {EARTHDATA_LOGIN_DOMAIN} login {username} password {password}')\n\n\ndef get_content_type(filename):\n content_type = guess_type(filename)[0]\n if not content_type:\n content_type = 'application/octet-stream'\n return content_type\n\n\ndef upload_file_to_s3(path_to_file, bucket, prefix=''):\n key = os.path.join(prefix, os.path.basename(path_to_file))\n extra_args = {'ContentType': get_content_type(key)}\n\n logging.info(f'Uploading s3://{bucket}/{key}')\n S3_CLIENT.upload_file(path_to_file, bucket, key, extra_args)\n\n\ndef get_download_url(granule):\n mission = granule[0] + granule[2]\n product_type = granule[7:10]\n if product_type == 'GRD':\n product_type += '_' + granule[10] + granule[14]\n url = f'{SENTINEL_DISTRIBUTION_URL}/{product_type}/{mission}/{granule}.zip'\n return url\n\n\ndef create_thumbnail(input_image, size=(100, 100)):\n filename, ext = os.path.splitext(input_image)\n thumbnail_name = f'{filename}_thumb{ext}'\n\n output_image = Image.open(input_image)\n output_image.thumbnail(size)\n output_image.save(thumbnail_name)\n return thumbnail_name\n\n\ndef main_v2():\n parser = ArgumentParser()\n parser.add_argument('--username', required=True)\n parser.add_argument('--password', required=True)\n parser.add_argument('--bucket')\n parser.add_argument('--bucket-prefix', default='')\n 
parser.add_argument('granule')\n    args = parser.parse_args()\n\n    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n                        datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n\n    write_netrc_file(args.username, args.password)\n\n    granule_url = get_download_url(args.granule)\n    granule_zip_file = download_file(granule_url, chunk_size=5242880)\n\n    output_folder, product_name = rtc_sentinel_gamma(granule_zip_file)\n\n    os.rename(output_folder, product_name)\n    output_zip = make_archive(base_name=product_name, format='zip', base_dir=product_name)\n    if args.bucket:\n        upload_file_to_s3(output_zip, args.bucket, args.bucket_prefix)\n        browse_images = glob.glob(f'{product_name}/*.png')\n        for browse in browse_images:\n            thumbnail = create_thumbnail(browse)\n            upload_file_to_s3(browse, args.bucket, args.bucket_prefix + '/browse')\n            upload_file_to_s3(thumbnail, args.bucket, args.bucket_prefix + '/thumbnail')\n# end v2 functions\n\n\ndef find_png(product_dir):\n    pattern = os.path.join(product_dir, '*.png')\n    png_files = glob.glob(pattern)\n\n    for png_file in png_files:\n        if 'rgb' in png_file:\n            return png_file\n\n    if png_files:\n        return png_files[0]\n\n    return None\n\n\ndef find_and_remove(directory, file_pattern):\n    pattern = os.path.join(directory, file_pattern)\n    for filename in glob.glob(pattern):\n        logging.info(f'Removing {filename}')\n        os.remove(filename)\n\n\ndef process_rtc_gamma(cfg, n):\n    try:\n        logging.info(f'Processing GAMMA RTC \"{cfg[\"sub_name\"]}\" for \"{cfg[\"username\"]}\"')\n\n        granule = cfg['granule']\n        if not re.match('S1[AB]_.._(SLC|GRD)', granule):\n            raise GranuleError(f'Invalid granule, only S1 SLC and GRD data are supported: {granule}')\n\n        res = get_extra_arg(cfg, 'resolution', '30m')\n        if res not in ('10m', '30m'):\n            raise ValueError(f'Invalid resolution, valid options are 10m or 30m: {res}')\n\n        granule_url = get_download_url(granule)\n        granule_zip_file = download_file(granule_url, chunk_size=5242880)\n\n        args = {\n            'in_file': granule_zip_file,\n            'res': float(res.rstrip('m')),\n            'match_flag': extra_arg_is(cfg, 'matching', 'yes'),\n            'pwr_flag': extra_arg_is(cfg, 'power', 'yes'),\n            'gamma_flag': extra_arg_is(cfg, 'gamma0', 'yes'),\n            'lo_flag': res == '30m',\n            'filter_flag': extra_arg_is(cfg, 'filter', 'yes'),\n        }\n        product_dir, product_name = rtc_sentinel_gamma(**args)\n\n        logging.info(f'Renaming {product_dir} to {product_name}')\n        os.rename(product_dir, product_name)\n        product_dir = product_name\n\n        if extra_arg_is(cfg, 'include_dem', 'no'):\n            find_and_remove(product_dir, '*_dem.tif*')\n        if extra_arg_is(cfg, 'include_inc', 'no'):\n            find_and_remove(product_dir, '*_inc_map.tif*')\n\n        zip_file = make_archive(base_name=product_dir, format='zip', base_dir=product_dir)\n        cfg['final_product_size'] = [os.stat(zip_file).st_size, ]\n        cfg['attachment'] = find_png(product_dir)\n        cfg['email_text'] = ' '\n\n        with get_db_connection('hyp3-db') as conn:\n            upload_product(zip_file, cfg, conn)\n            success(conn, cfg)\n\n    except Exception as e:\n        logging.exception('Processing failed')\n        logging.info('Notifying user')\n        failure(cfg, str(e))\n\n    cleanup_workdir(cfg)\n\n\ndef main():\n    \"\"\"\n    Main entrypoint for hyp3_rtc_gamma\n    \"\"\"\n    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n                        datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n    log.propagate = False\n\n    processor = Processor(\n        'rtc_gamma', process_rtc_gamma, sci_version=hyp3_rtc_gamma.__version__\n    )\n    processor.run()\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.6553191542625427, 
"alphanum_fraction": 0.6553191542625427, "avg_line_length": 17.076923370361328, "blob_id": "12f892a4b8c0115a284e50ad09cdff56ac1b7259", "content_id": "f60a6da0f48acb93c75521ad9fa74ef3cd63b7de", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "permissive", "max_line_length": 63, "num_lines": 13, "path": "/tests/conftest.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "import os\nimport shutil\n\nimport pytest\n\n_HERE = os.path.dirname(__file__)\n\n\[email protected]()\ndef image(tmp_path):\n image = str(tmp_path / 'test.png')\n shutil.copy(os.path.join(_HERE, 'data', 'test.png'), image)\n return image\n" }, { "alpha_fraction": 0.5215520262718201, "alphanum_fraction": 0.532246470451355, "avg_line_length": 49.77814483642578, "blob_id": "97b54fd361c6ef3e3feb2794fff9ad756786b19d", "content_id": "a27c3ede06d0169c51b373bcdbd3326f03d9f7b9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15335, "license_type": "permissive", "max_line_length": 116, "num_lines": 302, "path": "/hyp3_rtc_gamma/metadata_utils.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "def meta_init():\n return {\n 'meta_version': ('3.6', ''), 'general.name': ('???', 'File name'),\n 'general.sensor': ('???', 'Imaging satellite'), 'general.sensor_name': ('???', 'Imaging sensor'),\n 'general.mode': ('???', 'Imaging mode'), 'general.receiving_station': ('???', 'Downlinking ground station'),\n 'general.processor': ('???', 'Name and Version of Processor'),\n 'general.data_type': ('???', 'Type of samples (e.g. REAL64)'),\n 'general.image_data_type': ('???', 'Image data type (e.g. AMPLITUDE_IMAGE'),\n 'general.radiometry': ('AMPLITUDE', 'Radiometry (e.g. 
SIGMA)'),\n        'general.acquisition_date': ('???', 'Acquisition date of the data'),\n        'general.orbit': ('-999999999', 'Orbit Number for this datatake'),\n        'general.orbit_direction': ('?', \"Ascending 'A', or descending 'D'\"),\n        'general.frame': ('-999999999', 'Frame for this image [-1 if n/a]'),\n        'general.band_count': ('1', 'Number of bands in image'), 'general.bands': ('???', 'Band of the sensor'),\n        'general.line_count': ('-999999999', 'Number of lines in image'),\n        'general.sample_count': ('-999999999', 'Number of samples in image'),\n        'general.start_line': ('-999999999', 'First line relative to original image'),\n        'general.start_sample': ('-999999999', 'First sample relative to original image'),\n        'general.x_pixel_size': ('NaN', 'Range pixel size [m]'),\n        'general.y_pixel_size': ('NaN', 'Azimuth pixel size [m]'),\n        'general.center_latitude': ('NaN', 'Approximate image center latitude'),\n        'general.center_longitude': ('NaN', 'Approximate image center longitude'),\n        'general.re_major': ('NaN', 'Major (equator) Axis of earth [m]'),\n        'general.re_minor': ('NaN', 'Minor (polar) Axis of earth [m]'),\n        'general.bit_error_rate': ('NaN', 'Fraction of bits which are in error'),\n        'general.missing_lines': ('-999999999', 'Number of missing lines in data take'),\n        'general.no_data': ('NaN', 'Value indicating no data for a pixel')\n    }\n\n\ndef meta_init_sar(m):\n    m['sar.polarization'] = ('???', 'Signal polarization')\n    m['sar.image_type'] = \\\n        ('?', '[S=slant range; G=ground range; P=map projected; R=georeferenced]')\n    m['sar.look_direction'] = \\\n        ('?', 'SAR Satellite look direction [R=right; L=left]')\n    m['sar.azimuth_look_count'] = \\\n        ('-999999999', 'Number of looks in azimuth direction')\n    m['sar.range_look_count'] = \\\n        ('-999999999', 'Number of looks in range direction')\n    m['sar.multilook'] = ('-999999999', 'Image multilooked? [1=yes; 0=no]')\n    m['sar.deskewed'] = \\\n        ('-999999999', 'Image moved to zero doppler? [1=yes; 0=no]')\n    m['sar.original_line_count'] = \\\n        ('-999999999', 'Number of lines in original image')\n    m['sar.original_sample_count'] = \\\n        ('-999999999', 'Number of samples in original image')\n    m['sar.line_increment'] = ('1', 'Line increment for sampling')\n    m['sar.sample_increment'] = ('1', 'Sample increment for sampling')\n    m['sar.range_time_per_pixel'] = ('NaN', 'Time per pixel in range [s]')\n    m['sar.azimuth_time_per_pixel'] = ('NaN', 'Time per pixel in azimuth [s]')\n    m['sar.slant_range_first_pixel'] = ('NaN', 'Slant range to first pixel [m]')\n    m['sar.slant_shift'] = ('0', 'Error correction factor, in slant range [m]')\n    m['sar.time_shift'] = ('0', 'Error correction factor, in time [s]')\n    m['sar.wavelength'] = ('NaN', 'SAR carrier wavelength [m]')\n    m['sar.prf'] = ('NaN', 'Pulse Repetition Frequency [Hz]')\n    m['sar.earth_radius'] = ('NaN', 'Earth radius at scene center [m]')\n    m['sar.earth_radius_pp'] = \\\n        ('NaN', 'Earth radius used by the PP during L0 processing. [m]')\n    m['sar.satellite_height'] = \\\n        ('NaN', \"Satellite height from earth's center [m]\")\n    m['sar.satellite_binary_time'] = ('???', 'Satellite Binary Time')\n    m['sar.satellite_clock_time'] = ('???', 'Satellite Clock Time (UTC)')\n    m['sar.dopRangeCen'] = ('NaN', 'Range doppler centroid [Hz]')\n    m['sar.dopRangeLin'] = ('NaN', 'Range doppler per range pixel [Hz/pixel]')\n    m['sar.dopRangeQuad'] = \\\n        ('NaN', 'Range doppler per range pixel sq. 
[Hz/(pixel^2)]')\n m['sar.dopAzCen'] = ('NaN', 'Azimuth doppler centroid [Hz]')\n m['sar.dopAzLin'] = ('NaN', 'Azimuth doppler per azimuth pixel [Hz/pixel]')\n m['sar.dopAzQuad'] = \\\n ('NaN', 'Azimuth doppler per azimuth pixel sq. [Hz/(pixel^2)]')\n m['sar.pitch'] = ('NaN', 'Platform pitch [degrees]')\n m['sar.roll'] = ('NaN', 'Platform roll [degrees]')\n m['sar.yaw'] = ('NaN', 'Platform yaw [degrees]')\n m['sar.azimuth_bandwidth'] = ('NaN', 'Azimuth processing bandwidth [Hz]')\n m['sar.chirp_rate'] = ('NaN', 'Chirp rate [Hz/sec]')\n m['sar.pulse_duration'] = ('NaN', 'Pulse duration [s]')\n m['sar.range_samp_rate'] = ('NaN', 'Range sampling rate [Hz]')\n m['sar.incid_a(0)'] = ('NaN', 'Incidence angle transformation parameter')\n m['sar.incid_a(1)'] = ('NaN', 'Incidence angle transformation parameter')\n m['sar.incid_a(2)'] = ('NaN', 'Incidence angle transformation parameter')\n m['sar.incid_a(3)'] = ('NaN', 'Incidence angle transformation parameter')\n m['sar.incid_a(4)'] = ('NaN', 'Incidence angle transformation parameter')\n m['sar.incid_a(5)'] = ('NaN', 'Incidence angle transformation parameter')\n\n return m\n\n\ndef meta_init_location(m):\n # Location block\n m['location.lat_start_near_range'] = \\\n ('NaN', 'Latitude at image start in near range')\n m['location.lon_start_near_range'] = \\\n ('NaN', 'Longitude at image start in near range')\n m['location.lat_start_far_range'] = \\\n ('NaN', 'Latitude at image start in far range')\n m['location.lon_start_far_range'] = \\\n ('NaN', 'Longitude at image start in far range')\n m['location.lat_end_near_range'] = \\\n ('NaN', 'Latitude at image end in near range')\n m['location.lon_end_near_range'] = \\\n ('NaN', 'Longitude at image end in near range')\n m['location.lat_end_far_range'] = \\\n ('NaN', 'Latitude at image end in far range')\n m['location.lon_end_far_range'] = \\\n ('NaN', 'Longitude at image end in far range')\n\n return m\n\n\ndef get_meta_sections(m):\n key_names = (\n 'general', 'sar', 'optical', 'thermal', 'projection',\n 'transform', 'airsar', 'uavsar', 'statistics',\n 'state', 'location', 'calibration', 'colormap',\n 'doppler', 'insar', 'dem', 'latlon', 'quality',\n )\n\n s = {}\n for key_name in key_names:\n if key_name in m.keys():\n s[key_name] = True\n else:\n s[key_name] = False\n\n return s\n\n\ndef writeStr(out_file, m, key):\n value = m[key][0]\n comment = m[key][1]\n line = ''\n name = key.split('.')\n for i in range(1, len(name)):\n line += ' '\n line += name[len(name) - 1] + ': ' + value\n while len(line) < 42 + (len(name) - 1) * 4:\n line += ' '\n line += ' # ' + comment + '\\n'\n out_file.write(line)\n\n\ndef write_asf_meta(m, meta_file):\n s = get_meta_sections(m)\n\n # Write header comments\n with open(meta_file, 'w') as outF:\n outF.write('# This file contains the metadata for satellite capture file of the same base name.\\n')\n outF.write(\"# '?' is likely an unknown single character value.\\n\")\n outF.write(\"# '???' 
is likely an unknown string of characters.\\n\")\n outF.write(\"# '-999999999' is likely an unknown integer value.\\n\")\n outF.write(\"# 'nan' is likely an unknown Real value.\\n\\n\")\n outF.write('meta_version: ' + m['meta_version'][0] + '\\n\\n')\n\n # General block\n outF.write('general { '\n '# Begin parameters generally used in remote sensing\\n')\n writeStr(outF, m, 'general.name')\n writeStr(outF, m, 'general.sensor')\n writeStr(outF, m, 'general.sensor_name')\n writeStr(outF, m, 'general.mode')\n writeStr(outF, m, 'general.receiving_station')\n writeStr(outF, m, 'general.processor')\n writeStr(outF, m, 'general.data_type')\n writeStr(outF, m, 'general.image_data_type')\n writeStr(outF, m, 'general.radiometry')\n writeStr(outF, m, 'general.acquisition_date')\n writeStr(outF, m, 'general.orbit')\n writeStr(outF, m, 'general.orbit_direction')\n writeStr(outF, m, 'general.frame')\n writeStr(outF, m, 'general.band_count')\n writeStr(outF, m, 'general.bands')\n writeStr(outF, m, 'general.line_count')\n writeStr(outF, m, 'general.sample_count')\n writeStr(outF, m, 'general.start_line')\n writeStr(outF, m, 'general.start_sample')\n writeStr(outF, m, 'general.x_pixel_size')\n writeStr(outF, m, 'general.y_pixel_size')\n writeStr(outF, m, 'general.center_latitude')\n writeStr(outF, m, 'general.center_longitude')\n writeStr(outF, m, 'general.re_major')\n writeStr(outF, m, 'general.re_minor')\n writeStr(outF, m, 'general.bit_error_rate')\n writeStr(outF, m, 'general.missing_lines')\n writeStr(outF, m, 'general.no_data')\n outF.write('} '\n '# End general\\n\\n')\n\n # SAR block\n if s['sar']:\n outF.write('sar { '\n '# Begin parameters used specifically in SAR imaging\\n')\n writeStr(outF, m, 'sar.polarization')\n writeStr(outF, m, 'sar.image_type')\n writeStr(outF, m, 'sar.look_direction')\n writeStr(outF, m, 'sar.azimuth_look_count')\n writeStr(outF, m, 'sar.range_look_count')\n writeStr(outF, m, 'sar.multilook')\n writeStr(outF, m, 'sar.deskewed')\n writeStr(outF, m, 'sar.original_line_count')\n writeStr(outF, m, 'sar.original_sample_count')\n writeStr(outF, m, 'sar.line_increment')\n writeStr(outF, m, 'sar.sample_increment')\n writeStr(outF, m, 'sar.range_time_per_pixel')\n writeStr(outF, m, 'sar.azimuth_time_per_pixel')\n writeStr(outF, m, 'sar.slant_range_first_pixel')\n writeStr(outF, m, 'sar.slant_shift')\n writeStr(outF, m, 'sar.time_shift')\n writeStr(outF, m, 'sar.wavelength')\n writeStr(outF, m, 'sar.prf')\n writeStr(outF, m, 'sar.earth_radius')\n writeStr(outF, m, 'sar.earth_radius_pp')\n writeStr(outF, m, 'sar.satellite_height')\n writeStr(outF, m, 'sar.satellite_binary_time')\n writeStr(outF, m, 'sar.satellite_clock_time')\n writeStr(outF, m, 'sar.dopRangeCen')\n writeStr(outF, m, 'sar.dopRangeLin')\n writeStr(outF, m, 'sar.dopRangeQuad')\n writeStr(outF, m, 'sar.dopAzCen')\n writeStr(outF, m, 'sar.dopAzLin')\n writeStr(outF, m, 'sar.dopAzQuad')\n writeStr(outF, m, 'sar.pitch')\n writeStr(outF, m, 'sar.roll')\n writeStr(outF, m, 'sar.yaw')\n writeStr(outF, m, 'sar.azimuth_bandwidth')\n writeStr(outF, m, 'sar.chirp_rate')\n writeStr(outF, m, 'sar.pulse_duration')\n writeStr(outF, m, 'sar.range_samp_rate')\n writeStr(outF, m, 'sar.incid_a(0)')\n writeStr(outF, m, 'sar.incid_a(1)')\n writeStr(outF, m, 'sar.incid_a(2)')\n writeStr(outF, m, 'sar.incid_a(3)')\n writeStr(outF, m, 'sar.incid_a(4)')\n writeStr(outF, m, 'sar.incid_a(5)')\n outF.write('} '\n '# End sar\\n\\n')\n\n # Projection block\n if s['projection']:\n outF.write('projection { '\n '# Map Projection parameters\\n')\n 
writeStr(outF, m, 'projection.type')\n writeStr(outF, m, 'projection.startX')\n writeStr(outF, m, 'projection.startY')\n writeStr(outF, m, 'projection.perX')\n writeStr(outF, m, 'projection.perY')\n writeStr(outF, m, 'projection.units')\n writeStr(outF, m, 'projection.hem')\n writeStr(outF, m, 'projection.spheroid')\n writeStr(outF, m, 'projection.re_major')\n writeStr(outF, m, 'projection.re_minor')\n writeStr(outF, m, 'projection.datum')\n writeStr(outF, m, 'projection.height')\n if 'projection.param.utm.zone' in m.keys():\n outF.write(' param { '\n '# Projection specific parameters\\n')\n outF.write(\n ' utm { '\n '# Begin Universal Transverse Mercator Projection\\n'\n )\n writeStr(outF, m, 'projection.param.utm.zone')\n writeStr(outF, m, 'projection.param.utm.false_easting')\n writeStr(outF, m, 'projection.param.utm.false_northing')\n writeStr(outF, m, 'projection.param.utm.latitude')\n writeStr(outF, m, 'projection.param.utm.longitude')\n writeStr(outF, m, 'projection.param.utm.scale_factor')\n outF.write(' } '\n '# End utm\\n')\n outF.write(' } '\n '# End param\\n\\n')\n elif 'projection.param.ps.slat' in m.keys():\n outF.write(' param { '\n '# Projection specific parameters\\n')\n outF.write(' ps { '\n '# Begin Polar Stereographic Projection\\n')\n writeStr(outF, m, 'projection.param.ps.slat')\n writeStr(outF, m, 'projection.param.ps.slon')\n writeStr(outF, m, 'projection.param.ps.false_easting')\n writeStr(outF, m, 'projection.param.ps.false_northing')\n outF.write(' } '\n '# End ps\\n')\n outF.write(' } '\n '# End param\\n\\n')\n outF.write('} '\n '# End projection\\n')\n\n # Location block\n if s['location']:\n outF.write('location { '\n '# Block containing image corner coordinates\\n')\n writeStr(outF, m, 'location.lat_start_near_range')\n writeStr(outF, m, 'location.lon_start_near_range')\n writeStr(outF, m, 'location.lat_start_far_range')\n writeStr(outF, m, 'location.lon_start_far_range')\n writeStr(outF, m, 'location.lat_end_near_range')\n writeStr(outF, m, 'location.lon_end_near_range')\n writeStr(outF, m, 'location.lat_end_far_range')\n writeStr(outF, m, 'location.lon_end_far_range')\n outF.write('} '\n '# End location\\n\\n')\n\n outF.close()\n" }, { "alpha_fraction": 0.6215522885322571, "alphanum_fraction": 0.6658114194869995, "avg_line_length": 32.52688217163086, "blob_id": "a6243ae2b5d3f4adcd77be566e41eba177382875", "content_id": "d1266e08bf794e78507fe8e1470d47f4b7bb6177", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3118, "license_type": "permissive", "max_line_length": 102, "num_lines": 93, "path": "/tests/test_v2.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "import os\n\nimport pytest\nfrom PIL import Image\nfrom botocore.stub import ANY, Stubber\n\nfrom hyp3_rtc_gamma import __main__ as main\n\n\[email protected](autouse=True)\ndef s3_stubber():\n with Stubber(main.S3_CLIENT) as stubber:\n yield stubber\n stubber.assert_no_pending_responses()\n\n\ndef test_get_content_type():\n assert main.get_content_type('foo') == 'application/octet-stream'\n assert main.get_content_type('foo.asfd') == 'application/octet-stream'\n assert main.get_content_type('foo.txt') == 'text/plain'\n assert main.get_content_type('foo.zip') == 'application/zip'\n assert main.get_content_type('foo/bar.png') == 'image/png'\n\n\ndef test_get_download_url():\n granule = 'S1A_IW_GRDH_1SDV_20200611T090849_20200611T090914_032967_03D196_D46C'\n url = main.get_download_url(granule)\n assert url 
== f'https://d2jcx4uuy4zbnt.cloudfront.net/GRD_HD/SA/{granule}.zip'\n\n    granule = 'S1B_IW_SLC__1SDV_20200611T071252_20200611T071322_021982_029B8F_B023'\n    url = main.get_download_url(granule)\n    assert url == f'https://d2jcx4uuy4zbnt.cloudfront.net/SLC/SB/{granule}.zip'\n\n\ndef test_write_netrc_file(tmp_path):\n    os.environ['HOME'] = str(tmp_path)\n    output_file = os.path.join(tmp_path, '.netrc')\n\n    main.write_netrc_file('foo', 'bar')\n    assert os.path.isfile(output_file)\n    with open(output_file, 'r') as f:\n        assert f.read() == 'machine urs.earthdata.nasa.gov login foo password bar'\n\n    main.write_netrc_file('already_there', 'this call should do nothing')\n    with open(output_file, 'r') as f:\n        assert f.read() == 'machine urs.earthdata.nasa.gov login foo password bar'\n\n\ndef test_upload_file_to_s3(tmp_path, s3_stubber):\n    expected_params = {\n        'Body': ANY,\n        'Bucket': 'myBucket',\n        'Key': 'myFile.zip',\n        'ContentType': 'application/zip',\n    }\n    s3_stubber.add_response(method='put_object', expected_params=expected_params, service_response={})\n\n    file_to_upload = tmp_path / 'myFile.zip'\n    file_to_upload.touch()\n    main.upload_file_to_s3(str(file_to_upload), 'myBucket')\n\n\ndef test_upload_file_to_s3_with_prefix(tmp_path, s3_stubber):\n    expected_params = {\n        'Body': ANY,\n        'Bucket': 'myBucket',\n        'Key': 'myPrefix/myFile.txt',\n        'ContentType': 'text/plain',\n    }\n    s3_stubber.add_response(method='put_object', expected_params=expected_params, service_response={})\n\n    file_to_upload = tmp_path / 'myFile.txt'\n    file_to_upload.touch()\n    main.upload_file_to_s3(str(file_to_upload), 'myBucket', 'myPrefix')\n\n\ndef test_create_thumbnail(image):\n    with Image.open(image) as input_image:\n        assert input_image.size == (162, 150)\n\n    thumbnail = main.create_thumbnail(image, (100, 100))\n    assert os.path.basename(thumbnail) == 'test_thumb.png'\n\n    with Image.open(image) as input_image:\n        assert input_image.size == (162, 150)\n\n    with Image.open(thumbnail) as output_image:\n        assert output_image.size == (100, 93)\n\n    thumbnail = main.create_thumbnail(image, (255, 255))\n\n    with Image.open(thumbnail) as output_image:\n        assert output_image.size == (162, 150)\n" }, { "alpha_fraction": 0.5657327771186829, "alphanum_fraction": 0.5768678188323975, "avg_line_length": 31.372093200683594, "blob_id": "5bcda9aac69a203c9e92d7e404d9913a068a2a29", "content_id": "7862469e406b5c8d218276aca28a6e9b3532887e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2784, "license_type": "permissive", "max_line_length": 99, "num_lines": 86, "path": "/hyp3_rtc_gamma/smoothem.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "import argparse\nimport glob\nimport logging\nimport os\nimport sys\n\nfrom hyp3lib import saa_func_lib as saa\nfrom hyp3lib.ps2dem import ps2dem\nfrom osgeo import gdal\n\n\ndef smooth_dem_tiles(demdir, build=True):\n    os.chdir(demdir)\n\n    for mytif in glob.glob(\"*_8m_dem.tif\"):\n        newtif = mytif.replace(\".tif\", \"_30m.tif\")\n        print(\"creating file {}\".format(newtif))\n        gdal.Translate(newtif, mytif, xRes=30, yRes=30)\n\n    for mytif in glob.glob(\"*_8m_dem_30m.tif\"):\n\n        new_name = mytif.replace(\".tif\", \"_smooth.tif\")\n        print(\"new_name {}\".format(new_name))\n        if not os.path.isfile(new_name):\n\n            print(\"Cleaning up DEM {}\".format(mytif))\n\n            src_ds = gdal.Open(mytif)\n            if src_ds is None:\n                print('Unable to open %s' % mytif)\n                sys.exit(1)\n            (x1, y1, trans, proj, data) = saa.read_gdal_file(src_ds)\n\n            srcband = 
src_ds.GetRasterBand(1)\n no_data = srcband.GetNoDataValue()\n\n print(\"noData value is {}\".format(no_data))\n\n dem = mytif.replace(\".tif\", \".dem\")\n par = dem + \".par\"\n ps2dem(mytif, dem, par)\n\n tmp_name = mytif.replace(\".tif\", \"_tmp.dem\")\n cmd = \"fill_gaps {in1} {width} {out} - - 1 100\".format(in1=dem, width=x1, out=tmp_name)\n os.system(cmd)\n\n cmd = \"data2geotiff {par} {in1} 2 {out}\".format(par=par, in1=tmp_name, out=new_name)\n os.system(cmd)\n\n print(\"removing {} {} {}\".format(dem, par, tmp_name))\n os.remove(dem)\n os.remove(par)\n os.remove(tmp_name)\n\n if build:\n cmd = \"gdalbuildvrt full_area.vrt *_smooth.tif\"\n os.system(cmd)\n\n cmd = \"gdal_translate full_area.vrt full_area.tif\"\n os.system(cmd)\n\n cmd = \"ps2dem.py full_area.tif full_area.dem full_area.dem.par\"\n os.system(cmd)\n\n logging.info(\"Finished creating output\")\n return \"full_area.dem\", \"full_area.dem.par\"\n\n\ndef main():\n parser = argparse.ArgumentParser(prog=\"smooth_dem_tiles.py\",\n description=\"Smooth REMA DEM tiles using fill_gaps\")\n parser.add_argument(\"dir\", help=\"Directory containing REMA DEMs to smooth\")\n parser.add_argument(\"-n\", help=\"Don't create full_area.dem output\", action=\"store_false\")\n args = parser.parse_args()\n\n logFile = \"smooth_dem_tiles_{}.log\".format(os.getpid())\n logging.basicConfig(filename=logFile, format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)\n logging.getLogger().addHandler(logging.StreamHandler())\n logging.info(\"Starting run\")\n\n smooth_dem_tiles(args.dir, build=args.n)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7074353694915771, "alphanum_fraction": 0.7455100417137146, "avg_line_length": 52.53845977783203, "blob_id": "eeafe8a370defd32c7b10cfdc10669ef95434627", "content_id": "9744805257398f88fef0d7e7611cb70b2e44e2c7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5568, "license_type": "permissive", "max_line_length": 199, "num_lines": 104, "path": "/CHANGELOG.md", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [PEP 440](https://www.python.org/dev/peps/pep-0440/)\nand uses [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [2.1.1](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.1.0...v2.1.1)\n\n### Changed\n* Input scenes are now downloaded directly from NGAP distribution endpoint rather than ASF's datapool \n* Updates the output product README to include usage guidelines with acknowledgment and citation information\n* Removes the `ESA_citation.txt` file from the output product as it's included in the README\n* Drops the \"This is an RTc product from ...\" blurb from the bottom of product notification emails\n* Incompatible granules will fail much earlier in the processing chain\n\n### Removed\n* Non-functional support for non-Sentinel-1 products\n\n## [2.1.0](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.6...v2.1.0)\n\n### Changed\n* Implemented new naming convention for output products. 
The names of output zips, folders, and files now all share a\n  common format.\n* Revised README content:\n  * Updated description of product naming scheme\n  * Removed references to EUDEM, GIMP, and REMA DEMs that are no longer used since v2.0.0.\n  * Clarified which RTC processing steps are performed when DEM matching is or is not requested.\n* Orbit files are now downloaded once at the start of processing, rather than once for each polarization image.\n* Orbit search priority is now POEORB from ESA, POEORB from ASF, RESORB from ESA, RESORB from ASF.\n* `main_v2()` now downloads Sentinel-1 data directly from ASF's NGAP distribution endpoint, rather than datapool.\n* Upgrade to [hyp3-lib v1.4.1](https://github.com/ASFHyP3/hyp3-lib/blob/develop/CHANGELOG.md#141) from 1.3.0.\n* Install hyp3-lib via conda instead of pip\n\n## [2.0.6](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.5...v2.0.6)\n\n### Fixed\n* Resolved issue where thumbnail filenames ended in ..png rather than .png\n\n## [2.0.5](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.4...v2.0.5)\n\n### Added\n* The v2 entrypoint will now create and upload thumbnail images to S3 in addition to browse images\n\n## [2.0.4](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.3...v2.0.4)\n\n### Changed\n* The v2 entrypoint will now upload browse images and thumbnail images in addition to the zip file.\n* Upgrade to [hyp3-lib 1.3.0](https://github.com/ASFHyP3/hyp3-lib/blob/develop/CHANGELOG.md#130). In particular, geotiff products no longer include overviews.\n* Eliminated separate \"low-res\" (1024x) and \"high-res\" (2048x) browse image resolutions in favor of a single 2048x image.\n\n## [2.0.3](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.2...v2.0.3)\n\n### Fixed\n* Updates the minimum required `hyp3lib` and `hyp3proclib` version to bring in the\n  [`get_dem.py` NoData bugfix](https://github.com/ASFHyP3/hyp3-lib/pull/175) and\n  the [`default_rtc_resolution` bugfix](https://github.com/asfadmin/hyp3-proc-lib/pull/4),\n  respectively\n\n## [2.0.2](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.1...v2.0.2)\n\n### Changed\n* The v2 entrypoint will now make up to three retry attempts if it fails to download the input granule from the ASF archive.\n* Changed the name of the product README file to `<product_name>.README.txt`, e.g. `S1A_IW_RT30_20170708T161200_G_gpn.README.txt`\n* Calls to mk_geo_radcal will no longer include the `-j do not use layover-shadow map in the calculation of pixel_area` flag. The layover-shadow map will now be consistently applied to all products.\n* Upgraded to [hyp3-lib v1.2.2](https://github.com/ASFHyP3/hyp3-lib/blob/develop/CHANGELOG.md#v122)\n* Removed custom blank_bad_data.py from mk_geo_radcal processing. Border pixels for older GRD products are now cleaned using the default `make_edge` setting of `par_S1_GRD`.\n\n## [2.0.1](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v2.0.0...v2.0.1)\n\n### Changed\n* hyp3-v2 output products are now packaged in a .zip file similar to hyp3-v1 products\n\n## [2.0.0](https://github.com/ASFHyP3/hyp3-rtc-gamma/compare/v1.3.1...v2.0.0)\n\nThis is a significant refactor of `hyp3-rtc-gamma` into:\n* A `pip` installable package called `hyp3_rtc_gamma`\n* A stand alone, container-based HyP3 plugin\n\n**NOTE:** There are significant changes to the overall repository structure and\nwill break all existing HyP3 workflows!\n\n### Removed\n* Python 2. 
This package now requires Python 3.6+\n* A patched version of GAMMA's `mk_geo_radcal` and an associated `blank_bad_data.py` script\n\n### Added\n* A packaging and testing structure -- now pip installable and testing is done via pytest\n  * Previous command line scripts are now registered entrypoints and created when the package is `pip` installed:\n    * `check_coreg.py`\n    * `rtc_sentinel.py`\n    * `smooth_dem_tiles.py`\n    * `xml2meta.py`\n* A Dockerfile to build the HyP3 plugin\n* A CI/CD workflow setup, which will build and publish the docker container\n* The processing script that used to live in the now deprecated `cloud-proj` repository has been moved into the\npackage as `hyp3_rtc_gamma.__main__` and also registered as a `hyp3_rtc_gamma` entrypoint\n* A second, in development, entrypoint for HyP3 v2 has been added to `hyp3_rtc_gamma.__main__`\n\n### Changed\n* All of `src/` is now contained in the `hyp3_rtc_gamma` package\n* All of `etc/` is now contained in `hyp3_rtc_gamma.etc`\n* The version number is now tracked automatically via git tags instead of in `etc/version.txt`\n" }, { "alpha_fraction": 0.5711439251899719, "alphanum_fraction": 0.6032968163490295, "avg_line_length": 42.468502044677734, "blob_id": "d115da55451df2f79fb3c1f665689547e0403f5c", "content_id": "345382ee46e88e5c4f74fd68223c01b7500abd15", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11041, "license_type": "permissive", "max_line_length": 98, "num_lines": 254, "path": "/hyp3_rtc_gamma/xml2meta.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "\"\"\"Converts an XML metadata file into an ASF metadata file\"\"\"\n\nimport argparse\nimport logging\nimport os\n\nimport lxml.etree as et\nimport scipy.constants as sc\nfrom osgeo import ogr\n\nfrom hyp3_rtc_gamma import metadata_utils\n\nlog = logging.getLogger()\n\n\ndef sentinel2meta(xml_file):\n    m = metadata_utils.meta_init()\n    m = metadata_utils.meta_init_sar(m)\n    m = metadata_utils.meta_init_location(m)\n    parser = et.XMLParser(remove_blank_text=True)\n    meta = et.parse(xml_file, parser)\n\n    # Determine location and centroid\n    ring = ogr.Geometry(ogr.wkbLinearRing)\n    poly = ogr.Geometry(ogr.wkbPolygon)\n    bounds = meta.xpath('//boundary')\n    point = None\n    for bound in bounds:\n        lat = bound.xpath('polygon/point[@id=\"1\"]/lat')[0].text\n        m['location.lat_start_near_range'] = \\\n            (lat, m['location.lat_start_near_range'][1])\n        lon = bound.xpath('polygon/point[@id=\"1\"]/lon')[0].text\n        m['location.lon_start_near_range'] = \\\n            (lon, m['location.lon_start_near_range'][1])\n        ring.AddPoint(float(lat), float(lon))\n\n        lat = bound.xpath('polygon/point[@id=\"2\"]/lat')[0].text\n        m['location.lat_start_far_range'] = \\\n            (lat, m['location.lat_start_far_range'][1])\n        lon = bound.xpath('polygon/point[@id=\"2\"]/lon')[0].text\n        m['location.lon_start_far_range'] = \\\n            (lon, m['location.lon_start_far_range'][1])\n        ring.AddPoint(float(lat), float(lon))\n\n        lat = bound.xpath('polygon/point[@id=\"3\"]/lat')[0].text\n        m['location.lat_end_far_range'] = \\\n            (lat, m['location.lat_end_far_range'][1])\n        lon = bound.xpath('polygon/point[@id=\"3\"]/lon')[0].text\n        m['location.lon_end_far_range'] = \\\n            (lon, m['location.lon_end_far_range'][1])\n        ring.AddPoint(float(lat), float(lon))\n\n        lat = bound.xpath('polygon/point[@id=\"4\"]/lat')[0].text\n        m['location.lat_end_near_range'] = \\\n            (lat, m['location.lat_end_near_range'][1])\n        lon = bound.xpath('polygon/point[@id=\"4\"]/lon')[0].text\n        
m['location.lon_end_near_range'] = \\\n            (lon, m['location.lon_end_near_range'][1])\n        ring.AddPoint(float(lat), float(lon))\n\n        ring.CloseRings()\n        poly.AddGeometry(ring)\n        centroid = poly.Centroid()\n        point = centroid.GetPoint(0)\n\n    # Figure out which product type we deal with\n    product_type = meta.xpath('/sentinel/metadata/image/product_type')[0].text\n\n    # General block\n    name = meta.xpath('/sentinel/metadata/image/file')[0].text\n    m['general.name'] = (name, m['general.name'][1])\n    sensor = meta.xpath('/sentinel/metadata/image/platform')[0].text\n    m['general.sensor'] = (sensor, m['general.sensor'][1])\n    sensor_name = meta.xpath('/sentinel/metadata/image/sensor')[0].text\n    m['general.sensor_name'] = (sensor_name, m['general.sensor_name'][1])\n    mode = meta.xpath('/sentinel/metadata/image/beam_mode')[0].text\n    m['general.mode'] = (mode, m['general.mode'][1])\n    processor = meta.xpath('/sentinel/processing/software')[0].text\n    version = meta.xpath('/sentinel/processing/software_version')[0].text\n    m['general.processor'] = \\\n        (processor + ' (' + version + ')', m['general.processor'][1])\n    m['general.data_type'] = ('REAL32', m['general.data_type'][1])\n    m['general.image_data_type'] = \\\n        ('AMPLITUDE_IMAGE', m['general.image_data_type'][1])\n    m['general.radiometry'] = ('AMPLITUDE', m['general.radiometry'][1])\n    acquisition_date = meta.xpath('/sentinel/extent/start_datetime')[0].text\n    m['general.acquisition_date'] = \\\n        (acquisition_date, m['general.acquisition_date'][1])\n    orbit = meta.xpath('/sentinel/metadata/image/absolute_orbit')[0].text\n    m['general.orbit'] = (orbit, m['general.orbit'][1])\n    orbit_direction = \\\n        meta.xpath('/sentinel/metadata/image/flight_direction')[0].text\n    if orbit_direction == 'ASCENDING':\n        m['general.orbit_direction'] = ('A', m['general.orbit_direction'][1])\n    elif orbit_direction == 'DESCENDING':\n        m['general.orbit_direction'] = ('D', m['general.orbit_direction'][1])\n    polarization = meta.xpath('/sentinel/metadata/image/polarization')[0].text\n    if polarization == 'SH' or polarization == 'HH':\n        m['general.band_count'] = ('1', m['general.band_count'][1])\n        m['general.bands'] = ('HH', m['general.bands'][1])\n    elif polarization == 'HV':\n        m['general.band_count'] = ('1', m['general.band_count'][1])\n        m['general.bands'] = ('HV', m['general.bands'][1])\n    elif polarization == 'VH':\n        m['general.band_count'] = ('1', m['general.band_count'][1])\n        m['general.bands'] = ('VH', m['general.bands'][1])\n    elif polarization == 'SV' or polarization == 'VV':\n        m['general.band_count'] = ('1', m['general.band_count'][1])\n        m['general.bands'] = ('VV', m['general.bands'][1])\n    elif polarization == 'DH':\n        m['general.band_count'] = ('2', m['general.band_count'][1])\n        m['general.bands'] = ('HH,HV', m['general.bands'][1])\n    elif polarization == 'DV':\n        m['general.band_count'] = ('2', m['general.band_count'][1])\n        m['general.bands'] = ('VV,VH', m['general.bands'][1])\n    if product_type == 'GRD':\n        line_count = meta.xpath('/sentinel/metadata/image/height')[0].text\n        sample_count = meta.xpath('/sentinel/metadata/image/width')[0].text\n        x_pixel_size = meta.xpath('/sentinel/metadata/image/x_spacing')[0].text\n        y_pixel_size = meta.xpath('/sentinel/metadata/image/y_spacing')[0].text\n    elif product_type == 'SLC':\n        param = ('/sentinel/metadata/image/IW1_{0}/height'.format(polarization))\n        line_count = meta.xpath(param)[0].text\n        param = ('/sentinel/metadata/image/IW1_{0}/width'.format(polarization))\n        sample_count = meta.xpath(param)[0].text\n        param = 
('/sentinel/metadata/image/IW1_{0}/x_spacing'.format(polarization))\n        x_pixel_size = meta.xpath(param)[0].text\n        param = ('/sentinel/metadata/image/IW1_{0}/y_spacing'.format(polarization))\n        y_pixel_size = meta.xpath(param)[0].text\n    else:\n        log.warning('Unknown product type!')\n        line_count = None\n        sample_count = None\n        x_pixel_size = None\n        y_pixel_size = None\n    m['general.line_count'] = (line_count, m['general.line_count'][1])\n    m['general.sample_count'] = (sample_count, m['general.sample_count'][1])\n    m['general.start_line'] = ('0', m['general.start_line'][1])\n    m['general.start_sample'] = ('0', m['general.start_sample'][1])\n    m['general.x_pixel_size'] = (x_pixel_size, m['general.x_pixel_size'][1])\n    m['general.y_pixel_size'] = (y_pixel_size, m['general.y_pixel_size'][1])\n    m['general.center_latitude'] = \\\n        (str(point[0]), m['general.center_latitude'][1])\n    m['general.center_longitude'] = \\\n        (str(point[1]), m['general.center_longitude'][1])\n    # The spatial reference functionality works in Python 2.7\n    # Hardwire axes for the moment\n    # ref = osr.SpatialReference()\n    # ref.ImportFromEPSG(4326)\n    # m['general.re_major'] = (str(ref.GetSemiMajor()), m['general.re_major'][1])\n    # m['general.re_minor'] = (str(ref.GetSemiMinor()), m['general.re_minor'][1])\n    m['general.re_major'] = ('6378137.0', m['general.re_major'][1])\n    m['general.re_minor'] = ('6356752.31425', m['general.re_minor'][1])\n\n    # SAR block\n    if polarization == 'SH' or polarization == 'HH':\n        m['sar.polarization'] = ('HH', m['sar.polarization'][1])\n    elif polarization == 'HV':\n        m['sar.polarization'] = ('HV', m['sar.polarization'][1])\n    elif polarization == 'VH':\n        m['sar.polarization'] = ('VH', m['sar.polarization'][1])\n    elif polarization == 'SV' or polarization == 'VV':\n        m['sar.polarization'] = ('VV', m['sar.polarization'][1])\n    elif polarization == 'DH':\n        m['sar.polarization'] = ('HH,HV', m['sar.polarization'][1])\n    elif polarization == 'DV':\n        m['sar.polarization'] = ('VV,VH', m['sar.polarization'][1])\n    if product_type == 'RAW' or product_type == 'SLC':\n        m['sar.image_type'] = ('S', m['sar.image_type'][1])\n        m['sar.multilook'] = ('0', m['sar.multilook'][1])\n    elif product_type == 'GRD':\n        m['sar.image_type'] = ('G', m['sar.image_type'][1])\n        m['sar.multilook'] = ('1', m['sar.multilook'][1])\n    m['sar.look_direction'] = ('R', m['sar.look_direction'][1])\n    '''\n    Look counts are different for each beam in the ScanSAR imagery\n    sar.azimuth_look_count\n    sar.range_look_count\n    '''\n    m['sar.deskewed'] = ('1', m['sar.deskewed'][1])\n    m['sar.original_line_count'] = \\\n        (m['general.line_count'][0], m['sar.original_line_count'][1])\n    m['sar.original_sample_count'] = \\\n        (m['general.sample_count'][0], m['sar.original_sample_count'][1])\n    m['sar.line_increment'] = ('1', m['sar.line_increment'][1])\n    m['sar.sample_increment'] = ('1', m['sar.sample_increment'][1])\n    '''\n    range_time_per_pixel: 3.125e-08\n    azimuth_time_per_pixel: 0.00091480755475\n    slant_range_first_pixel: 853461.85215\n    '''\n    m['sar.slant_shift'] = ('0', m['sar.slant_shift'][1])\n    m['sar.time_shift'] = ('0', m['sar.time_shift'][1])\n    frequency = meta.xpath('/sentinel/metadata/image/radar_frequency')[0].text\n    wavelength = sc.c / float(frequency)\n    m['sar.wavelength'] = (str(wavelength), m['sar.wavelength'][1])\n    '''\n    prf: 2141.3276231\n    earth_radius: 6362363.0798\n    satellite_height: 7065911.9034\n    dopRangeCen: 50.5405526\n    dopRangeLin: -0.0002221\n    dopRangeQuad: 0\n    dopAzCen: 50.5405526\n    dopAzLin: 0\n    dopAzQuad: 0\n    pitch: NaN\n    roll: NaN\n    yaw: NaN\n    
azimuth_bandwidth: 1516.25\n    chirp_rate: 0\n    pulse_duration: 2.7e-05\n    range_samp_rate: 32000000\n    incid_a(0): 288.16310464\n    incid_a(1): -0.98116986722\n    incid_a(2): 0.00069761807297\n    incid_a(3): 9.9271558273e-07\n    incid_a(4): -1.5988761359e-09\n    incid_a(5): 6.0266559732e-13\n    '''\n\n    return m\n\n\ndef file_exists(file_arg):\n    \"\"\"\n    Convenience \"type\" function for argparse to check if a file exists\n    \"\"\"\n    if not os.path.exists(file_arg):\n        raise argparse.ArgumentTypeError(f\"XML metadata file {file_arg} does not exist\")\n    return file_arg\n\n\ndef main():\n    \"\"\"Main entrypoint\"\"\"\n    parser = argparse.ArgumentParser(\n        prog='xml2meta.py',\n        description=__doc__,\n    )\n    parser.add_argument('data', type=str.lower, choices=('sentinel',),\n                        help='name of data source')\n    parser.add_argument('xmlFile', type=file_exists, help='name of the XML metadata file (input)')\n    parser.add_argument('metaFile', help='name of the metadata file (output)')\n    parser.add_argument('-s', '--screen', action='store_true', dest='screen',\n                        help='log to the console (as well as to syslog)')\n    args = parser.parse_args()\n\n    log.info('Converting Sentinel XML file (%s) ...' % args.xmlFile)\n    asf_meta = sentinel2meta(args.xmlFile)\n    metadata_utils.write_asf_meta(asf_meta, args.metaFile)\n\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.711544394493103, "alphanum_fraction": 0.738161563873291, "avg_line_length": 40.42307662963867, "blob_id": "f89ce4e5b4acf4e9be9b90e1bf3b803ac6d2d54e", "content_id": "0982ffd6797adf6dd582329f19eb63c9319fa278", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 3231, "license_type": "permissive", "max_line_length": 103, "num_lines": 78, "path": "/Dockerfile", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "FROM ubuntu:18.04\n\n# For opencontainers label definitions, see:\n# https://github.com/opencontainers/image-spec/blob/master/annotations.md\nLABEL org.opencontainers.image.title=\"HyP3 RTC GAMMA\"\nLABEL org.opencontainers.image.description=\"HyP3 plugin for radiometric terrain correction using GAMMA\"\nLABEL org.opencontainers.image.vendor=\"Alaska Satellite Facility\"\nLABEL org.opencontainers.image.authors=\"ASF APD/Tools Team <[email protected]>\"\nLABEL org.opencontainers.image.licenses=\"BSD-3-Clause\"\nLABEL org.opencontainers.image.url=\"https://github.com/ASFHyP3/hyp3-rtc-gamma\"\nLABEL org.opencontainers.image.source=\"https://github.com/ASFHyP3/hyp3-rtc-gamma\"\n# LABEL org.opencontainers.image.documentation=\"\"\n\n# Dynamic labels to define at build time via `docker build --label`\n# LABEL org.opencontainers.image.created=\"\"\n# LABEL org.opencontainers.image.version=\"\"\n# LABEL org.opencontainers.image.revision=\"\"\n\nARG DEBIAN_FRONTEND=noninteractive\nENV PYTHONDONTWRITEBYTECODE=true\n\nRUN apt-get update && apt-get upgrade -y && \\\n    apt-get install -y --no-install-recommends \\\n    build-essential bison curl flex g++ gcc gdal-bin \\\n    gimp gnuplot gnuplot-data gnuplot-qt libblas-dev libblas3 libcunit1-dev \\\n    libexif-dev libfftw3-dev libgdal-dev libgdal20 libgeotiff-dev libglade2-dev \\\n    libglib2.0-dev libgsl-dev libgtk2.0-bin libgtk2.0-common libgtk2.0-dev \\\n    libhdf5-100 libhdf5-dev libjpeg-dev liblapack-dev liblapack3 libpng-dev \\\n    libproj-dev libshp-dev libtiff5-dev libxml2-dev netpbm python3-dev python3-h5py \\\n    python3-matplotlib python3-pip python3-scipy tcsh unzip vim wget xsltproc && \\\n    apt-get clean && rm -rf 
/var/lib/apt/lists/* \\\n && pip3 install --no-cache-dir --upgrade pip setuptools wheel\n\nCOPY GAMMA_SOFTWARE-20191203 /usr/local/GAMMA_SOFTWARE-20191203/\n\nCOPY ASF_MapReady-devel /usr/local/MapReady/src/\n\nRUN cd /usr/local/MapReady/src && \\\n ./configure --prefix /usr/local/MapReady && \\\n make && make install && \\\n cd /usr/local/MapReady && rm -r src/\n\nRUN export CPLUS_INCLUDE_PATH=/usr/include/gdal && \\\n export C_INCLUDE_PATH=/usr/include/gdal && \\\n python3 -m pip install --no-cache-dir GDAL==2.2.3 statsmodels==0.9 pandas==0.23\n\nARG S3_PYPI_HOST\nARG SDIST_SPEC\n\nRUN python3 -m pip install --no-cache-dir hyp3_rtc_gamma${SDIST_SPEC} \\\n --trusted-host \"${S3_PYPI_HOST}\" \\\n --extra-index-url \"http://${S3_PYPI_HOST}\"\n\nARG CONDA_GID=1000\nARG CONDA_UID=1000\n\nRUN groupadd -g \"${CONDA_GID}\" --system conda && \\\n useradd -l -u \"${CONDA_UID}\" -g \"${CONDA_GID}\" --system -d /home/conda -m -s /bin/bash conda\n\nUSER ${CONDA_UID}\nSHELL [\"/bin/bash\", \"-l\", \"-c\"]\nENV PYTHONDONTWRITEBYTECODE=true\nENV GAMMA_HOME=/usr/local/GAMMA_SOFTWARE-20191203\nENV MSP_HOME=$GAMMA_HOME/MSP\nENV ISP_HOME=$GAMMA_HOME/ISP\nENV DIFF_HOME=$GAMMA_HOME/DIFF\nENV DISP_HOME=$GAMMA_HOME/DISP\nENV LAT_HOME=$GAMMA_HOME/LAT\nENV PATH=$PATH:$MSP_HOME/bin:$ISP_HOME/bin:$DIFF_HOME/bin:$LAT_HOME/bin:$DISP_HOME/bin\nENV PATH=$PATH:$MSP_HOME/scripts:$ISP_HOME/scripts:$DIFF_HOME/scripts:$LAT_HOME/scripts\nENV MAPREADY_HOME=/usr/local/MapReady\nENV PATH=$PATH:$MAPREADY_HOME/bin:$MAPREADY_HOME/lib:$MAPREADY_HOME/share\nENV GAMMA_RASTER=BMP\n\nWORKDIR /home/conda/\n\nENTRYPOINT [\"/usr/local/bin/rtc_gamma\"]\nCMD [\"-h\"]\n" }, { "alpha_fraction": 0.7922077775001526, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 24.66666603088379, "blob_id": "9c1b44c94c61b242dabf3dcd13e9c1e1b0c60b21", "content_id": "e3e0511008366d532f18f3752d92d2f0d19212fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 77, "license_type": "permissive", "max_line_length": 58, "num_lines": 3, "path": "/README.md", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "# HyP3 RTC GAMMA\n\nHyP3 plugin for radiometric terrain correction using GAMMA\n" }, { "alpha_fraction": 0.4849129617214203, "alphanum_fraction": 0.497485488653183, "avg_line_length": 43.377681732177734, "blob_id": "61d9142fffd52317edc8a7d497cdc393c36013b6", "content_id": "7149da8d2efe8e804f45f9117b2981241c5454c8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10340, "license_type": "permissive", "max_line_length": 101, "num_lines": 233, "path": "/hyp3_rtc_gamma/create_metadata.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "\"\"\"Create ArcGIS compatible xml metadata\"\"\"\n\nimport datetime\nimport glob\nimport logging\nimport os\nimport sys\n\nfrom hyp3lib.execute import execute\nfrom hyp3lib.file_subroutines import get_dem_tile_list\nfrom hyp3lib.getParameter import getParameter\nfrom hyp3lib.make_arc_thumb import pngtothumb\nfrom hyp3lib.saa_func_lib import getCorners\n\nimport hyp3_rtc_gamma.etc\n\n\ndef get_hemisphere(fi):\n \"\"\"Get the UTM N/S designation\"\"\"\n ullon, lrlon, lrlat, ullat = getCorners(fi)\n if lrlat + ullat >= 0:\n return \"N\"\n else:\n return \"S\"\n\n\ndef create_arc_xml(infile, outfile, input_type, gamma_flag, pwr_flag, filter_flag, looks, pol, cpol,\n dem_type, spacing, hyp3_ver, gamma_ver, rtc_name):\n 
print(\"create_arc_xml: CWD is {}\".format(os.getcwd()))\n zone = None\n try:\n proj_name = getParameter(\"area.dem.par\", \"projection_name\")\n if \"UTM\" in proj_name:\n zone = getParameter(\"area.dem.par\", \"projection_zone\")\n except Exception:\n pass\n logging.info(\"Zone is {}\".format(zone))\n\n dem_tiles = get_dem_tile_list()\n\n # Create XML metadata files\n etc_dir = os.path.abspath(os.path.dirname(hyp3_rtc_gamma.etc.__file__))\n back = os.getcwd()\n os.chdir(\"PRODUCT\")\n\n now = datetime.datetime.now()\n date = now.strftime(\"%Y%m%d\")\n time = now.strftime(\"%H%M%S\")\n dt = now.strftime(\"%Y-%m-%dT%H:%M:%S\")\n year = now.year\n\n basename = os.path.basename(infile)\n granulename = os.path.splitext(basename)[0]\n\n spacing = int(spacing)\n flooks = looks * 30\n hemi = get_hemisphere(rtc_name)\n\n if gamma_flag:\n power_type = \"gamma\"\n else:\n power_type = \"sigma\"\n if pwr_flag:\n format_type = \"power\"\n else:\n format_type = \"amplitude\"\n if filter_flag:\n filter_str = \"A\"\n else:\n filter_str = \"No\"\n\n if input_type == \"SLC\":\n full_type = \"Single-Look Complex\"\n else:\n full_type = \"Ground Range Detected\"\n\n if \"NED\" in dem_type:\n if \"13\" in dem_type:\n resa = \"1/3\"\n resm = 10\n elif \"1\" in dem_type:\n resa = 1\n resm = 30\n else:\n resa = 2\n resm = 60\n pcs = \"WGS 1984 UTM Zone {}{}\".format(zone, hemi)\n elif \"SRTMGL\" in dem_type:\n if \"1\" in dem_type:\n resa = 1\n resm = 30\n else:\n resa = 3\n resm = 90\n pcs = \"WGS 1984 UTM Zone {}{}\".format(zone, hemi)\n elif \"EU_DEM\" in dem_type:\n resa = 1\n resm = 30\n pcs = \"WGS 1984 UTM Zone {}{}\".format(zone, hemi)\n elif \"GIMP\" in dem_type:\n resa = 1\n resm = 30\n pcs = \"WGS 1984 NSIDC Sea Ice Polar Stereographic North\"\n elif \"REMA\" in dem_type:\n resa = 1\n resm = 30\n pcs = \"WGS 1984 Antarctic Polar Stereographic\"\n else:\n logging.error(\"Unrecognized DEM type: {}\".format(dem_type))\n sys.exit(1)\n\n for myfile in glob.glob(\"*.tif\"):\n with open(\"{}.xml\".format(myfile), \"wb\") as g:\n this_pol = None\n if cpol is None:\n cpol = \"ZZ\"\n\n if pol in myfile or cpol in myfile:\n template_suffix = ''\n encoded_jpg = pngtothumb(\"{}.png\".format(outfile))\n if pol in myfile:\n this_pol = pol\n else:\n this_pol = cpol\n elif \"ls_map\" in myfile:\n template_suffix = '_ls'\n execute(\"pbmmake 100 75 | pnmtopng > white.png\", uselogging=True)\n encoded_jpg = pngtothumb(\"white.png\")\n os.remove(\"white.png\")\n elif \"inc_map\" in myfile:\n template_suffix = '_inc'\n encoded_jpg = pngtothumb(\"{}.png\".format(os.path.splitext(myfile)[0]))\n elif \"dem\" in myfile:\n if \"NED\" in dem_type:\n template_suffix = '_dem_NED'\n elif \"SRTM\" in dem_type:\n template_suffix = '_dem_SRTM'\n elif \"EU_DEM\" in dem_type:\n template_suffix = '_dem_EUDEM'\n elif \"GIMP\" in dem_type:\n template_suffix = '_dem_GIMP'\n elif \"REMA\" in dem_type:\n template_suffix = '_dem_REMA'\n else:\n logging.error(\"ERROR: Unrecognized dem type: {}\".format(dem_type))\n encoded_jpg = pngtothumb(\"{}.png\".format(os.path.splitext(myfile)[0]))\n else:\n template_suffix = None\n encoded_jpg = None\n\n if template_suffix is not None:\n with open(\"{}/RTC_GAMMA_Template{}.xml\".format(etc_dir, template_suffix), \"rb\") as f:\n for line in f:\n line = line.replace(b\"[DATE]\", bytes(date, 'utf-8'))\n line = line.replace(b\"[TIME]\", bytes(\"{}00\".format(time), 'utf-8'))\n line = line.replace(b\"[DATETIME]\", bytes(dt, 'utf-8'))\n line = line.replace(b\"[YEARPROCESSED]\", bytes(\"{}\".format(year), 
'utf-8'))\n line = line.replace(b\"[YEARACQUIRED]\", bytes(infile[17:21], 'utf-8'))\n line = line.replace(b\"[TYPE]\", bytes(input_type, 'utf-8'))\n line = line.replace(b\"[FULL_TYPE]\", bytes(full_type, 'utf-8'))\n line = line.replace(b\"[THUMBNAIL_BINARY_STRING]\", encoded_jpg)\n if this_pol is not None:\n line = line.replace(b\"[POL]\", bytes(this_pol, 'utf-8'))\n line = line.replace(b\"[POWERTYPE]\", bytes(power_type, 'utf-8'))\n line = line.replace(b\"[GRAN_NAME]\", bytes(granulename, 'utf-8'))\n line = line.replace(b\"[FORMAT]\", bytes(format_type, 'utf-8'))\n line = line.replace(b\"[LOOKS]\", bytes(\"{}\".format(looks), 'utf-8'))\n line = line.replace(b\"[FILT]\", bytes(\"{}\".format(filter_str), 'utf-8'))\n line = line.replace(b\"[FLOOKS]\", bytes(\"{}\".format(flooks), 'utf-8'))\n line = line.replace(b\"[SPACING]\", bytes(\"{}\".format(spacing), 'utf-8'))\n line = line.replace(b\"[DEM]\", bytes(\"{}\".format(dem_type), 'utf-8'))\n line = line.replace(b\"[RESA]\", bytes(\"{}\".format(resa), 'utf-8'))\n line = line.replace(b\"[RESM]\", bytes(\"{}\".format(resm), 'utf-8'))\n line = line.replace(b\"[HYP3_VER]\", bytes(\"{}\".format(hyp3_ver), 'utf-8'))\n line = line.replace(b\"[GAMMA_VER]\", bytes(\"{}\".format(gamma_ver), 'utf-8'))\n line = line.replace(b\"[TILES]\", bytes(\"{}\".format(dem_tiles), 'utf-8'))\n line = line.replace(b\"[PCS]\", bytes(\"{}\".format(pcs), 'utf-8'))\n g.write(line + b'\\n')\n\n for myfile in glob.glob(\"*.png\"):\n with open(\"{}.xml\".format(myfile), \"wb\") as g:\n if \"rgb\" in myfile:\n scale = 'color'\n encoded_jpg = pngtothumb(\"{}_rgb.png\".format(outfile))\n else:\n scale = 'grayscale'\n encoded_jpg = pngtothumb(\"{}.png\".format(outfile))\n\n with open(\"{}/RTC_GAMMA_Template_{}_png.xml\".format(etc_dir, scale), \"rb\") as f:\n for line in f:\n line = line.replace(b\"[DATE]\", bytes(date, 'utf-8'))\n line = line.replace(b\"[TIME]\", bytes(\"{}00\".format(time), 'utf-8'))\n line = line.replace(b\"[DATETIME]\", bytes(dt, 'utf-8'))\n line = line.replace(b\"[YEARPROCESSED]\", bytes(\"{}\".format(year), 'utf-8'))\n line = line.replace(b\"[YEARACQUIRED]\", bytes(infile[17:21], 'utf-8'))\n line = line.replace(b\"[TYPE]\", bytes(input_type, 'utf-8'))\n line = line.replace(b\"[FULL_TYPE]\", bytes(full_type, 'utf-8'))\n line = line.replace(b\"[THUMBNAIL_BINARY_STRING]\", encoded_jpg)\n line = line.replace(b\"[GRAN_NAME]\", bytes(granulename, 'utf-8'))\n line = line.replace(b\"[SPACING]\", bytes(\"{}\".format(spacing), 'utf-8'))\n line = line.replace(b\"[DEM]\", bytes(\"{}\".format(dem_type), 'utf-8'))\n line = line.replace(b\"[FORMAT]\", bytes(format_type, 'utf-8'))\n line = line.replace(b\"[HYP3_VER]\", bytes(\"{}\".format(hyp3_ver), 'utf-8'))\n line = line.replace(b\"[GAMMA_VER]\", bytes(\"{}\".format(gamma_ver), 'utf-8'))\n line = line.replace(b\"[DEM_TILES]\", bytes(\"{}\".format(dem_tiles), 'utf-8'))\n line = line.replace(b\"[PCS]\", bytes(\"{}\".format(pcs), 'utf-8'))\n g.write(line + b\"\\n\")\n\n with open(f'{outfile}.README.txt', 'w') as g:\n with open(\"{}/README_RTC_GAMMA.txt\".format(etc_dir), \"r\") as f:\n for line in f:\n line = line.replace(\"[DATE]\", date)\n line = line.replace(\"[TIME]\", \"{}00\".format(time))\n line = line.replace(\"[DATETIME]\", dt)\n line = line.replace(\"[GRAN_NAME]\", granulename)\n line = line.replace(\"[YEARPROCESSED]\", \"{}\".format(year))\n line = line.replace(\"[YEARACQUIRED]\", infile[17:21])\n line = line.replace(\"[POWERTYPE]\", power_type)\n line = line.replace(\"[FORMAT]\", format_type)\n line = 
line.replace(\"[LOOKS]\", \"{}\".format(looks))\n line = line.replace(\"[FILT]\", \"{}\".format(filter_str))\n line = line.replace(\"[FLOOKS]\", \"{}\".format(flooks))\n line = line.replace(\"[SPACING]\", \"{}\".format(spacing))\n line = line.replace(\"[DEM]\", \"{}\".format(dem_type))\n line = line.replace(\"[RESA]\", \"{}\".format(resa))\n line = line.replace(\"[RESM]\", \"{}\".format(resm))\n line = line.replace(\"[HYP3_VER]\", \"{}\".format(hyp3_ver))\n line = line.replace(\"[GAMMA_VER]\", \"{}\".format(gamma_ver))\n line = line.replace(\"[DEM_TILES]\", \"{}\".format(dem_tiles))\n line = line.replace(\"[PCS]\", \"{}\".format(pcs))\n g.write(\"{}\".format(line))\n\n os.chdir(back)\n" }, { "alpha_fraction": 0.658015251159668, "alphanum_fraction": 0.6694656610488892, "avg_line_length": 25.73469352722168, "blob_id": "b0f0444109bcf5d6a357d84959e3978e92e0f0e1", "content_id": "204b6a38911f4b317ba0b47e0240d187a62a8646", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "permissive", "max_line_length": 67, "num_lines": 49, "path": "/tests/test_entrypoints.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "def test_rtc_gamma(script_runner):\n ret = script_runner.run('rtc_gamma', '+h')\n assert ret.success\n\n\ndef test_rtc_gamma_passthrough(script_runner):\n ret = script_runner.run('rtc_gamma', '--version')\n assert ret.success\n assert 'rtc_gamma v' in ret.stdout\n assert 'hyp3lib v' in ret.stdout\n assert 'hyp3proclib v' in ret.stdout\n\n\ndef test_rtc_gamma_passthrough_v2(script_runner):\n ret = script_runner.run(\n 'rtc_gamma', '++entrypoint', 'hyp3_rtc_gamma_v2', '--help')\n assert ret.success\n assert 'hyp3_rtc_gamma_v2' in ret.stdout\n assert '--bucket-prefix' in ret.stdout\n\n\ndef test_hyp3_rtc_gamma_v2(script_runner):\n ret = script_runner.run('hyp3_rtc_gamma_v2', '-h')\n assert ret.success\n\n\ndef test_hyp3_rtc_gamma(script_runner):\n ret = script_runner.run('hyp3_rtc_gamma', '-h')\n assert ret.success\n\n\ndef test_check_coreg(script_runner):\n ret = script_runner.run('check_coreg.py', '-h')\n assert ret.success\n\n\ndef test_rtc_sentinel(script_runner):\n ret = script_runner.run('rtc_sentinel.py', '-h')\n assert ret.success\n\n\ndef test_smooth_dem_tiles(script_runner):\n ret = script_runner.run('smooth_dem_tiles.py', '-h')\n assert ret.success\n\n\ndef test_xml2meta(script_runner):\n ret = script_runner.run('xml2meta.py', '-h')\n assert ret.success\n" }, { "alpha_fraction": 0.5685194730758667, "alphanum_fraction": 0.5776379704475403, "avg_line_length": 40.65776824951172, "blob_id": "b6428c503be51be630b2a473d04f26934f116a0f", "content_id": "d096d260efc40690bef0f2f99d7b006f4b587363", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34326, "license_type": "permissive", "max_line_length": 119, "num_lines": 824, "path": "/hyp3_rtc_gamma/rtc_sentinel.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "\"\"\"Create a Radiometrically Terrain-Corrected (RTC) image from a Sentinel-1 scene sing GAMMA software\"\"\"\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport shutil\nimport sys\nimport zipfile\nfrom secrets import token_hex\n\nfrom hyp3lib import ExecuteError, OrbitDownloadError\nfrom hyp3lib import saa_func_lib as saa\nfrom hyp3lib.area2point import fix_geotiff_locations\nfrom hyp3lib.asf_geometry import reproject2grid\nfrom hyp3lib.byteSigmaScale 
import byteSigmaScale\nfrom hyp3lib.copy_metadata import copy_metadata\nfrom hyp3lib.createAmp import createAmp\nfrom hyp3lib.execute import execute\nfrom hyp3lib.getDemFor import getDemFile\nfrom hyp3lib.getParameter import getParameter\nfrom hyp3lib.get_bb_from_shape import get_bb_from_shape\nfrom hyp3lib.get_dem import get_dem\nfrom hyp3lib.get_orb import downloadSentinelOrbitFile\nfrom hyp3lib.ingest_S1_granule import ingest_S1_granule\nfrom hyp3lib.makeAsfBrowse import makeAsfBrowse\nfrom hyp3lib.make_cogs import cogify_dir\nfrom hyp3lib.ps2dem import ps2dem\nfrom hyp3lib.raster_boundary2shape import raster_boundary2shape\nfrom hyp3lib.rtc2color import rtc2color\nfrom hyp3lib.system import gamma_version\nfrom hyp3lib.utm2dem import utm2dem\nfrom osgeo import gdal\n\nimport hyp3_rtc_gamma\nfrom hyp3_rtc_gamma.check_coreg import CoregistrationError, check_coreg\nfrom hyp3_rtc_gamma.create_metadata import create_arc_xml\nfrom hyp3_rtc_gamma.metadata_utils import write_asf_meta\nfrom hyp3_rtc_gamma.smoothem import smooth_dem_tiles\nfrom hyp3_rtc_gamma.xml2meta import sentinel2meta\n\n\ndef fetch_orbit_file(in_file):\n logging.info(f'Fetching orbit file for {in_file}')\n orbit_file = None\n try:\n orbit_file, _ = downloadSentinelOrbitFile(in_file)\n except OrbitDownloadError:\n logging.warning('Unable to fetch orbit file. Continuing.')\n return orbit_file\n\n\ndef get_product_name(granule_name, orbit_file=None, resolution=30, gamma0=True, power=True,\n filtered=False, matching=False):\n platform = granule_name[0:3]\n beam_mode = granule_name[4:6]\n polarization = granule_name[14:16]\n datetime = granule_name[17:32]\n res = int(resolution)\n\n if orbit_file is None:\n o = 'O'\n elif 'POEORB' in orbit_file:\n o = 'P'\n elif 'RESORB' in orbit_file:\n o = 'R'\n else:\n o = 'O'\n\n product_id = token_hex(2).upper()\n\n g = 'g' if gamma0 else 's'\n p = 'p' if power else 'a'\n f = 'f' if filtered else 'n'\n m = 'm' if matching else 'd'\n\n product_name = f'{platform}_{beam_mode}_{datetime}_{polarization}{o}_RTC{res}_G_{g}{p}u{f}e{m}_{product_id}'\n return product_name\n\n\ndef perform_sanity_checks():\n logging.info(\"Performing sanity checks on output PRODUCTs\")\n tif_list = glob.glob(\"PRODUCT/*.tif\")\n for myfile in tif_list:\n if \"VV\" in myfile or \"HH\" in myfile or \"VH\" in myfile or \"HV\" in myfile:\n # Check that the main polarization file is on a 30 meter posting\n x, y, trans, proj = saa.read_gdal_file_geo(saa.open_gdal_file(myfile))\n logging.debug(\" trans[1] = {}; trans[5] = {}\".format(trans[1], trans[5]))\n if abs(trans[5]) > 10 and abs(trans[1]) > 10:\n logging.debug(\"Checking corner coordinates...\")\n ul1 = trans[3]\n lr1 = trans[3] + y * trans[5]\n ul2 = trans[0]\n lr2 = trans[0] + x * trans[1]\n if ul1 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: ul1 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: ul1 = {}\".format(ul1))\n elif lr1 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: lr1 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: lr1 = {}\".format(lr1))\n elif ul2 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: ul2 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: ul2 = {}\".format(ul2))\n elif lr2 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: lr2 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: lr2 = {}\".format(lr2))\n 
else:\n logging.debug(\"...ok\")\n\n\ndef reproject_dir(dem_type, res, prod_dir=None):\n if \"REMA\" in dem_type:\n epsg = 3031\n elif \"GIMP\" in dem_type:\n epsg = 3413\n else:\n return\n\n tmp_geotiff = \"tmp_reproj_dir_{}.tif\".format(os.getpid())\n home = os.getcwd()\n if prod_dir:\n os.chdir(prod_dir)\n\n for inGeotiff in glob.glob(\"*.tif\"):\n in_raster = gdal.Open(inGeotiff)\n out_raster = reproject2grid(in_raster, epsg, xRes=res)\n in_raster = None # Because GDAL is weird!\n gdal.Translate(tmp_geotiff, out_raster)\n os.remove(inGeotiff)\n shutil.move(tmp_geotiff, inGeotiff)\n\n if prod_dir:\n os.chdir(home)\n\n\ndef report_kwargs(in_name, out_name, res, dem, roi, shape, match_flag, dead_flag, gamma_flag, lo_flag,\n pwr_flag, filter_flag, looks, terms, par, no_cross_pol, smooth, area, orbit_file):\n logging.info(\"Parameters for this run:\")\n logging.info(\" Input name : {}\".format(in_name))\n logging.info(\" Output name : {}\".format(out_name))\n logging.info(\" Output resolution : {}\".format(res))\n logging.info(\" DEM file : {}\".format(dem))\n if roi is not None:\n logging.info(\" Area of Interest : {}\".format(roi))\n if shape is not None:\n logging.info(\" Shape File : {}\".format(shape))\n logging.info(\" Match flag : {}\".format(match_flag))\n logging.info(\" If no match, use Dead Reckoning : {}\".format(dead_flag))\n logging.info(\" Gamma0 output : {}\".format(gamma_flag))\n logging.info(\" Low resolution flag : {}\".format(lo_flag))\n logging.info(\" Create power images : {}\".format(pwr_flag))\n logging.info(\" Speckle Filtering : {}\".format(filter_flag))\n logging.info(\" Number of looks to take : {}\".format(looks))\n logging.info(\" Number of terms in used in match : {}\".format(terms))\n if par is not None:\n logging.info(\" Offset file : {}\".format(par))\n logging.info(\" Process crosspol : {}\".format(not no_cross_pol))\n logging.info(\" Smooth DEM tiles : {}\".format(smooth))\n logging.info(\" Save Pixel Area : {}\".format(area))\n logging.info(\" Orbit File : {}\".format(orbit_file))\n\n\ndef process_pol(in_file, rtc_name, out_name, pol, res, look_fact, match_flag, dead_flag, gamma_flag,\n filter_flag, pwr_flag, browse_res, dem, terms, par=None, area=False, orbit_file=None):\n logging.info(\"Processing the {} polarization\".format(pol))\n\n mgrd = \"{out}.{pol}.mgrd\".format(out=out_name, pol=pol)\n tif = \"image_cal_map.mli.tif\"\n\n # Ingest the granule into gamma format\n ingest_S1_granule(in_file, pol, look_fact, mgrd, orbit_file=orbit_file)\n width = getParameter(\"{}.par\".format(mgrd), \"range_samples\")\n\n # Apply filter if requested\n if filter_flag:\n el_looks = look_fact * 30\n execute(f\"enh_lee {mgrd} temp.mgrd {width} {el_looks} 1 7 7\", uselogging=True)\n shutil.move(\"temp.mgrd\", mgrd)\n\n options = \"-p -n {} -q -c \".format(terms)\n if gamma_flag:\n options += \"-g \"\n\n logging.info(\"Running RTC process... initializing\")\n geo_dir = \"geo_{}\".format(pol)\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 0 {options}\", uselogging=True)\n\n if match_flag and not par:\n fail = False\n logging.info(\"Running RTC process... 
coarse matching\")\n try:\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 1 {options}\", uselogging=True)\n except ExecuteError:\n logging.warning(\"WARNING: Determination of the initial offset failed, skipping initial offset\")\n\n logging.info(\"Running RTC process... fine matching\")\n try:\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 2 {options}\", uselogging=True)\n except ExecuteError:\n if not dead_flag:\n logging.error(\"ERROR: Failed to match images\")\n sys.exit(1)\n else:\n logging.warning(\"WARNING: Coregistration has failed; defaulting to dead reckoning\")\n os.remove(\"{}/{}\".format(geo_dir, \"image.diff_par\"))\n fail = True\n\n if not fail:\n try:\n check_coreg(out_name, res, max_offset=75, max_error=2.0)\n except CoregistrationError:\n if not dead_flag:\n logging.error(\"ERROR: Failed the coregistration check\")\n sys.exit(1)\n else:\n logging.warning(\"WARNING: Coregistration check has failed; defaulting to dead reckoning\")\n os.remove(\"{}/{}\".format(geo_dir, \"image.diff_par\"))\n\n logging.info(\"Running RTC process... finalizing\")\n if par:\n shutil.copy(par, \"{}/image.diff_par\".format(geo_dir))\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 3 {options}\", uselogging=True)\n\n os.chdir(geo_dir)\n\n # Divide sigma0 by sin(theta) to get beta0\n execute(f\"float_math image_0.inc_map - image_1.sin_theta {width} 7 - - 1 1 - 0\")\n\n execute(f\"float_math image_cal_map.mli image_1.sin_theta image_1.beta {width} 3 - - 1 1 - 0\")\n\n execute(f\"float_math image_1.beta image_0.sim image_1.flat {width} 3 - - 1 1 - 0\")\n\n # Make Geotiff Files\n execute(f\"data2geotiff area.dem_par image_0.ls_map 5 {out_name}.ls_map.tif\", uselogging=True)\n execute(f\"data2geotiff area.dem_par image_0.inc_map 2 {out_name}.inc_map.tif\", uselogging=True)\n execute(f\"data2geotiff area.dem_par image_1.flat 2 {out_name}.flat.tif\", uselogging=True)\n execute(\"data2geotiff area.dem_par area.dem 2 outdem.tif\", uselogging=True)\n\n gdal.Translate(\"{}.dem.tif\".format(out_name), \"outdem.tif\", outputType=gdal.GDT_Int16)\n\n if gamma_flag:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_gamma0'.format(pol)])\n else:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_sigma0'.format(pol)])\n shutil.move(\"tmp.tif\", tif)\n createAmp(tif, nodata=0)\n\n # Make meta files and stats\n execute(f\"asf_import -format geotiff {out_name}.ls_map.tif ls_map\", uselogging=True)\n execute(\"stats -overstat -overmeta ls_map\", uselogging=True)\n execute(f\"asf_import -format geotiff {out_name}.inc_map.tif inc_map\", uselogging=True)\n execute(\"stats -overstat -overmeta -mask 0 inc_map\", uselogging=True)\n execute(f\"asf_import -format geotiff image_cal_map.mli_amp.tif tc_{pol}\", uselogging=True)\n execute(f\"stats -nostat -overmeta -mask 0 tc_{pol}\", uselogging=True)\n\n # Make browse resolution tif file\n if res == browse_res:\n shutil.copy(\"image_cal_map.mli_amp.tif\", \"{}_{}_{}m.tif\".format(out_name, pol, browse_res))\n else:\n gdal.Translate(\"{}_{}_{}m.tif\".format(out_name, pol, browse_res), \"image_cal_map.mli_amp.tif\",\n xRes=browse_res, yRes=browse_res)\n\n # Move files into the product directory\n out_dir = \"../PRODUCT\"\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n if pwr_flag:\n shutil.move(tif, 
\"{}/{}\".format(out_dir, rtc_name))\n else:\n copy_metadata(tif, \"image_cal_map.mli_amp.tif\")\n shutil.move(\"image_cal_map.mli_amp.tif\", \"{}/{}\".format(out_dir, rtc_name))\n\n shutil.move(\"{}.ls_map.tif\".format(out_name), \"{}/{}_ls_map.tif\".format(out_dir, out_name))\n shutil.move(\"{}.inc_map.tif\".format(out_name), \"{}/{}_inc_map.tif\".format(out_dir, out_name))\n shutil.move(\"{}.dem.tif\".format(out_name), \"{}/{}_dem.tif\".format(out_dir, out_name))\n if area:\n shutil.move(\"{}.flat.tif\".format(out_name), \"{}/{}_flat_{}.tif\".format(out_dir, out_name, pol))\n\n os.chdir(\"..\")\n\n\ndef process_2nd_pol(in_file, rtc_name, cpol, res, look_fact, gamma_flag, filter_flag, pwr_flag, browse_res,\n outfile, dem, terms, par=None, area=False, orbit_file=None):\n if cpol == \"VH\":\n mpol = \"VV\"\n else:\n mpol = \"HH\"\n\n mgrd = \"{out}.{pol}.mgrd\".format(out=outfile, pol=cpol)\n tif = \"image_cal_map.mli.tif\"\n\n # Ingest the granule into gamma format\n ingest_S1_granule(in_file, cpol, look_fact, mgrd, orbit_file=orbit_file)\n width = getParameter(\"{}.par\".format(mgrd), \"range_samples\")\n\n # Apply filtering if requested\n if filter_flag:\n el_looks = look_fact * 30\n execute(f\"enh_lee {mgrd} temp.mgrd {width} {el_looks} 1 7 7\", uselogging=True)\n shutil.move(\"temp.mgrd\", mgrd)\n\n options = \"-p -n {} -q -c \".format(terms)\n if gamma_flag:\n options += \"-g \"\n\n home_dir = os.getcwd()\n geo_dir = \"geo_{}\".format(cpol)\n mdir = \"geo_{}\".format(mpol)\n if not os.path.isdir(geo_dir):\n os.mkdir(geo_dir)\n\n shutil.copy(\"geo_{}/image.diff_par\".format(mpol), \"{}\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.map_to_rdc\".format(mpol), \"{}/image_0.map_to_rdc\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.ls_map\".format(mpol), \"{}/image_0.ls_map\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.inc_map\".format(mpol), \"{}/image_0.inc_map\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.sim\".format(mpol), \"{}/image_0.sim\".format(geo_dir))\n os.symlink(\"../geo_{}/area.dem_par\".format(mpol), \"{}/area.dem_par\".format(geo_dir))\n\n if par:\n shutil.copy(par, \"{}/image.diff_par\".format(geo_dir))\n\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {mdir}/area.dem\"\n f\" {mdir}/area.dem_par {geo_dir} image {res} 3 {options}\", uselogging=True)\n\n os.chdir(geo_dir)\n\n # Divide sigma0 by sin(theta) to get beta0\n execute(f\"float_math image_0.inc_map - image_1.sin_theta {width} 7 - - 1 1 - 0\")\n\n execute(f\"float_math image_cal_map.mli image_1.sin_theta image_1.beta {width} 3 - - 1 1 - 0\")\n\n execute(f\"float_math image_1.beta image_0.sim image_1.flat {width} 3 - - 1 1 - 0\")\n\n # Make geotiff file\n if gamma_flag:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_gamma0'.format(cpol)])\n else:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_sigma0'.format(cpol)])\n shutil.move(\"tmp.tif\", tif)\n\n # Make browse resolution file\n createAmp(tif, nodata=0)\n if res == browse_res:\n shutil.copy(\"image_cal_map.mli_amp.tif\", \"{}_{}_{}m.tif\".format(outfile, cpol, browse_res))\n else:\n gdal.Translate(\"{}_{}_{}m.tif\".format(outfile, cpol, browse_res), \"image_cal_map.mli_amp.tif\", xRes=browse_res,\n yRes=browse_res)\n\n # Create meta files and stats\n execute(f\"asf_import -format geotiff image_cal_map.mli_amp.tif tc_{cpol}\", uselogging=True)\n execute(f\"stats -nostat -overmeta -mask 0 tc_{cpol}\", uselogging=True)\n\n # Move files to product directory\n out_dir = \"../PRODUCT\"\n if not 
os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n execute(f\"data2geotiff area.dem_par image_1.flat 2 {outfile}.flat.tif\", uselogging=True)\n\n if pwr_flag:\n shutil.move(tif, \"{}/{}\".format(out_dir, rtc_name))\n else:\n copy_metadata(tif, \"image_cal_map.mli_amp.tif\")\n shutil.move(\"image_cal_map.mli_amp.tif\", \"{}/{}\".format(out_dir, rtc_name))\n if area:\n shutil.move(\"{}.flat.tif\".format(outfile), \"{}/{}_flat_{}.tif\".format(out_dir, rtc_name, cpol))\n\n os.chdir(home_dir)\n\n\ndef create_browse_images(out_name, pol, cpol, browse_res):\n ampfile = \"geo_{pol}/{name}_{pol}_{res}m.tif\".format(pol=pol, name=out_name, res=browse_res)\n if cpol:\n ampfile2 = \"geo_{pol}/{name}_{pol}_{res}m.tif\".format(pol=cpol, name=out_name, res=browse_res)\n threshold = -24\n outfile = \"{}_rgb.tif\".format(out_name)\n rtc2color(ampfile, ampfile2, threshold, outfile, amp=True, cleanup=True)\n colorname = \"PRODUCT/{}_rgb\".format(out_name)\n makeAsfBrowse(outfile, colorname)\n\n os.chdir(\"geo_{}\".format(pol))\n outdir = \"../PRODUCT\"\n outfile = \"{}/{}\".format(outdir, out_name)\n ampfile = \"{name}_{pol}_{res}m.tif\".format(pol=pol, name=out_name, res=browse_res)\n sigmafile = ampfile.replace(\".tif\", \"_sigma.tif\")\n byteSigmaScale(ampfile, sigmafile)\n makeAsfBrowse(sigmafile, outfile)\n\n os.chdir(\"../PRODUCT\")\n\n infile = \"{}_inc_map.tif\".format(out_name)\n outfile = \"{}_inc_map\".format(out_name)\n sigmafile = infile.replace(\".tif\", \"_sigma.tif\")\n byteSigmaScale(infile, sigmafile)\n makeAsfBrowse(sigmafile, outfile)\n os.remove(sigmafile)\n\n infile = \"{}_ls_map.tif\".format(out_name)\n outfile = \"{}_ls_map\".format(out_name)\n makeAsfBrowse(infile, outfile)\n\n infile = \"{}_dem.tif\".format(out_name)\n outfile = \"{}_dem\".format(out_name)\n sigmafile = infile.replace(\".tif\", \"_sigma.tif\")\n byteSigmaScale(infile, sigmafile)\n makeAsfBrowse(sigmafile, outfile)\n os.remove(sigmafile)\n\n raster_boundary2shape(out_name + \"_\" + pol + \".tif\", None, out_name + \"_shape.shp\", use_closing=False,\n pixel_shift=True, fill_holes=True)\n\n os.chdir(\"..\")\n\n\ndef create_consolidated_log(out_name, lo_flag, dead_flag, match_flag, gamma_flag, roi,\n shape, pwr_flag, filter_flag, pol, looks, log_file, smooth, terms,\n no_cross_pol, par):\n out = \"PRODUCT\"\n logname = \"{}/{}.log\".format(out, out_name)\n logging.info(\"Creating log file: {}\".format(logname))\n\n f = open(logname, \"w\")\n f.write(\"Consolidated log for: {}\\n\".format(out_name))\n options = \"\"\n if lo_flag:\n options += \"-l \"\n if not dead_flag:\n options += \"--fail \"\n if match_flag:\n options += \"-n \"\n if not gamma_flag:\n options += \"--sigma \"\n if filter_flag:\n options += \"-f \"\n if not pwr_flag:\n options += \"--amp \"\n if smooth:\n options += \"--smooth \"\n options += \"-k {}\".format(looks)\n options += \"-t {}\".format(terms)\n if par:\n options += \"--par {}\".format(par)\n if no_cross_pol:\n options += \"--nocrosspol\"\n if roi:\n options += \"-a {}\".format(roi)\n if shape:\n options += \"-s {}\".format(shape)\n\n cmd = \"rtc_sentinel.py \" + options\n f.write(\"Command: {}\\n\".format(cmd))\n f.close()\n\n geo_dir = \"geo_{}\".format(pol)\n add_log(log_file, logname)\n add_log(\"{}/mk_geo_radcal_0.log\".format(geo_dir), logname)\n add_log(\"{}/mk_geo_radcal_1.log\".format(geo_dir), logname)\n add_log(\"{}/mk_geo_radcal_2.log\".format(geo_dir), logname)\n add_log(\"{}/mk_geo_radcal_3.log\".format(geo_dir), logname)\n add_log(\"coreg_check.log\", logname)\n\n\ndef add_log(log, 
full_log):\n g = open(full_log, \"a\")\n g.write(\"==============================================\\n\")\n g.write(\"Log: {}\\n\".format(log))\n g.write(\"==============================================\\n\")\n\n if not os.path.isfile(log):\n g.write(\"(not found)\\n\")\n g.close()\n return ()\n\n f = open(log, \"r\")\n for line in f:\n g.write(\"{}\".format(line))\n f.close()\n\n g.write(\"\\n\")\n g.close()\n\n\ndef create_iso_xml(outfile, out_name, pol, cpol, in_file, dem_type, log, gamma_ver):\n hdf5_name = \"hdf5_list.txt\"\n path = in_file\n etc_dir = os.path.abspath(os.path.dirname(hyp3_rtc_gamma.etc.__file__))\n shutil.copy(\"{}/sentinel_xml.xsl\".format(etc_dir), \"sentinel_xml.xsl\")\n\n out = \"PRODUCT\"\n\n execute(f\"xsltproc --stringparam path {path} --stringparam timestamp timestring\"\n f\" --stringparam file_size 1000 --stringparam server stuff\"\n f\" --output out.xml sentinel_xml.xsl {path}/manifest.safe\", uselogging=True)\n\n m = sentinel2meta(\"out.xml\")\n write_asf_meta(m, \"out.meta\")\n\n ver_file = \"{}/manifest.safe\".format(path)\n ipf_ver = None\n if os.path.exists(ver_file):\n f = open(ver_file, \"r\")\n for line in f:\n if \"IPF\" in line:\n t = line.split('\"')\n ipf_ver = t[3].strip()\n else:\n logging.warning(\"No manifest.safe file found in {}\".format(path))\n\n g = open(hdf5_name, \"w\")\n g.write(\"[GAMMA RTC]\\n\")\n g.write(\"granule = {}\\n\".format(in_file.replace(\".SAFE\", \"\")))\n g.write(\"metadata = out.meta\\n\")\n\n geo_dir = \"geo_{}\".format(pol)\n dem_seg = \"{}/area.dem\".format(geo_dir)\n dem_seg_par = \"{}/area.dem_par\".format(geo_dir)\n\n g.write(\"oversampled dem file = {}\\n\".format(dem_seg))\n g.write(\"oversampled dem metadata = {}\\n\".format(dem_seg_par))\n g.write(\"original dem file = {}/{}_dem.tif\\n\".format(out, out_name))\n g.write(\"layover shadow mask = {}/{}_ls_map.tif\\n\".format(out, out_name))\n g.write(\"layover shadow stats = {}/ls_map.stat\\n\".format(geo_dir))\n g.write(\"incidence angle file = {}/{}_inc_map.tif\\n\".format(out, out_name))\n g.write(\"incidence angle metadata = {}/inc_map.meta\\n\".format(geo_dir))\n\n g.write(\"input {} file = {}\\n\".format(pol, outfile))\n g.write(\"terrain corrected {pol} metadata = {dir}/tc_{pol}.meta\\n\".format(pol=pol, dir=geo_dir))\n g.write(\"terrain corrected {} file = {}/{}\\n\".format(pol, out, outfile))\n\n if cpol:\n outfile2 = outfile.replace(pol, cpol)\n g.write(\"input {} file = {}\\n\".format(pol, outfile))\n geo_dir2 = geo_dir.replace(pol, cpol)\n g.write(\"terrain corrected {pol} metadata = {dir}/tc_{pol}.meta\\n\".format(pol=cpol, dir=geo_dir2))\n g.write(\"terrain corrected {} file = {}/{}\\n\".format(cpol, out, outfile2))\n\n g.write(\"initial processing log = {}\\n\".format(log))\n g.write(\"terrain correction log = {}\\n\".format(log))\n g.write(\"main log = {}\\n\".format(log))\n g.write(\"mk_geo_radcal_0 log = {}/mk_geo_radcal_0.log\\n\".format(geo_dir))\n g.write(\"mk_geo_radcal_1 log = {}/mk_geo_radcal_1.log\\n\".format(geo_dir))\n g.write(\"mk_geo_radcal_2 log = {}/mk_geo_radcal_2.log\\n\".format(geo_dir))\n g.write(\"mk_geo_radcal_3 log = {}/mk_geo_radcal_3.log\\n\".format(geo_dir))\n g.write(\"coreg_check log = coreg_check.log\\n\")\n g.write(\"mli.par file = {}.{}.mgrd.par\\n\".format(out_name, pol))\n g.write(\"gamma version = {}\\n\".format(gamma_ver))\n g.write(\"hyp3_rtc version = {}\\n\".format(hyp3_rtc_gamma.__version__))\n g.write(\"ipf version = {}\\n\".format(ipf_ver))\n g.write(\"dem source = {}\\n\".format(dem_type))\n 
g.write(\"browse image = {}/{}.png\\n\".format(out, out_name))\n g.write(\"kml overlay = {}/{}.kmz\\n\".format(out, out_name))\n\n g.close()\n\n execute(f\"write_hdf5_xml {hdf5_name} {out_name}.xml\", uselogging=True)\n\n logging.info(\"Generating {}.iso.xml with {}/rtc_iso.xsl\\n\".format(out_name, etc_dir))\n\n execute(f\"xsltproc {etc_dir}/rtc_iso.xsl {out_name}.xml > {out_name}.iso.xml\", uselogging=True)\n\n shutil.copy(\"{}.iso.xml\".format(out_name), \"{}\".format(out))\n\n\ndef clean_prod_dir():\n os.chdir(\"PRODUCT\")\n for myfile in glob.glob(\"*ls_map*png*\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*ls_map*kmz\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*inc_map*png*\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*inc_map*kmz\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*dem*png*\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*dem*kmz\"):\n os.remove(myfile)\n os.chdir(\"..\")\n\n\ndef configure_log_file():\n log_file = f'rtc_sentinel_{os.getpid()}.log'\n log_file_handler = logging.FileHandler(log_file)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')\n log_file_handler.setFormatter(formatter)\n logging.getLogger().addHandler(log_file_handler)\n return log_file\n\n\ndef rtc_sentinel_gamma(in_file,\n out_name=None,\n res=None,\n dem=None,\n roi=None,\n shape=None,\n match_flag=False,\n dead_flag=True,\n gamma_flag=True,\n lo_flag=True,\n pwr_flag=True,\n filter_flag=False,\n looks=None,\n terms=1,\n par=None,\n no_cross_pol=False,\n smooth=False,\n area=False):\n\n log_file = configure_log_file()\n\n logging.info(\"===================================================================\")\n logging.info(\" Sentinel RTC Program - Starting\")\n logging.info(\"===================================================================\")\n\n if res is None:\n res = 10\n if lo_flag:\n res = 30\n\n browse_res = 30\n if res > browse_res:\n browse_res = res\n\n if looks is None:\n if res == 30:\n if \"GRD\" in in_file:\n looks = 6\n else:\n looks = 3\n else:\n looks = int(res / 10 + 0.5)\n\n in_file = in_file.rstrip('/')\n if not os.path.exists(in_file):\n logging.error(\"ERROR: Input file {} does not exist\".format(in_file))\n sys.exit(1)\n if in_file.endswith('.zip'):\n logging.info(f'Unzipping {in_file}')\n with zipfile.ZipFile(in_file, 'r') as z:\n z.extractall()\n in_file = in_file.replace('.zip', '.SAFE')\n\n input_type = in_file[7:10]\n\n orbit_file = fetch_orbit_file(in_file)\n\n if out_name is None:\n out_name = get_product_name(in_file, orbit_file, res, gamma_flag, pwr_flag, filter_flag, match_flag)\n\n report_kwargs(in_file, out_name, res, dem, roi, shape, match_flag, dead_flag, gamma_flag, lo_flag,\n pwr_flag, filter_flag, looks, terms, par, no_cross_pol, smooth, area, orbit_file)\n\n orbit_file = os.path.abspath(orbit_file) # ingest_S1_granule requires absolute path\n\n if dem is None:\n logging.info(\"Getting DEM file covering this SAR image\")\n tifdem = \"tmp_{}_dem.tif\".format(os.getpid())\n if shape is not None:\n min_x, min_y, max_x, max_y = get_bb_from_shape(shape)\n logging.info(f'bounding box: {min_x}, {min_y}, {max_x}, {max_y}')\n roi = [min_x, min_y, max_x, max_y]\n if roi is not None:\n dem_type = get_dem(roi[0], roi[1], roi[2], roi[3], tifdem, post=30)\n else:\n demfile, dem_type = getDemFile(in_file, tifdem, post=30)\n\n if 'REMA' in dem_type and smooth:\n logging.info(\"Preparing to smooth DEM tiles\")\n dem, parfile = smooth_dem_tiles(\"DEM\", build=True)\n else:\n dem = 
\"area.dem\"\n parfile = \"area.dem.par\"\n if \"GIMP\" in dem_type or \"REMA\" in dem_type:\n ps2dem(tifdem, dem, parfile)\n else:\n utm2dem(tifdem, dem, parfile)\n os.remove(tifdem)\n elif \".tif\" in dem:\n tiff_dem = dem\n dem = \"area.dem\"\n parfile = \"area.dem.par\"\n utm2dem(tiff_dem, dem, parfile)\n dem_type = \"Unknown\"\n elif os.path.isfile(\"{}.par\".format(dem)):\n dem_type = \"Unknown\"\n else:\n logging.error(\"ERROR: Unrecognized DEM: {}\".format(dem))\n sys.exit(1)\n\n vvlist = glob.glob(\"{}/*/*vv*.tiff\".format(in_file))\n vhlist = glob.glob(\"{}/*/*vh*.tiff\".format(in_file))\n hhlist = glob.glob(\"{}/*/*hh*.tiff\".format(in_file))\n hvlist = glob.glob(\"{}/*/*hv*.tiff\".format(in_file))\n\n cpol = None\n pol = None\n if vvlist:\n logging.info(\"Found VV polarization - processing\")\n pol = \"VV\"\n rtc_name = out_name + \"_\" + pol + \".tif\"\n process_pol(in_file, rtc_name, out_name, pol, res, looks,\n match_flag, dead_flag, gamma_flag, filter_flag, pwr_flag,\n browse_res, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if vhlist and not no_cross_pol:\n cpol = \"VH\"\n rtc_name = out_name + \"_\" + cpol + \".tif\"\n logging.info(\"Found VH polarization - processing\")\n process_2nd_pol(in_file, rtc_name, cpol, res, looks,\n gamma_flag, filter_flag, pwr_flag, browse_res,\n out_name, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if hhlist:\n logging.info(\"Found HH polarization - processing\")\n pol = \"HH\"\n rtc_name = out_name + \"_\" + pol + \".tif\"\n process_pol(in_file, rtc_name, out_name, pol, res, looks,\n match_flag, dead_flag, gamma_flag, filter_flag, pwr_flag,\n browse_res, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if hvlist and not no_cross_pol:\n cpol = \"HV\"\n logging.info(\"Found HV polarization - processing\")\n rtc_name = out_name + \"_\" + cpol + \".tif\"\n process_2nd_pol(in_file, rtc_name, cpol, res, looks,\n gamma_flag, filter_flag, pwr_flag, browse_res,\n out_name, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if hhlist is None and vvlist is None:\n logging.error(f\"ERROR: Can not find VV or HH polarization in {in_file}\")\n sys.exit(1)\n\n fix_geotiff_locations()\n reproject_dir(dem_type, res, prod_dir=\"PRODUCT\")\n reproject_dir(dem_type, res, prod_dir=\"geo_{}\".format(pol))\n if cpol:\n reproject_dir(dem_type, res, prod_dir=\"geo_{}\".format(cpol))\n create_browse_images(out_name, pol, cpol, browse_res)\n rtc_name = out_name + \"_\" + pol + \".tif\"\n gamma_ver = gamma_version()\n create_iso_xml(rtc_name, out_name, pol, cpol, in_file, dem_type, log_file, gamma_ver)\n create_arc_xml(in_file, out_name, input_type, gamma_flag, pwr_flag, filter_flag, looks, pol, cpol,\n dem_type, res, hyp3_rtc_gamma.__version__, gamma_ver, rtc_name)\n cogify_dir(directory='PRODUCT')\n clean_prod_dir()\n perform_sanity_checks()\n logging.info(\"===================================================================\")\n logging.info(\" Sentinel RTC Program - Completed\")\n logging.info(\"===================================================================\")\n\n create_consolidated_log(out_name, lo_flag, dead_flag, match_flag, gamma_flag, roi,\n shape, pwr_flag, filter_flag, pol, looks, log_file, smooth, terms,\n no_cross_pol, par)\n return 'PRODUCT', out_name\n\n\ndef main():\n \"\"\"Main entrypoint\"\"\"\n parser = argparse.ArgumentParser(\n prog='rtc_sentinel.py',\n description=__doc__,\n )\n parser.add_argument('input', help='Name of input file, either .zip or .SAFE')\n parser.add_argument(\"-o\", 
\"--outputResolution\", type=float, help=\"Desired output resolution\")\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-e\", \"--externalDEM\", help=\"Specify a DEM file to use - must be in UTM projection\")\n group.add_argument(\"-r\", \"--roi\", type=float, nargs=4, metavar=('LON_MIN', 'LAT_MIN', 'LON_MAX', 'LAT_MAX'),\n help=\"Specify ROI to use\")\n group.add_argument(\"-s\", \"--shape\", help=\"Specify shape file to use\")\n\n parser.add_argument(\"-n\", action=\"store_false\", help=\"Do not perform matching\")\n parser.add_argument(\"--fail\", action=\"store_true\",\n help=\"if matching fails, fail the program. Default: use dead reckoning\")\n parser.add_argument(\"--sigma\", action=\"store_true\", help=\"create sigma0 instead of gamma0\")\n parser.add_argument(\"--amp\", action=\"store_true\", help=\"create amplitude images instead of power\")\n parser.add_argument(\"--smooth\", action=\"store_true\", help=\"smooth DEM file before terrain correction\")\n parser.add_argument(\"-l\", action=\"store_true\", help=\"create a lo-res output (30m)\")\n parser.add_argument(\"-f\", action=\"store_true\", help=\"run enhanced lee filter\")\n parser.add_argument(\"-k\", \"--looks\", type=int,\n help=\"set the number of looks to take (def:3 for SLC/6 for GRD)\")\n parser.add_argument(\"-t\", \"--terms\", type=int, default=1,\n help=\"set the number of terms in matching polynomial (default is 1)\")\n parser.add_argument('--output', help='base name of the output files')\n parser.add_argument(\"--par\", help=\"Stack processing - use specified offset file and don't match\")\n parser.add_argument(\"--nocrosspol\", action=\"store_true\", help=\"Do not process the cross pol image\")\n parser.add_argument(\"-a\", \"--area\", action=\"store_true\", help=\"Keep area map\")\n args = parser.parse_args()\n\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n\n # FIXME: This function's inputs should be 1:1 (name and value!) 
with CLI args!\n rtc_sentinel_gamma(args.input,\n out_name=args.output,\n res=args.outputResolution,\n dem=args.externalDEM,\n roi=args.roi,\n shape=args.shape,\n match_flag=args.n,\n dead_flag=not args.fail,\n gamma_flag=not args.sigma,\n lo_flag=args.l,\n pwr_flag=not args.amp,\n filter_flag=args.f,\n looks=args.looks,\n terms=args.terms,\n par=args.par,\n no_cross_pol=args.nocrosspol,\n smooth=args.smooth,\n area=args.area)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.48112449049949646, "alphanum_fraction": 0.49558234214782715, "avg_line_length": 37.70466232299805, "blob_id": "2ed701ed324d4aa464e781e7cbc231e9a74ec72a", "content_id": "9f9a4fd9bb06875d97fe1aff6f4e36e3a6ae9cd7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7470, "license_type": "permissive", "max_line_length": 103, "num_lines": 193, "path": "/hyp3_rtc_gamma/check_coreg.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "\"\"\"Checks results of Gamma RTC coregistration process\"\"\"\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport re\nimport sys\n\nimport numpy as np\nfrom hyp3lib.getParameter import getParameter\n\n\nclass CoregistrationError(Exception):\n \"\"\"Error to raise when coregistration fails\"\"\"\n\n\nclass CoregLogger:\n \"\"\"A local logging context to create a coregistration log file\"\"\"\n def __init__(self, logger=None, file_name='coreg_check.log', file_mode='w'):\n \"\"\"\n Args:\n logger: The logger to use for logging (defaults to root logger)\n file_name: file to write coregistation log to\n file_mode: mode to open the coregistration log file in\n \"\"\"\n self.logger = logger\n self.file_handler = logging.FileHandler(file_name, mode=file_mode)\n\n def __enter__(self):\n if self.logger is None:\n self.logger = logging.getLogger()\n self.logger.addHandler(self.file_handler)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.logger.removeHandler(self.file_handler)\n self.file_handler.close()\n\n\ndef calc(s, l, r, a): # noqa: E741\n rpt = r[0] + r[1] * s + r[2] * l + r[3] * s * l + r[4] * s * s + r[5] * l * l\n apt = a[0] + a[1] * s + a[2] * l + a[3] * s * l + a[4] * s * s + a[5] * l * l\n return rpt, apt\n\n\ndef check_coreg(sar_file, post, max_offset=50, max_error=2):\n with CoregLogger():\n logging.info(f\"SAR file: {sar_file}\")\n logging.info(f\"Checking coregistration using {post} meters\")\n logging.info(f\"Setting maximum offset to be {max_offset}\")\n logging.info(f\"Setting maximum error to be {max_error}\")\n\n myfile = \"mk_geo_radcal_2.log\"\n if os.path.isdir(\"geo_HH\"):\n mlog = \"geo_HH/{}\".format(myfile)\n elif os.path.isdir(\"geo_hh\"):\n mlog = \"geo_hh/{}\".format(myfile)\n elif os.path.isdir(\"geo_VV\"):\n mlog = \"geo_VV/{}\".format(myfile)\n elif os.path.isdir(\"geo_vv\"):\n mlog = \"geo_vv/{}\".format(myfile)\n elif os.path.isdir(\"geo\"):\n mlog = \"geo/{}\".format(myfile)\n else:\n raise CoregistrationError(f\"Can't find {myfile}\")\n\n a = np.zeros(6)\n r = np.zeros(6)\n\n with open(mlog, \"r\") as g:\n for line in g:\n if 'final range offset poly. 
coeff.:' in line:\n tmp = re.split(\":\", line)\n vals = tmp[1].split()\n if len(vals) == 1:\n r[0] = float(vals[0])\n logging.info(f\"Range offset is {r}\")\n elif len(vals) == 3:\n r[0] = float(vals[0])\n r[1] = float(vals[1])\n r[2] = float(vals[2])\n logging.info(f\"Range polynomial is {r}\")\n elif len(vals) == 6:\n r[0] = float(vals[0])\n r[1] = float(vals[1])\n r[2] = float(vals[2])\n r[3] = float(vals[3])\n r[4] = float(vals[4])\n r[5] = float(vals[5])\n logging.info(f\"Range polynomial is {r}\")\n\n if 'final azimuth offset poly. coeff.:' in line:\n tmp = re.split(\":\", line)\n vals = tmp[1].split()\n if len(vals) == 1:\n a[0] = float(vals[0])\n logging.info(f\"Azimuth offset is {a}\")\n elif len(vals) == 3:\n a[0] = float(vals[0])\n a[1] = float(vals[1])\n a[2] = float(vals[2])\n logging.info(f\"Azimuth polynomial is {a}\")\n elif len(vals) == 6:\n a[0] = float(vals[0])\n a[1] = float(vals[1])\n a[2] = float(vals[2])\n a[3] = float(vals[3])\n a[4] = float(vals[4])\n a[5] = float(vals[5])\n logging.info(f\"Azimuth polynomial is {a}\")\n\n if 'final model fit std. dev. (samples) range:' in line:\n tmp = re.split(\":\", line)\n vals = tmp[1].split()\n rng_error = float(vals[0])\n val = tmp[2].strip()\n azi_error = float(val)\n logging.info(f\"Range std dev: {rng_error} Azimuth std dev: {azi_error}\")\n error = np.sqrt(rng_error * rng_error + azi_error * azi_error)\n logging.info(f\"error is {error}\")\n if error > max_error:\n logging.warning(\"error > max_error\")\n logging.warning(\"std dev is too high, using dead reckoning\")\n logging.warning(\"Granule failed coregistration\")\n raise CoregistrationError('error > max_error')\n\n mlog = glob.glob('geo_??/*.diff_par')[0]\n if not mlog:\n mlog = glob.glob('geo/*.diff_par')[0]\n if not mlog:\n raise CoregistrationError(\"Can't find diff_par file\")\n\n if os.path.exists(mlog):\n ns = int(getParameter(mlog, \"range_samp_1\"))\n logging.info(f\"Number of samples is {ns}\")\n nl = int(getParameter(mlog, \"az_samp_1\"))\n logging.info(f\"Number of lines is {nl}\")\n else:\n raise CoregistrationError(f\"Can't find diff par file {mlog}\")\n\n rpt, apt = calc(1, 1, r, a)\n pt1 = np.sqrt(rpt * rpt + apt * apt)\n logging.info(f\"Point 1 offset is {pt1} = sqrt({rpt}**2 + {apt}**2)\")\n\n rpt, apt = calc(ns, 1, r, a)\n pt2 = np.sqrt(rpt * rpt + apt * apt)\n logging.info(f\"Point 2 offset is {pt2} = sqrt({rpt}**2 + {apt}**2)\")\n\n rpt, apt = calc(1, nl, r, a)\n pt3 = np.sqrt(rpt * rpt + apt * apt)\n logging.info(f\"Point 3 offset is {pt3} = sqrt({rpt}**2 + {apt}**2)\")\n\n rpt, apt = calc(ns, nl, r, a)\n pt4 = np.sqrt(rpt * rpt + apt * apt)\n logging.info(f\"Point 4 offset is {pt4} = sqrt({rpt}**2 + {apt}**2)\")\n\n top = max(pt1, pt2, pt3, pt4)\n offset = top * post\n\n logging.info(f\"Found absolute offset of {offset} meters\")\n if offset >= max_offset:\n raise CoregistrationError(\"offset too large, using dead reckoning\")\n\n logging.info(\"Granule passed coregistration\")\n\n\ndef main():\n \"\"\"Main entrypoint\"\"\"\n parser = argparse.ArgumentParser(\n prog='check_coreg.py',\n description=__doc__,\n )\n parser.add_argument('input', help='Name of input SAR file')\n parser.add_argument('post', type=float, help='Posting of the SAR image')\n parser.add_argument('-o', '--max_offset', type=float, default=50,\n help='Set the maximum allowable max_offset (meters)')\n parser.add_argument('-e', '--max_error', type=int, default=2,\n help='Set the maximum allowable standard deviation of max_offset fit (pixels)')\n args = parser.parse_args()\n\n 
logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n stream=sys.stdout\n )\n logging.info(\"Starting run\")\n\n check_coreg(args.input, args.post, args.max_offset, args.max_error)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4233871102333069, "alphanum_fraction": 0.6618663668632507, "avg_line_length": 40.33333206176758, "blob_id": "ec9de8cef40759c7fe57d083fe4938366393046d", "content_id": "684ada0c5faa771a7a6a4406f02a8298a4538b15", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1736, "license_type": "permissive", "max_line_length": 118, "num_lines": 42, "path": "/tests/test_rtc_sentinel.py", "repo_name": "washreve/hyp3-rtc-gamma", "src_encoding": "UTF-8", "text": "from re import match\n\nfrom hyp3_rtc_gamma import rtc_sentinel\n\n\ndef test_get_product_name():\n payload = {\n 'granule_name': 'S1A_S1_GRDH_1SSH_20181121T184017_20181121T184046_024690_02B6ED_6946',\n 'orbit_file': 'S1A_OPER_AUX_POEORB_OPOD_20181211T120749_V20181120T225942_20181122T005942.EOF',\n 'resolution': 30,\n 'gamma0': False,\n 'power': False,\n 'filtered': False,\n 'matching': False,\n }\n name = rtc_sentinel.get_product_name(**payload)\n assert match('S1A_S1_20181121T184017_SHP_RTC30_G_sauned_[0-9A-F]{4}$', name)\n\n payload = {\n 'granule_name': 'S1B_WV_OCN__2SSV_20200714T162902_20200714T163511_022469_02AA55_1A7D',\n 'orbit_file': 'S1B_OPER_AUX_RESORB_OPOD_20200714T223706_V20200714T150658_20200714T182428.EOF',\n 'resolution': 10,\n 'power': True,\n 'filtered': True,\n 'gamma0': True,\n 'matching': True,\n }\n name = rtc_sentinel.get_product_name(**payload)\n assert match('S1B_WV_20200714T162902_SVR_RTC10_G_gpufem_[0-9A-F]{4}$', name)\n\n payload = {\n 'granule_name': 'S1B_IW_SLC__1SDV_20200714T152128_20200714T152150_022469_02AA50_9A64',\n 'resolution': 30.999,\n }\n name = rtc_sentinel.get_product_name(**payload)\n assert match('S1B_IW_20200714T152128_DVO_RTC30_G_gpuned_[0-9A-F]{4}$', name)\n\n name = rtc_sentinel.get_product_name('S1A_EW_RAW__0SDH_20151118T190420_20151118T190529_008663_00C507_0A5F', None)\n assert match('S1A_EW_20151118T190420_DHO_RTC30_G_gpuned_[0-9A-F]{4}$', name)\n\n name = rtc_sentinel.get_product_name('S1A_EW_RAW__0SDH_20151118T190420_20151118T190529_008663_00C507_0A5F', 'foo')\n assert match('S1A_EW_20151118T190420_DHO_RTC30_G_gpuned_[0-9A-F]{4}$', name)\n" } ]
14
Reconfiu/server
https://github.com/Reconfiu/server
cd479e1214dea2ae094c91b99d104449115824f9
4360f21a52818e7c96867ec2555483bf912412f8
73aeefa3805c460c2489d89923fa9f27db0ed676
refs/heads/develop
2021-06-14T05:36:02.978329
2017-05-12T05:24:59
2017-05-12T05:24:59
81,399,331
0
0
null
2017-02-09T02:26:07
2017-02-09T02:28:03
2017-04-13T21:48:58
Python
[ { "alpha_fraction": 0.6387660503387451, "alphanum_fraction": 0.651260495185852, "avg_line_length": 33.91891860961914, "blob_id": "b3bbb7cfe2c4d56c9a868ca69a816300ed3d86e0", "content_id": "95b236fe0f7cb336696e5825b54a7f7930a31774", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9044, "license_type": "no_license", "max_line_length": 134, "num_lines": 259, "path": "/app/server.py", "repo_name": "Reconfiu/server", "src_encoding": "UTF-8", "text": "import os, utils, re, hashlib, time, jwt\nfrom flask import Flask, json, request\nfrom pymongo import MongoClient\nfrom flask_cors import CORS\nfrom bson.objectid import ObjectId\nfrom bson import json_util\nfrom datetime import datetime\nimport config\n\napp = Flask(__name__)\nCORS(app)\n\n\nclient = MongoClient(\n \"db\",\n 27017)\n\nclient.admin.authenticate(config.mongouser, config.mongopassword)\n# databases\ndb = client.fiudb\ndb_users = client.userdb\n\n# Globals\nPASS_MIN_LEN = 8\nPASS_MAX_LEN = 32\n\nISSUER = \"reconFIU_server\"\nEXP_TIME = 86400 # 1 day\n# Secret key for JWT\nSECRET_KEY = \"RECON_FIU_CEN_4010\" # TODO: Should be Randomized and sent to Client Browser\n_session = {}\n# creates autoResponse with data\ndef autoResponse(function=lambda: dict(), status_code=200, message=\"Success\"):\n try:\n result = function()\n except (BaseException, RuntimeError) as e:\n response = json.jsonify(\n status=400,\n message=str(e),\n data=dict(),\n records=0\n )\n client.close()\n return response, status_code\n return json.jsonify(\n status=status_code,\n message=message,\n data=result,\n records=len(result)\n ), status_code\n\n\[email protected]_request\ndef require_token():\n r_token = True\n global _session\n if request.method == \"POST\" :\n body = request.get_json(force=True)\n token = body.get(\"token\")\n user = body.get(\"user\")\n if request.endpoint == \"login\" or request.endpoint == \"add_user\":\n r_token = False\n if body is None or user is None:\n return autoResponse(status_code=400, message=\"Bad Request\")\n username = user[\"username\"] \n if username is None or (r_token and not username in _session):\n return autoResponse(status_code=400, message=\"Not logged in\")\n if r_token and (token is None or not is_valid_token(json_util.loads(token), username)):\n return autoResponse(status_code=400, message=\"invalid token\")\n\n\[email protected](\"/\")\ndef initApp():\n return \"Welcome to ReconFIU\"\n\[email protected](\"/api/getall\")\[email protected](\"/api/getall/<int:limit>\", methods=[\"POST\", \"GET\"])\ndef allRecords(limit=1000):\n cursor = db.courses.find().limit(limit)\n return autoResponse(lambda: utils.toArray(cursor))\n\n# performs a search by depending on what criteri is passed\n# if no criteria is passed the returns first 1000 records\[email protected](\"/api/searchby\", methods=[\"POST\"])\ndef searchBy():\n params = request.get_json(force=True).get(\"query\")\n if params is None:\n return allRecords(limit=100)\n course = params.get(\"course\")\n term = params.get(\"term\")\n prof = params.get(\"prof\")\n limit = params.get(\"limit\") or 100\n match = {}\n if \"course\" in params and course:\n match[\"course.number\"] = { \"$regex\" : re.compile(pattern=course, flags=re.IGNORECASE) }\n if \"term\" in params and term and term != \"All\":\n match[\"term.term\"] = term\n if \"prof\" in params and prof:\n match[\"instructor.name\"] = { \"$in\" : list(map(lambda x : re.compile(pattern=x, flags=re.IGNORECASE), prof.split() )) }\n \n pipeline_query[0][\"$match\"] 
= { \"$and\": [match] }\n pipeline_query[1][\"$limit\"] = limit\n cursor = db.courses.aggregate(pipeline_query)\n return autoResponse(lambda: utils.toArray(cursor))\n\npipeline_query = [\n { \"$match\": {} },\n { \"$limit\": 100 },\n { \"$sort\": { \"date\": 1, \"instructor.name\": 1 } }\n] \n\n\n# Find a user in userdb, return token of this user\[email protected](\"/api/finduser\", methods=[\"POST\"])\ndef find_user():\n params = request.get_json(force=True).get(\"user\")\n username = params.get(\"username\")\n user = db_users.users.find_one({\"username\": username})\n if user is None:\n return autoResponse(status_code=404, message=username + \" not found\")\n return autoResponse(function=lambda: {\"username\": user[\"username\"]}, message=\"User found\")\n\n\n# Create a user document in userdb\[email protected](\"/api/adduser\", methods=[\"POST\"])\ndef add_user():\n params = request.get_json(force=True).get(\"user\")\n username = params.get(\"username\")\n password = params.get(\"password\")\n user = db_users.users.find_one({\"username\": username})\n \n if password is None:\n return autoResponse(status_code=400, message=\"bad request\")\n if user:\n return autoResponse(status_code=409, message=username + \" already exists\")\n\n # Determine if legal password\n valid_pass, msg = is_valid_password(password)\n if not is_valid_username(username):\n return autoResponse(status_code=400, message=\"Invalid username\")\n if not valid_pass:\n return autoResponse(status_code=400, message=\"Invalid Password: \" + msg)\n\n hash_password = hash_pass(password)\n new_user = {\"username\": username, \"password\": hash_password}\n # Add to userdb\n db_users.users.insert(new_user)\n return autoResponse(status_code=201, message=username + \" has been created\")\n\n# Delete a user document in userdb\[email protected](\"/api/deleteuser\", methods=[\"POST\"])\ndef delete_user():\n params = request.get_json(force=True).get(\"user\")\n username = params.get(\"username\")\n user = db_users.users.remove({\"username\": username})\n if user:\n return autoResponse(status_code=200, message=\"Username: \" + username + \" has been deleted.\")\n return autoResponse(status_code=409, message=username + \" not found\")\n\n# Check if password is valid\ndef is_valid_password(password):\n if len(password) < 8:\n return False, \"Too short, password needs to be at least \" + str(PASS_MIN_LEN) + \" characters long\"\n if len(password) > 32:\n return False, \"Too long, password needs to be at most \" + str(PASS_MAX_LEN) + \" characters long\"\n if not password.isalnum(): # Check if alphanumeric\n return False, \"Invalid char, password can only consist of alphanumeric characters\"\n return True, \"Password OK\"\n\n# check if username is valid\ndef is_valid_username(username):\n user = username.split(\"@\")\n return len(user) == 2 and user[0] and len(user[0]) == 8 and user[1] == \"fiu.edu\"\n\n# Login user, authorization\[email protected](\"/api/login\", methods=[\"POST\"])\ndef login():\n global _session\n params = request.get_json(force=True).get(\"user\")\n username = params.get(\"username\")\n password = params.get(\"password\")\n user = db_users.users.find_one({\"username\": username})\n if user is None:\n return autoResponse(status_code=404, message=username + \" not found\")\n if username in _session: # User already logged in\n return autoResponse(status_code=409, message=username + \" is already logged in\") \n # Determine if password is correct\n if hash_pass(password) != user[\"password\"]:\n return 
autoResponse(status_code=403, message=\"Given password does not match with \" + username)\n \n token = generate_token(username)\n # Store user in _session\n _session[username] = username\n return autoResponse(function=lambda: {\"username\": username, \"token\": json_util.dumps(token)}, message=username + \" has logged in\")\n \n\n\n# Logout user\[email protected](\"/api/logout\", methods=[\"POST\"])\ndef logout():\n global _session\n # Receive username, token\n params = request.get_json(force=True).get(\"user\")\n username = params.get(\"username\") \n # Remove user from _session\n _session.pop(username, None)\n return autoResponse(message=username + \" has been logged out\")\n \n\n# Adds a comment to the database\n# Needs username, comment body, and course id\[email protected](\"/api/addcomment\", methods=[\"POST\"])\ndef add_comment():\n params = request.get_json(force=True)\n username = params.get(\"username\") or \"Anonymous\"\n body = params.get(\"body\")\n _id = params.get(\"id\")\n # str.strip() never returns None; reject missing or blank comment bodies\n if body is None or not body.strip() or _id is None:\n return autoResponse(status_code=400, message=\"Bad Request\")\n \n db.courses.update({\"_id\": ObjectId(_id)},\\\n {\"$push\": {\"comments\": {\"username\": username,\\\n \"body\": body, \"time\": datetime.now().strftime(\"%m-%d-%Y %H:%M:%S\")}}})\n return autoResponse(message=\"Comment has been added\")\n\n# Hash a given password using the sha1 hash function\ndef hash_pass(password):\n hash_obj = hashlib.sha1(password.encode(\"utf-8\")).digest()\n hash_password = hashlib.sha1(hash_obj).hexdigest()\n return hash_password\n\n\n# Generates a JSON Web Token for a given username\ndef generate_token(username):\n payload = {\n \"iss\": ISSUER, # Issuer\n \"sub\": username, # Subject\n \"iat\": time.time(), # Issued at\n }\n\n token = jwt.encode(payload, SECRET_KEY, algorithm=\"HS256\")\n return token\n\n\n# Determines if a token is valid for a given username\ndef is_valid_token(token, username):\n try:\n # PyJWT's decode takes a list via the \"algorithms\" keyword\n token_vals = jwt.decode(token, SECRET_KEY, algorithms=[\"HS256\"])\n except (jwt.DecodeError, jwt.InvalidTokenError):\n return False\n if token_vals[\"iss\"] != ISSUER or token_vals[\"sub\"] != username:\n return False\n else:\n return True\n\n\nif __name__ == \"__main__\":\n # Secret key for _sessions\n app.run(host=\"0.0.0.0\", debug=True)\n" }, { "alpha_fraction": 0.6880000233650208, "alphanum_fraction": 0.7120000123977661, "avg_line_length": 24, "blob_id": "b116d3343510f13b344325814cd039c160f4fce3", "content_id": "d3e6edac11ecf14017f1c4444424b04013e9bbc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 125, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/Dockerfile", "repo_name": "Reconfiu/server", "src_encoding": "UTF-8", "text": "FROM python:3.4\nWORKDIR /reconfiu\nADD . 
/reconfiu\nRUN pip install -r requirements.txt\nCMD [\"python3\", \"-u\" ,\"app/server.py\"]\n" }, { "alpha_fraction": 0.7086614370346069, "alphanum_fraction": 0.7086614370346069, "avg_line_length": 24.399999618530273, "blob_id": "f161199971c38f78a89845e0391d2da144abe253", "content_id": "7da24ee21d35997f94b6b6cad1e3df65bf5917c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 56, "num_lines": 5, "path": "/app/utils.py", "repo_name": "Reconfiu/server", "src_encoding": "UTF-8", "text": "from bson import json_util\n\ndef toArray(cursor):\n json_docs = [json_util.dumps(doc) for doc in cursor]\n return json_docs\n" }, { "alpha_fraction": 0.6114845275878906, "alphanum_fraction": 0.6216631531715393, "avg_line_length": 31.11111068725586, "blob_id": "0e28ad113cc34e26fe7266d646b4afebee67c1b4", "content_id": "e2c3c72c3dc5228722e22039469caf1742fe4cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5207, "license_type": "no_license", "max_line_length": 302, "num_lines": 162, "path": "/README.md", "repo_name": "Reconfiu/server", "src_encoding": "UTF-8", "text": "# ReconFIU - Server \n\n> Python server running on docker container, by running our server in a docker container we can easily change, upgrade and scale our server. In order to use docker, we install `docker-engine` and `docker-compose`. In case of Mac and Windows `docker-machine` with `virtualbox` must also be installed. \n\n## Requirements\n\n* **Python 3.4* (https://www.python.org/)\n ```\n For Linux run command:\n $ sudo apt-get install python\n Windows/Mac OS:\n Download from https://www.python.org/downloads/release/python-2711/\n ```\n \n* **Docker 1.12.6** (https://docs.docker.com/)\n ```\n 1. Linux: https://docs.docker.com/engine/installation/linux/\n 2. Windows: https://docs.docker.com/docker-for-windows/\n * For older versions of Windows download Docker-Toolbox version 1.12.6 from https://github.com/docker/toolbox/releases\n 3. Mac OS: https://docs.docker.com/docker-for-mac/\n * For older versions of Mac download Docker-Toolbox version 1.12.6 from https://github.com/docker/toolbox/releases\n ```\n \n* **Docker-compose 1.10.0** \n ```\n Follow these steps: https://docs.docker.com/compose/install/\n ```\n \n* **Docker-machine (Windows/Mac users only)** For docker-machine to run we also need virtualbox. 
\n ```\n docker-machine: https://docs.docker.com/machine/install-machine/\n virtualbox: https://www.virtualbox.org/wiki/Downloads\n ```\n After installing docker-machine create a docker virtual machine\n ```\n $ docker-machine create --driver virtualbox default\n $ eval \"$(docker-machine env default)\"\n ```\n \n Full guide: https://docs.docker.com/machine/get-started/\n* **Important Note**\n To check if docker, docker-compose and python are installed run these commands\n ```\n $ docker-compose -v\n $ docker -v\n $ python --version\n ```\n \n If no errors occurred then we can build our server.\n \n## Setup\nTo set up our server with docker after all dependencies have been installed follow these steps:\n \n* **All platforms** \n ```\n $ git clone https://github.com/reckonfiu/server.git\n $ cd server\n $ docker-compose up\n ```\n \n* **Note:** For Linux you might have to do\n ```\n $ sudo docker-compose up\n ```\n \n In the browser go to `localhost:5000` \n \n## Server Technology Stack\n* **Flask** http://flask.pocoo.org/ \n * **flask_cors** https://flask-cors.readthedocs.io/en/latest/\n* **Pymongo** https://api.mongodb.com/python/current/ \n* **PyJWT** https://github.com/jpadilla/pyjwt\n\n## API\n* **Search by**\n Returns courses that match the specified course, term or professor passed in the body of the request. The data is returned sorted by the fields that are passed: first course, then term, then professor. \n \n API request route: the object passed to the query parameter must be a JSON string\n ```\n POST /api/searchBy\n headers: \n content-type: application/json\n body: { query: { course: course_name, term: term_number, prof: firstname<space>lastname } }\n where course_name and term_number don't contain any white space\n \n Sample query using HTML5 fetch API:\n \n fetch('http://localhost:5000/api/searchby', {\n method: 'POST',\n headers: new Headers({ 'Content-Type': 'application/json' }),\n mode: 'cors',\n body: JSON.stringify({ query: { course: \"cop4610\" } })\n }).then(resp => {\n resp.json().then(data => {\n console.log(data);\n })\n })\n \n Using professor and course:\n \n fetch('http://localhost:5000/api/searchby', {\n method: 'POST',\n headers: new Headers({ 'Content-Type': 'application/json' }),\n mode: 'cors',\n body: JSON.stringify({ query: { course: \"cop2210\", prof: \"masoud\" } })\n }).then(resp => {\n resp.json().then(data => {\n console.log(data);\n })\n })\n ``` \n If none of the course, term or prof fields are passed then the first 100 records found are returned \n Response:\n ```\n \"status\": 200,\n \"data\" : [{ professor : example }],\n \"records\": 1,\n \"message\": \"Success\",\n ```\n \n* **authenticate user**\n ```\n POST /api/login\n headers: content-type: application/json\n body: { user: {username: \"\", password: \"\" } }\n ```\n \n* **add user**\n ```\n POST /api/adduser\n headers: content-type: application/json\n body: { user: {username: \"\", password: \"\" } }\n ```\n \n* **find user**\n ```\n POST /api/finduser\n headers: content-type: application/json\n body: { user: {username: \"\", password: \"\" } }\n ```\n \n* **delete user**\n ```\n POST /api/deleteuser\n headers: content-type: application/json\n body: { user: {username: \"\"} }\n ```\n* **Store comments**\n ```\n POST /api/addcomment\n headers: content-type: application/json\n body: { comment: {username: \"\", body: \"\", id: \"\"} }\n ```\n\n\n\n## TODO:\n Users should not be allowed to use the API without being logged in.\n* **Implement ORM framework library**\n\n In order to keep the 
embedded data model consistent in mongo, let's implement an ORM, possibly using\n  **Humongolus** https://github.com/entone/Humongolus\n  \n" }, { "alpha_fraction": 0.7678571343421936, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 27.5, "blob_id": "ee446e2b98900e31c9d4714f054885626cb2cab5", "content_id": "46e8769f4d60da1f3f46aac8ea9284df125df67c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/app/example-config.py", "repo_name": "Reconfiu/server", "src_encoding": "UTF-8", "text": "mongouser = \"enterusername\"\nmongopassword = \"enterpassword\"" }, { "alpha_fraction": 0.3785425126552582, "alphanum_fraction": 0.42510122060775757, "avg_line_length": 23.4761905670166, "blob_id": "6e53af4f80804ab069ef95f52955cdf5860e9e15", "content_id": "492f94638d3740f0577cbf9ab5d9d91f51832232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 494, "license_type": "no_license", "max_line_length": 55, "num_lines": 21, "path": "/docker-compose.yml", "repo_name": "Reconfiu/server", "src_encoding": "UTF-8", "text": "version: \"2\"\nservices:\n  db:\n    image: mongo:3.4.2\n    environment:\n      # enter credentials here\n      - MONGO_INITDB_ROOT_USERNAME\n      - MONGO_INITDB_ROOT_PASSWORD\n    volumes:\n      - /data/db:/data/db\n    ports:\n      - \"27017:27017\" \n  api:\n    build: .\n    ports:\n      - \"5000:5000\"\n    volumes:\n      - .:/reconfiu\n      - /etc/localtime:/etc/localtime \n    links:\n      - db\n\n" } ]
6
PythonJamaica/collective.badge
https://github.com/PythonJamaica/collective.badge
99f697e5b255d97469b85397f8f1b538523c4e01
c4213fa20283440e9837619756bb23f6256471e0
e95ee089914e265998de726ff3179f7c36ba9b06
refs/heads/master
2020-04-05T23:23:54.253088
2015-11-25T18:15:46
2015-11-25T18:15:46
47,024,085
0
0
null
2015-11-28T13:14:01
2015-11-22T04:08:06
2015-11-25T18:15:50
null
[ { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 20, "blob_id": "d90197b39aaaad984325eee514a28ca7afdf9979", "content_id": "40f93885d0cecb2ec289e76eb6e8a88a18d8451e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/src/collective/badge/interfaces.py", "repo_name": "PythonJamaica/collective.badge", "src_encoding": "UTF-8", "text": "from zope.interface import Interface\n\n\nclass IBadge(Interface):\n \"\"\"Marker for badge content type.\"\"\"\n" } ]
1
seakintruth/hive-hydra-socket
https://github.com/seakintruth/hive-hydra-socket
7d0a4dd59a87f2f49b315da0d6a73c22a71fe9be
c9e94eac9d372cf5c7a3000db08d302748f6f2db
3a66b6d3dedd345c606ff800d775def016a3388f
refs/heads/master
2023-04-22T11:38:11.767204
2021-05-06T06:32:27
2021-05-06T06:32:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6721611618995667, "alphanum_fraction": 0.7014651894569397, "avg_line_length": 18.5, "blob_id": "46630f9403dfae447e241b90bf41222c5954f08f", "content_id": "b85b68670cdabc89338c5ecd1f905115c0180408", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 546, "license_type": "no_license", "max_line_length": 46, "num_lines": 28, "path": "/Dockerfile", "repo_name": "seakintruth/hive-hydra-socket", "src_encoding": "UTF-8", "text": "FROM pypy:3.7-buster\n\n\nARG HIVE_SERVER_ACCOUNT\nARG HIVE_POSTING_KEY\nARG SECRET_KEY\n\nENV HIVE_SERVER_ACCOUNT $HIVE_SERVER_ACCOUNT \\\n HIVE_POSTING_KEY $HIVE_POSTING_KEY \\\n SECRET_KEY $SECRET_KEY \\\n PYTHONFAULTHANDLER=1 \\\n PYTHONHASHSEED=random \\\n PYTHONUNBUFFERED=1 \\\n PIP_DEFAULT_TIMEOUT=100 \\\n PIP_DISABLE_PIP_VERSION_CHECK=1 \\\n PIP_NO_CACHE_DIR=1\n\nRUN pip install poetry\n\nWORKDIR /app/\nCOPY . /app/\n\nRUN poetry install --no-interaction --no-ansi\n\nEXPOSE 80/tcp\nEXPOSE 5000/tcp\n\nCMD [\"poetry\", \"run\", \"python3\", \"app.py\"]\n" }, { "alpha_fraction": 0.595806896686554, "alphanum_fraction": 0.5977572202682495, "avg_line_length": 26, "blob_id": "bbfa06503280a4266ff92cc03862d6578b2bb03c", "content_id": "735f99e8facf11a0b922c02a846fcd44b5b0ff0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2051, "license_type": "no_license", "max_line_length": 94, "num_lines": 76, "path": "/app.py", "repo_name": "seakintruth/hive-hydra-socket", "src_encoding": "UTF-8", "text": "import logging\nfrom flask import Flask, render_template\nfrom flask_socketio import SocketIO, emit, send\nfrom beem import Hive\nimport os\n\n# Testnet instead of main Hive\nUSE_TEST_NODE = True\nTEST_NODE = ['http://testnet.openhive.network:8091']\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\napp.secret_key = os.getenv('SECRET_KEY')\n\nlogging.basicConfig(level=logging.INFO,\n format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')\n\n\[email protected]('/')\ndef index():\n \"\"\" Simple page for testing \"\"\"\n return render_template('index.html')\n\[email protected]('connect')\ndef test_connect():\n emit('after connect', {'data':'Lets dance'})\n\[email protected]('message')\ndef url_in(url):\n \"\"\" Send a URL and I'll post it to Hive \"\"\"\n custom_json = {'url': url}\n trx_id, success = send_notification(custom_json=custom_json)\n custom_json['trx_id'] = trx_id\n custom_json['success'] = success\n emit('response', {'data': custom_json})\n\n\ndef send_notification(custom_json, server_account='', wif=''):\n \"\"\" Sends a custom_json to Hive\n Expects two env variables, Hive account name and posting key\n HIVE_SERVER_ACCOUNT\n HIVE_POSTING_KEY\n \"\"\"\n\n operation_id = 'hive-hydra'\n\n try:\n if server_account == '':\n server_account = os.getenv('HIVE_SERVER_ACCOUNT')\n pass\n if wif == '':\n wif = [os.getenv('HIVE_POSTING_KEY')]\n pass\n\n if USE_TEST_NODE:\n h = Hive(keys=wif,node=TEST_NODE)\n else:\n h = Hive(keys=wif)\n\n\n tx = h.custom_json(id=operation_id, json_data= custom_json,\n required_posting_auths=[server_account])\n\n trx_id = tx['trx_id']\n logging.info(f'Transaction sent: {trx_id}')\n return trx_id, True\n\n except Exception as ex:\n error_message = f'{ex} occurred {ex.__class__}'\n logging.error(error_message)\n trx_id = error_message\n return trx_id, False\n\n\nif __name__ == '__main__':\n socketio.run(app)" }, { "alpha_fraction": 0.6626825928688049, "alphanum_fraction": 0.671978771686554, 
"avg_line_length": 22.5625, "blob_id": "4a61961d3445605b1ac95244f790baeb07065456", "content_id": "783438ad302af714139b71ae1e312bff12bc1e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "no_license", "max_line_length": 64, "num_lines": 32, "path": "/testnetspam.py", "repo_name": "seakintruth/hive-hydra-socket", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom beem import Hive\nfrom datetime import datetime\nfrom time import sleep\nimport os\nimport json\n\n# Testnet instead of main Hive\nUSE_TEST_NODE = True\nTEST_NODE = ['http://testnet.openhive.network:8091']\n\n\n# If we're using the test net it has to have transactions\n\nserver_account = os.getenv('HIVE_SERVER_ACCOUNT')\n\nwif = [os.getenv('HIVE_POSTING_KEY')]\n\nh = Hive(keys=wif,node=TEST_NODE)\n\noperation_id = 'tick-over-testnet'\n\nwhile True:\n custom_json = {\n 'no agenda': 'in the morning',\n 'at' : str(datetime.utcnow())\n }\n\n tx = h.custom_json(id=operation_id, json_data= custom_json,\n required_posting_auths=[server_account])\n print(json.dumps(tx, indent=2))\n sleep(60)" }, { "alpha_fraction": 0.7511415481567383, "alphanum_fraction": 0.7557077407836914, "avg_line_length": 38.727272033691406, "blob_id": "5a1d43257418e123cf1dc45d3744cd0993a4c4da", "content_id": "343f3608af5ab6fc2aa12e76b1f45ab617b7e0a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 876, "license_type": "no_license", "max_line_length": 267, "num_lines": 22, "path": "/README.md", "repo_name": "seakintruth/hive-hydra-socket", "src_encoding": "UTF-8", "text": "# hive-hydra-socket\n\nBasic socket to run and listend for a connection. Anything sent will be put into a custom_json on Hive.\n\nI presume this needs some other web engine and packaging in some way but this is all the Python you need.\n\nThe watching programs can be found in https://github.com/brianoflondon/hive-hydra/blob/main/watcher.py\n\n\n## IMPORTANT:\n\nThis defaults to using a Hive Testnet not the main chain.\n\n```\n# Testnet instead of main Hive\nUSE_TEST_NODE = True\nTEST_NODE = ['http://testnet.openhive.network:8091']\n```\n\nTo use the main chain, all you have to do is set ```USE_TEST_NODE``` to False.\n\nFor testing, the watching program and the posting program have to be using the same Hive Node. If no node is specified (i.e. when testing is turned off) the program will automatically choose a live Hive node and will hunt for a new one if a timeout or failure occurs.\n\n\n" }, { "alpha_fraction": 0.6081504821777344, "alphanum_fraction": 0.6583071947097778, "avg_line_length": 18.9375, "blob_id": "e29f3ff3f08bf76956ab776e6a7d21c9ca8b5ea8", "content_id": "06837abbb7f590f2aac12186b5f1d2ea95017440", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 319, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/pyproject.toml", "repo_name": "seakintruth/hive-hydra-socket", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"hive-hydra-socket\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\"Your Name <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.7\"\nbeem = \"^0.24.21\"\nFlask-SocketIO = \"^5.0.1\"\n\n[tool.poetry.dev-dependencies]\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n" } ]
5
medev21/learningSite-DjangoApp
https://github.com/medev21/learningSite-DjangoApp
f6150dabd115fc22db3c7c716fe14b339a4fde49
80ad04d80dc36120d1fd864c08002ca3ab217725
40967a778ce32797003795a94db1c926b31787f3
refs/heads/master
2021-01-10T07:30:31.313563
2016-01-31T01:25:51
2016-01-31T01:25:51
50,753,533
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6968374848365784, "alphanum_fraction": 0.7066521048545837, "avg_line_length": 38.869564056396484, "blob_id": "738dd27a4e55871cea187c3866015b08220f2fe0", "content_id": "bb78493694bc3ef4638901503125931eeb08acb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 917, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/courses/views.py", "repo_name": "medev21/learningSite-DjangoApp", "src_encoding": "UTF-8", "text": "# from django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\n\n# Create your views here.\nfrom .models import Course, Step\n\ndef course_list(request):\n courses = Course.objects.all() #select the courses that exists\n\n #join them with commas, convert each course object into string\n # output = ', '.join([str(course) for course in courses])\n\n # return HttpResponse(output) #return https with courses w/ commas\n return render(request, 'courses/course_list.html', {'courses' : courses})\n\ndef course_detail(request, pk):\n # course = Course.objects.get(pk = pk)\n course = get_object_or_404(Course, pk=pk)\n return render(request, 'courses/course_detail.html', {'course': course})\n\ndef step_detail(request, course_pk, step_pk):\n step = get_object_or_404(Step, course_id = course_pk, pk = step_pk)\n return render(request, 'courses/step_detail.html', {'step': step})\n" }, { "alpha_fraction": 0.7682926654815674, "alphanum_fraction": 0.7682926654815674, "avg_line_length": 26.33333396911621, "blob_id": "075580675e9939e31a30776725cd7f133b580019", "content_id": "3737150e873cd373a4504ae667c8ca3824b67a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 82, "license_type": "no_license", "max_line_length": 70, "num_lines": 3, "path": "/README.md", "repo_name": "medev21/learningSite-DjangoApp", "src_encoding": "UTF-8", "text": "## README\n\nA basic django app that display courses, title, descriptions and step.\n" }, { "alpha_fraction": 0.6789413094520569, "alphanum_fraction": 0.6869965195655823, "avg_line_length": 32.42307662963867, "blob_id": "af4c9e09977e6ef6f4e3a8124e3cf1909a2a86f1", "content_id": "d7ec38b46e3ab044b4e137ac02a6f444e916a37f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "no_license", "max_line_length": 94, "num_lines": 26, "path": "/courses/models.py", "repo_name": "medev21/learningSite-DjangoApp", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Course(models.Model):\n # add columns to our table\n created_at = models.DateTimeField(auto_now_add=True)#add now msg? when item created\n title = models.CharField(max_length=225)\n description = models.TextField()\n\n #defines this thing turns into a string\n def __str__(self):\n return self.title\n\nclass Step(models.Model):\n title = models.CharField(max_length=255)\n description = models.TextField()\n content = models.TextField(blank=True, default='')\n order = models.IntegerField(default=0) #order of step\n course = models.ForeignKey(Course) # a column that points to a record in a different table\n\n class Meta:\n ordering = ['order',]\n #this tells django to order all our records by the order attribute\n\n def __str__(self):\n return self.title\n" } ]
3
yo-carthy/DataStructuresAlgorithms
https://github.com/yo-carthy/DataStructuresAlgorithms
f5d83d2f1b2ee1b46e4de410476a31a0fb35ef6f
5b64f94aefe4308a1d77fbfea38daa887a6a3892
d8c80e39261d3e246e340cfe32b866e4184b532a
refs/heads/main
2023-04-15T22:10:32.316357
2021-05-01T03:46:45
2021-05-01T03:46:45
362,764,029
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4845586121082306, "alphanum_fraction": 0.5520634055137634, "avg_line_length": 32.56880569458008, "blob_id": "fdecd714fb8e93e7c247b330108e75dbd516e1e3", "content_id": "4b9b3852dce5e6c857f00763e96c861fab8f2900", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3659, "license_type": "no_license", "max_line_length": 127, "num_lines": 109, "path": "/karatsuba_multiplication_Stanford_Homework_Week1.cpp", "repo_name": "yo-carthy/DataStructuresAlgorithms", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <string>\n\nusing namespace std;\n\nvoid addZeros(string &num1, string&num2){\n int l1 = num1.size();\n int l2 = num2.size();\n if(l1 > l2){\n for(int i = 0; i < l1 - l2; i++){\n num2 = ('0' + num2);\n }\n }else{\n for(int i = 0; i < l2 - l1; i++){\n num1 = ('0' + num1);\n }\n }\n}\n\nstring stringPlus(string num1, string num2){\n int carrier = 0;\n int bitresult = 0;\n string fullResult;\n string finalResult;\n addZeros(num1,num2);\n for(int i =0; i < num1.size(); i++){\n bitresult = num1[num1.size()-i-1] - '0' + num2[num2.size()-i-1] - '0' + carrier;\n carrier = 0;\n if(bitresult > 9){\n carrier = 1;\n bitresult = bitresult - 10;\n }\n fullResult.push_back(bitresult+'0');\n }\n if(carrier == 1)fullResult.push_back('1');\n for(int i = 0; i < fullResult.size(); i++){\n finalResult.push_back(fullResult[fullResult.size()-i-1]);\n }\n //finalResult.push_back('\\0');\n int pos = finalResult.find_first_not_of('0');\n if(pos!=string::npos)finalResult = finalResult.substr(pos, finalResult.size()-pos);\n else finalResult = \"0\";\n return finalResult;\n}\n\nstring stringMinus(string num1, string num2){\n int carrier = 0;\n int bitresult = 0;\n string fullResult;\n string finalResult;\n addZeros(num1,num2);\n for(int i =0; i < num1.size(); i++){\n bitresult = num1[num1.size()-i-1] - num2[num2.size()-i-1] - carrier;\n carrier = 0;\n if(bitresult < 0){\n carrier = 1;\n bitresult = bitresult + 10;\n }\n fullResult.push_back(bitresult+'0');\n }\n for(int i = 0; i < fullResult.size(); i++){\n finalResult.push_back(fullResult[fullResult.size()-i-1]);\n }\n //finalResult.push_back('\\0');\n int pos = finalResult.find_first_not_of('0');\n if(pos!=string::npos)finalResult = finalResult.substr(pos, finalResult.size()-pos);\n else finalResult = \"0\";\n return finalResult;\n}\n\nstring karatsuba(string num1, string num2){\n if(num1.length() == 1 || num2.length() ==1){\n return to_string(atoi(num1.c_str()) * atoi(num2.c_str()));\n }else{\n addZeros(num1,num2);\n string a, b, c, d;\n a = num1.substr(0,num1.size()/2 + num1.size()%2);\n b = num1.substr(num1.size()/2 + num1.size()%2, num1.size()/2);\n c = num2.substr(0,num2.size()/2 + num2.size()%2);\n d = num2.substr(num2.size()/2 + num2.size()%2, num2.size()/2);\n string ac = karatsuba(a,c);\n string bd = karatsuba(b,d);\n string acPlusbd= stringPlus(ac,bd);\n string adPlusbc = karatsuba(stringPlus(a,b),stringPlus(c,d));\n adPlusbc = stringMinus(adPlusbc, acPlusbd);\n cout<<\" ac: \"<<ac<<\" adPlusbc: \"<<adPlusbc<<\":\"<<\" bd: \"<<bd<<\" final: \"<<stringPlus(stringPlus(ac,adPlusbc),bd)<<endl;\n int bigOrder = num1.size()/2 * 2;\n int smallOrder = num1.size()/2 ;\n for(int i = 0; i < bigOrder; i++){\n ac = ac + '0';\n }\n for(int i = 0; i < smallOrder; i++){\n adPlusbc = adPlusbc + '0';\n }\n cout<<\" A: \"<<a<<\" B: \"<<b<<\":\"<<\" C: \"<<c<<\" D: \"<<d<<endl;\n cout<<\" ac: \"<<ac<<\" adPlusbc: \"<<adPlusbc<<\":\"<<\" bd: \"<<bd<<\" final: 
\"<<stringPlus(stringPlus(ac,adPlusbc),bd)<<endl;\n return stringPlus(stringPlus(ac,adPlusbc),bd);\n }\n}\n\nint main()\n{\n string a = \"3141592653589793238462643383279502884197169399375105820974944592\";\n string b = \"2718281828459045235360287471352662497757247093699959574966967627\";\n addZeros(a,b);\n cout<< \"a+b =\";\n cout <<karatsuba(a,b);\n}\n" }, { "alpha_fraction": 0.5268065333366394, "alphanum_fraction": 0.5501165390014648, "avg_line_length": 20.450000762939453, "blob_id": "e05493198b1412eea112312fd2ad2944526cd67c", "content_id": "339ce27c09feb43fe5f05b18d010b2d8d05c64d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 55, "num_lines": 20, "path": "/CountingInversions.py", "repo_name": "yo-carthy/DataStructuresAlgorithms", "src_encoding": "UTF-8", "text": "# Python3 program to count \n# inversions in an array\n \ndef getInvCount(arr, n):\n \n inv_count = 0\n for i in range(n):\n for j in range(i + 1, n):\n if (arr[i] > arr[j]):\n inv_count += 1\n \n return inv_count\n \n# Driver Code\narr = [1, 20, 6, 4, 5]\nn = len(arr)\nprint(\"Number of inversions are\",\n getInvCount(arr, n))\n \n# Credit: This code was initially contributed by Smitha Dinesh Semwal\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 83, "blob_id": "7635ab43668b5f95fd0c92a247cb4a9d16294a64", "content_id": "5c9ff004ee4cba31db6783bd18181e11e1c268cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 168, "license_type": "no_license", "max_line_length": 140, "num_lines": 2, "path": "/README.md", "repo_name": "yo-carthy/DataStructuresAlgorithms", "src_encoding": "UTF-8", "text": "# DataStructuresAlgorithms\nThis repository contains fully executed code (my way of writing notes :)) from the Stanford Data Structures and Algorithms course by Tim R.\n" } ]
3
PoojaBhatia12-3-92/NLP_Lab_Exercises
https://github.com/PoojaBhatia12-3-92/NLP_Lab_Exercises
3ba935f4e5d7f24da41ac0723d11fe81fe44a51a
289ae9910b59ea8628fbefb32685785a3bb7d2fb
48e0d46ec9832a3aa7cbec4e12a1d92ed3e9c15a
refs/heads/master
2020-03-16T05:50:09.414961
2019-09-09T09:53:48
2019-09-09T09:53:48
132,541,633
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5848143696784973, "alphanum_fraction": 0.627863347530365, "avg_line_length": 31.658063888549805, "blob_id": "57ee5a8e98ce3e57a1dce6a82ac1efc9d5b5f9e6", "content_id": "5b353e223e1d8b625529e7ace04acacde847f5e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10128, "license_type": "no_license", "max_line_length": 126, "num_lines": 310, "path": "/PoojaBhatia_Solutions_Exer2.py", "repo_name": "PoojaBhatia12-3-92/NLP_Lab_Exercises", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 07 19:27:08 2018\n\n@author: POOJA\n\"\"\"\nimport nltk \nfrom nltk.corpus import brown\nfrom nltk.corpus import treebank\nfrom nltk import DefaultTagger as df\nfrom nltk import UnigramTagger as ut\nfrom nltk import BigramTagger as bt\nfrom nltk import TrigramTagger as tg\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom nltk.corpus import TaggedCorpusReader\nimport plotly.offline as offline\nfrom plotly.offline import init_notebook_mode, iplot \n\nfrom IPython.display import display\n\n\n#### Corpus X1#####\ntreebank_annotated_sent = nltk.corpus.treebank.tagged_sents()\nsizeX1= int(len(treebank_annotated_sent)* 0.8)\ntrain_sents_treebank = treebank_annotated_sent[:sizeX1]\ntest_sents_treebank = treebank_annotated_sent[sizeX1:]\n\n####Corpus X2####\nbrown_annotated_sent = nltk.corpus.brown.tagged_sents()\nsizeX2 = int(len(brown_annotated_sent) * 0.8)\ntrain_sents_brown = brown_annotated_sent[:sizeX2]\ntest_sents_brown = brown_annotated_sent[sizeX2:]\n\n################################ MODEL 1####################################################\n#####Training#######\ndef features(sentence, index):\n return {\n 'word': sentence[index],\n 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n 'is_entire_word_capitalized':sentence[index].upper() == sentence[index],\n 'prefix-1': sentence[index][0],\n 'suffix-1': sentence[index][-1],\n 'prev_word': '' if index == 0 else sentence[index - 1],\n 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n 'length_word': len(sentence[index]),\n 'is_numeric': sentence[index].isdigit(),\n 'is_alphabetic': sentence[index].isalpha(),\n 'is_alphanumeric':sentence[index].isalnum(),\n }\ndef untag(tagged_sentence):\n return [w for w, t in tagged_sentence]\n\ndef transform_to_dataset(tagged_sentences):\n X, y = [], []\n for tagged in tagged_sentences:\n for index in range(len(tagged)):\n X.append(features(untag(tagged), index))\n y.append(tagged[index][1])\n \n return X, y\n \nX_treebank, y_treebank = transform_to_dataset(train_sents_treebank) \nX_brown, y_brown = transform_to_dataset(train_sents_brown)\n#########Implementing a classifier#############################\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nsize=10000\n\nclf = Pipeline([\n ('vectorizer', DictVectorizer(sparse=False)),\n ('classifier', DecisionTreeClassifier(criterion='entropy'))\n])\n####for treebank###\nclf.fit(X_treebank[:size],y_treebank[:size])\n \nprint('training OK for X1')\n \nX_treebank_test, y_treebank_test = transform_to_dataset(test_sents_treebank)\n\n####for Brown###\nclf.fit(X_brown[:size],y_brown[:size])\n \nprint('training OK for X2')\n \nX_brown_test, y_brown_test = transform_to_dataset(test_sents_brown)\nprint()\n\n####################################### MODEL 
2###########################################\nnltk.download('maxent_treebank_pos_tagger')\nMAXEXT_POS_TAGGER =nltk.data.load('taggers/maxent_treebank_pos_tagger/english.pickle')\n#################Model3.x = rule-based classifiers (x = 1 to 5)##########################\npatterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n\n###Training OF model3.x in X1######\ndef_model_1 = nltk.DefaultTagger('NN')\nuni_model_1 = nltk.UnigramTagger(train_sents_treebank)\nbi_model_1 = nltk.BigramTagger(train_sents_treebank)\ntri_model_1 = nltk.TrigramTagger(train_sents_treebank)\nregexp_model_1 = nltk.RegexpTagger(patterns)\n###Training OF model3.x in X2######\ndef_model_2 = nltk.DefaultTagger('NN')\nuni_model_2 = nltk.UnigramTagger(train_sents_brown)\nbi_model_2 = nltk.BigramTagger(train_sents_brown)\ntri_model_2 = nltk.TrigramTagger(train_sents_brown)\nregexp_model_2 = nltk.RegexpTagger(patterns)\n#########TASK 1####################################################\n\n########performance 1.1 = model1 in X1#############################\nprint(\"performance 1.1 = model1 in X1\")\nperformance_1_1 =clf.score(X_treebank_test, y_treebank_test)\nprint(\"Accuracy:\", performance_1_1)\nprint()\n########performance 1.2 = model2 in X1#############################\nprint(\"performance 1.2 = model2 in X1\")\nperformance_1_2= MAXEXT_POS_TAGGER.evaluate(treebank_annotated_sent)\nprint(\"Accuracy:\",performance_1_2)\nprint()\n########performance 1.3.x = model3.x in X1#########################\n# performance of Default Tagger\nprint(\"performance 1.3.1 = model3.1 in X1\")\nperformance_1_3_1= def_model_1.evaluate(test_sents_treebank)\nprint(\"Accuracy:\",performance_1_3_1)\nprint()\n# performance of Unigram Tagger\nprint(\"performance 1.3.2 = model3.2 in X1\")\nperformance_1_3_2=uni_model_1.evaluate(test_sents_treebank)\nprint(\"Accuracy:\",performance_1_3_2)\nprint()\n# performance of Bigram Tagger\nprint(\"performance 1.3.3 = model3.3 in X1\")\nperformance_1_3_3=bi_model_1.evaluate(test_sents_treebank)\nprint(\"Accuracy:\",performance_1_3_3)\nprint()\n# performance of Trigram Tagger\nprint(\"performance 1.3.4 = model3.4 in X1\")\nperformance_1_3_4=tri_model_1.evaluate(test_sents_treebank)\nprint(\"Accuracy:\",performance_1_3_4)\nprint()\n# performance of Regex Tagger\nprint(\"performance 1.3.5 = model3.5 in X1\")\nperformance_1_3_5=regexp_model_1.evaluate(test_sents_treebank)\nprint(\"Accuracy:\",performance_1_3_5)\nprint()\n\n########performance 1.4 = model1 in X2#######################\nprint(\"performance 1.4 = model1 in X2\")\nperformance_1_4 =clf.score(X_brown_test, y_brown_test)\nprint(\"Accuracy:\", performance_1_4)\nprint()\n#######performance 1.5 = model2 in X2#######################\nprint(\"performance 1.5 = model2 in X2 \")\nperformance_1_5= MAXEXT_POS_TAGGER.evaluate(brown_annotated_sent)\nprint(\"Accuracy:\",performance_1_5)\nprint()\n########performance 1.6.x = model3.x in X2########\n# performance of Default Tagger\nprint(\"performance 1.6.1 = model3.1 in X2\")\nperformance_1_6_1= def_model_2.evaluate(test_sents_brown)\nprint(\"Accuracy:\",performance_1_6_1)\nprint(\"\")\n# performance of Unigram Tagger\nprint(\"performance 1.6.2 = model3.2 in X2\")\nperformance_1_6_2=uni_model_2.evaluate(test_sents_brown)\nprint(\"Accuracy:\",performance_1_6_2)\nprint(\"\")\n# performance of Bigram Tagger\nprint(\"performance 1.6.3 = model3.3 in 
X2\")\nperformance_1_6_3=bi_model_2.evaluate(test_sents_brown)\nprint(\"Accuracy:\",performance_1_6_3)\nprint(\"\")\n# performance of Trigram Tagger\nprint(\"performance 1.6.4 = model3.4 in X2\")\nperformance_1_6_4=tri_model_2.evaluate(test_sents_brown)\nprint(\"Accuracy:\",performance_1_6_4)\nprint(\"\")\n# performance of Regex Tagger\nprint(\"performance 1.6.5 = model3.5 in X2\")\nperformance_1_6_5=regexp_model_2.evaluate(test_sents_brown)\nprint(\"Accuracy:\",performance_1_6_5)\nprint(\"\")\n\n######## Results of Task1 on BarChart###########\ndata = [go.Bar(\n x=['Task 1.1', 'Task 1.2', 'Task 1.3.1','Task 1.3.2', 'Task 1.3.3','Task 1.3.4','Task 1.3.5',\n 'Task 1.4','Task 1.5', 'Task 1.6.1','Task 1.6.2', 'Task 1.6.3','Task 1.6.4','Task 1.6.5'],\n y=[performance_1_1, performance_1_2, performance_1_3_1,performance_1_3_2, performance_1_3_3, performance_1_3_4,\n performance_1_3_5, performance_1_4, performance_1_5,performance_1_6_1, performance_1_6_2, performance_1_6_3,\n performance_1_6_4, performance_1_6_5]\n )]\nlayout = go.Layout(\n title='Results of Task1 on BarChart',\n xaxis=dict(\n title='Task Number',\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n ),\n yaxis=dict(\n title='Accuracy',\n titlefont=dict(\n size=16,\n color='rgb(107, 107, 107)'\n )\n ))\nfigure=go.Figure(data=data, layout=layout)\noffline.plot(figure, image='png', filename='Task1.html')\n\n####Corpus X3#########\ncorp = nltk.corpus.ConllCorpusReader('.', 'germanfile',\n ['ignore', 'words', 'ignore', 'ignore', 'pos'],\n encoding='utf-8')\n\ntagged_sents = corp.tagged_sents()\n\nsizeX3 = int(len(tagged_sents) * 0.8)\ntrain_tagged_sents = tagged_sents[:sizeX3]\ntest_tagged_sents = tagged_sents[sizeX3:]\n\n####Model 4############\nX_tagged, y_tagged = transform_to_dataset(train_tagged_sents)\nsize=10000\n\nclf.fit(X_tagged[:size],y_tagged[:size])\n \nprint('training OK for X3')\n \nX_tagged_test, y_tagged_test = transform_to_dataset(test_tagged_sents)\n\n\n########performance 2.1 = model4 in X3#############################\nprint(\"performance 2.1 = model4 in X3\")\nperformance_2_1 =clf.score(X_tagged_test, y_tagged_test)\nprint(\"Accuracy:\", performance_2_1)\nprint()\n\n####Model 5############\n#import os\n#os.environ['TREETAGGER_HOME'] = '/Users/POOJA/Documents/TreeTagger/cmd'\n\nfrom treetagger import TreeTagger\ntt = TreeTagger(language='german')\n#result_train=tt.tag(X_tagged_test)\n\n########performance 2.2 = model5 in X3#############################\n\nprint(\"performance 2.2 = model5 in X3\")\n#performance_2_2 = np.mean([x[1] == y for x, y in zip(res_train, y_tagged_test)])\n###STORING Accuracy has 0 because TreeTagger is giving AttributeError: 'TreeTagger' object has no attribute '_treetagger_bin' \n#####error in the testtagger.py file--Unable to create environment in Windows 10 for the same########\n\nperformance_2_2 = 0.0\nprint(\"Accuracy:\", performance_2_2)\nprint()\n\n######## Results of Task1 on BarChart###########\ndata = [go.Bar(\n x=['Task 2.1', 'Task 2.2'],\n y=[performance_2_1, performance_2_2]\n )]\nlayout = go.Layout(\n title='Results of Task2 on BarChart',\n xaxis=dict(\n title='Task Number',\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n ),\n yaxis=dict(\n title='Accuracy',\n titlefont=dict(\n size=16,\n color='rgb(107, 107, 107)'\n )\n ))\nfigure=go.Figure(data=data, layout=layout)\n\noffline.plot(figure, image='png', filename='Task2.html')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.6875868439674377, 
"alphanum_fraction": 0.7133476734161377, "avg_line_length": 28.314815521240234, "blob_id": "2f34b66329111053b26758bbed6d394d4b6c8c63", "content_id": "3409fc2c0699fe0074a163d055b1dda85751bd22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7919, "license_type": "no_license", "max_line_length": 104, "num_lines": 270, "path": "/PoojaBhatia_Solutions_Exer3.py", "repo_name": "PoojaBhatia12-3-92/NLP_Lab_Exercises", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np \nimport pandas as pd \nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils.np_utils import to_categorical\nfrom keras.layers.convolutional import Convolution1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom keras.layers import Flatten\nimport re\n\n# parameters\nmax_fatures = 500\nembed_dim = 128\nlstm_out = 196\ndropout = 0.1\ndropout_1d = 0.4\nrecurrent_dropout = 0.1\nrandom_state = 1324\nvalidation_size = 1000\nbatch_size = 16\nepochs=2\nverbose= 2\n\n# Preprocess and Read Data \ndf = pd.read_csv('dataset_sentiment.csv')\ndf = df[['text','sentiment']]\nprint(df[0:10])\n\ndf = df[df.sentiment != \"Neutral\"] \ndf['text'] = df['text'].apply(lambda x: x.lower()) #\ndf['text'] = df['text'].apply(lambda x: x.replace('rt',' '))\ndf['text'] = df['text'].apply((lambda x: re.sub('[^a-zA-z0-9\\s]','',x)))\nprint(df[0:10]) \n \ntok = Tokenizer(num_words=max_fatures, split=' ')\ntok.fit_on_texts(df['text'].values)\nX = tok.texts_to_sequences(df['text'].values)\nX = pad_sequences(X)\n\n# Model1:Using LSTM\ndef model_1():\n nn = Sequential()\n nn.add(Embedding(max_fatures, embed_dim, input_length = X.shape[1]))\n nn.add(SpatialDropout1D(dropout_1d))\n nn.add(LSTM(lstm_out, dropout=dropout, recurrent_dropout=recurrent_dropout))\n nn.add(Dense(2, activation='softmax'))\n nn.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])\n print(nn.summary())\n return nn\n\n#Model2:Using ConvNet\ndef model_2():\n nn = Sequential()\n nn.add(Embedding(max_fatures, embed_dim, input_length = X.shape[1]))\n nn.add(Convolution1D(filters=100,kernel_size=3, padding=\"valid\", activation=\"relu\", strides=1))\n nn.add(MaxPooling1D(pool_size=2))\n nn.add(Flatten())\n nn.add(Dense(2, activation='softmax'))\n nn.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])\n print(nn.summary())\n return nn\n\nY = pd.get_dummies(df['sentiment']).values\n\n#Split Dataset\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.30, random_state = random_state)\n\n\nX_validate = X_test[-validation_size:]\nY_validate = Y_test[-validation_size:]\nX_test = X_test[:-validation_size]\nY_test = Y_test[:-validation_size]\n\n#Evaluation Function \ndef evaluation(nn):\n \n score, accuracy = nn.evaluate(X_test, Y_test, verbose = 2, batch_size = batch_size)\n print(\"score: %.2f\" % (score))\n print(\"acc: %.2f\" % (accuracy))\n\n pos_cnt, neg_cnt, pos_ok, neg_ok = 0, 0, 0, 0\n for x in range(len(X_validate)):\n result = nn.predict(X_validate[x].reshape(1,X_test.shape[1]),batch_size=1,verbose = 2)[0]\n if np.argmax(result) == np.argmax(Y_validate[x]):\n if np.argmax(Y_validate[x]) == 0: neg_ok += 1\n 
else: pos_ok += 1\n if np.argmax(Y_validate[x]) == 0: neg_cnt += 1\n else: pos_cnt += 1\n\n print(\"pos_acc\", pos_ok/pos_cnt*100, \"%\")\n print(\"neg_acc\", neg_ok/neg_cnt*100, \"%\")\n\n X2 = ['what are u going to say about that? the truth, wassock?!']\n X2 = tok.texts_to_sequences(X2)\n X2 = pad_sequences(X2, maxlen=26, dtype='int32', value=0)\n print(X2)\n print(nn.predict(X2, batch_size=1, verbose = 2)[0])\n\n\n\n####Results####\n\nnn_1=model_1()\nnn_1.fit(X_train, Y_train, epochs = epochs, batch_size=batch_size, verbose=verbose)\nevaluation(nn_1)\n\n\nnn_2=model_2()\nnn_2.fit(X_train, Y_train, epochs = epochs, batch_size=batch_size, verbose=verbose)\nevaluation(nn_2)\n\n\n\n# In[8]:\n\n\n#Generating MetaData\n\nfrom rdflib import Namespace, Graph, Literal\nfrom rdflib.namespace import FOAF, OWL, XSD, RDFS, DCTERMS, DOAP, DC, RDF\n\n\nprov = Namespace('http://www.w3.org/ns/prov#')\ndcat = Namespace('http://www.w3.org/ns/dcat#')\nmexalgo = Namespace('http://mex.aksw.org/mex-algo#')\nmexperf = Namespace('http://mex.aksw.org/mex-perf#')\nmexcore = Namespace('http://mex.aksw.org/mex-core#')\nthis = Namespace('http://mex.aksw.org/examples/')\n\ng = Graph()\n# Create Binding\ng.bind('dct',DCTERMS)\ng.bind('owl',OWL)\ng.bind('foaf',FOAF)\ng.bind('xsd', XSD)\ng.bind('rdfs', RDFS)\ng.bind('doap', DOAP)\ng.bind('dc', DC)\ng.bind('prov', prov)\ng.bind('dcat', dcat)\ng.bind('mexalgo',mexalgo)\ng.bind('mexperf',mexperf)\ng.bind('mexcore',mexcore)\ng.bind('this',this)\n\n\ng.add((this.pooja_task3,mexcore.Experiment, prov.Entity))\ng.add((this.pooja_task3,mexcore.ApplicationContext, prov.Entity))\ng.add((this.pooja_task3,DCTERMS.date, Literal('2018-07-22',datatype=XSD.date)))\ng.add((this.pooja_task3,FOAF.givenName, Literal('Pooja Bhatia')))\ng.add((this.pooja_task3,FOAF.mbox, Literal('[email protected]')))\n\n\n#Configuration of Model 1\ng.add((this.configuration1,RDF.type,mexcore.ExperimentConfiguration))\ng.add((this.configuration1,prov.used, this.model1))\ng.add((this.configuration1,prov.wasStartedBy,this.pooja_task3))\n\n#Configuration of Model 2\ng.add((this.configuration2,RDF.type,mexcore.ExperimentConfiguration))\ng.add((this.configuration2,prov.used, this.model2))\ng.add((this.configuration2,prov.wasStartedBy,this.pooja_task3))\n\n\n\n\ng.add((this.hyerparameter_model1,mexalgo.HyperParameterCollection,prov.Entity))\ng.add((this.hyerparameter1,RDFS.label,Literal('HyperParameterCollection')))\ng.add((this.hyerparameter_model1,prov.hadMember,this.hyerparameter1))\n\ng.add((this.hyerparameter_model2,mexalgo.HyperParameterCollection,prov.Entity))\ng.add((this.hyerparameter2,RDFS.label,Literal('HyperParameterCollection')))\ng.add((this.hyerparameter_model2,prov.hadMember,this.hyerparameter2))\n\n\ng.add((this.hyerparameter1,mexalgo.HyperParameter,prov.Entity))\ng.add((this.hyerparameter1,RDFS.label, Literal('LSTM')))\ng.add((this.hyerparameter1,DCTERMS.identifier, Literal('LSTM')))\ng.add((this.hyerparameter1,prov.value, Literal('196',datatype=XSD.float)))\n\n\ng.add((this.hyerparameter2,mexalgo.HyperParameter,prov.Entity))\ng.add((this.hyerparameter2,RDFS.label, Literal('ConvNet')))\ng.add((this.hyerparameter2,DCTERMS.identifier, Literal('ConvNet')))\ng.add((this.hyerparameter2,prov.value, 
Literal('100',datatype=XSD.float)))\n\n\ng.add((this.execution1,mexcore.ExecutionOverall,prov.Entity))\ng.add((this.execution1,prov.generated,this.performance_measures1))\ng.add((this.execution1,prov.used,this.test))\ng.add((this.execution1,prov.used,this.hyerparameter_model1))\ng.add((this.execution1,prov.used,this.model1))\n\ng.add((this.performance_measures1,mexcore.PerformanceMeasure,prov.Entity))\ng.add((this.performance_measures1,mexperf.score,Literal('0.38',datatype=XSD.float)))\ng.add((this.performance_measures1,mexperf.accuracy,Literal('0.84',datatype=XSD.float)))\ng.add((this.performance_measures1,prov.wasGeneratedBy,this.execution1))\n\n\ng.add((this.execution2,mexcore.ExecutionOverall,prov.Entity))\ng.add((this.execution2,prov.generated,this.performance_measures2))\ng.add((this.execution2,prov.used,this.test))\ng.add((this.execution2,prov.used,this.model2))\n\ng.add((this.performance_measures2,mexcore.PerformanceMeasure,prov.Entity))\ng.add((this.performance_measures2,mexperf.score,Literal('0.38',datatype=XSD.float)))\ng.add((this.performance_measures2,mexperf.accuracy,Literal('0.85',datatype=XSD.float)))\ng.add((this.performance_measures2,prov.wasGeneratedBy,this.execution2))\n\n\ng.add((this.model1,mexalgo.Algorithm,prov.Entity))\ng.add((this.model1,RDFS.label,Literal('LSTM')))\ng.add((this.model1,mexalgo.hasHyperParameter,this.hyerparameter1))\n\ng.add((this.model2,mexalgo.Algorithm,prov.Entity))\ng.add((this.model2,RDFS.label,Literal('ConvNet')))\ng.add((this.model2,mexalgo.hasHyperParameter,this.hyerparameter2))\n\n\nwith open('pooja_Exer3_metadata.ttl','wb') as f:\n f.write(g.serialize(format='turtle'))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" } ]
2
louloucodes/louloucodes.github.io
https://github.com/louloucodes/louloucodes.github.io
376b180969728094a7832b50091c372a8e4e2974
973ee15b6e052ceb949a195f48fc486be5959fd9
06cf0f21cdf4bf4d4cda60cb2f85d14c968f0153
refs/heads/master
2022-12-30T05:21:02.200252
2020-10-22T18:51:07
2020-10-22T18:51:07
289,709,348
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6349517107009888, "alphanum_fraction": 0.69655442237854, "avg_line_length": 55.338233947753906, "blob_id": "b56a8e82ff8b017d7db979f1a3dfb187ddb34b2b", "content_id": "49a6c1bcc3f911611ecf9909bc04af165c3492b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 7678, "license_type": "no_license", "max_line_length": 626, "num_lines": 136, "path": "/_site/jekyll/2020/08/30/Adventures-in-Navigation.html", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\"><head>\n <meta charset=\"utf-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"><!-- Begin Jekyll SEO tag v2.6.1 -->\n<title>Adventures in Navigation | LouLou Codes</title>\n<meta name=\"generator\" content=\"Jekyll v3.9.0\" />\n<meta property=\"og:title\" content=\"Adventures in Navigation\" />\n<meta property=\"og:locale\" content=\"en_US\" />\n<meta name=\"description\" content=\"Add another item to the list of things that are not so easy for me: tags. Something so straightforward as tagging posts and pages for content should be easy to implement in a language that touts itself as easy and accessible.\" />\n<meta property=\"og:description\" content=\"Add another item to the list of things that are not so easy for me: tags. Something so straightforward as tagging posts and pages for content should be easy to implement in a language that touts itself as easy and accessible.\" />\n<link rel=\"canonical\" href=\"http://localhost:4000/jekyll/2020/08/30/Adventures-in-Navigation.html\" />\n<meta property=\"og:url\" content=\"http://localhost:4000/jekyll/2020/08/30/Adventures-in-Navigation.html\" />\n<meta property=\"og:site_name\" content=\"LouLou Codes\" />\n<meta property=\"og:type\" content=\"article\" />\n<meta property=\"article:published_time\" content=\"2020-08-30T00:00:00-04:00\" />\n<script type=\"application/ld+json\">\n{\"url\":\"http://localhost:4000/jekyll/2020/08/30/Adventures-in-Navigation.html\",\"headline\":\"Adventures in Navigation\",\"dateModified\":\"2020-08-30T00:00:00-04:00\",\"datePublished\":\"2020-08-30T00:00:00-04:00\",\"mainEntityOfPage\":{\"@type\":\"WebPage\",\"@id\":\"http://localhost:4000/jekyll/2020/08/30/Adventures-in-Navigation.html\"},\"description\":\"Add another item to the list of things that are not so easy for me: tags. 
Something so straightforward as tagging posts and pages for content should be easy to implement in a language that touts itself as easy and accessible.\",\"@type\":\"BlogPosting\",\"@context\":\"https://schema.org\"}</script>\n<!-- End Jekyll SEO tag -->\n<link rel=\"stylesheet\" href=\"/assets/main.css\"><link type=\"application/atom+xml\" rel=\"alternate\" href=\"http://localhost:4000/feed.xml\" title=\"LouLou Codes\" /></head>\n<body><header class=\"site-header\" role=\"banner\">\n\n <div class=\"wrapper\"><a class=\"site-title\" rel=\"author\" href=\"/\">LouLou Codes</a><nav class=\"site-nav\">\n <input type=\"checkbox\" id=\"nav-trigger\" class=\"nav-trigger\" />\n <label for=\"nav-trigger\">\n <span class=\"menu-icon\">\n <svg viewBox=\"0 0 18 15\" width=\"18px\" height=\"15px\">\n <path d=\"M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.032C17.335,0,18,0.665,18,1.484L18,1.484z M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.032C17.335,6.031,18,6.696,18,7.516L18,7.516z M18,13.516C18,14.335,17.335,15,16.516,15H1.484 C0.665,15,0,14.335,0,13.516l0,0c0-0.82,0.665-1.483,1.484-1.483h15.032C17.335,12.031,18,12.695,18,13.516L18,13.516z\"/>\n </svg>\n </span>\n </label>\n\n <div class=\"trigger\"><a class=\"page-link\" href=\"/about/\">About</a><a class=\"page-link\" href=\"/blog.html\">Blog</a></div>\n </nav></div>\n</header>\n<main class=\"page-content\" aria-label=\"Content\">\n <div class=\"wrapper\">\n <article class=\"post h-entry\" itemscope itemtype=\"http://schema.org/BlogPosting\">\n\n <header class=\"post-header\">\n <h1 class=\"post-title p-name\" itemprop=\"name headline\">Adventures in Navigation</h1>\n <p class=\"post-meta\">\n <time class=\"dt-published\" datetime=\"2020-08-30T00:00:00-04:00\" itemprop=\"datePublished\">Aug 30, 2020\n </time></p>\n </header>\n\n <div class=\"post-content e-content\" itemprop=\"articleBody\">\n <p>Add another item to the list of things that are not so easy for\nme: tags. \nSomething so straightforward as tagging posts and pages\nfor content should be easy to implement in a language that touts\nitself as easy and accessible.</p>\n\n<p>For a rank beginner (hi!) this is not so easy. I’ll save my \nmisadventures in linking to tags for another post, because the\ntrue subject here is …</p>\n<h3 id=\"the-navbar\">The navbar</h3>\n<p>I encountered issues with the navbar in one of my attempts to\ncreate pages for the tags. All of a sudden, the navbar filled \nwith <code class=\"language-plaintext highlighter-rouge\">Tag: aws</code> and <code class=\"language-plaintext highlighter-rouge\">Tag: python</code> and all of the other tags \naccumulated through my posts.</p>\n\n<h3 id=\"what-is-going-on\">What is going on?</h3>\n<p>Some fumbling around – remember, noob here – led me to the\ndefault (I think?) header.html in the default minima theme. This\nis a reusable snippet of html that can be included on any page,\nand is conveniently found in the <code class=\"language-plaintext highlighter-rouge\">_includes</code> folder.</p>\n\n<p>The culprit was here:</p>\n\n<div class=\"language-plaintext highlighter-rouge\"><div class=\"highlight\"><pre class=\"highlight\"><code>...if my_page.title ...\n\n</code></pre></div></div>\n<p>See that? <code class=\"language-plaintext highlighter-rouge\">if my_page.title</code> meant that every single page that\nhad a title would appear in the navbar. 
Doh!</p>\n\n<h3 id=\"my-solution\">My solution</h3>\n<p>I added a flag variable to the pages I want to appear in the \nnavbar. Specifically, in <code class=\"language-plaintext highlighter-rouge\">about.html</code> and <code class=\"language-plaintext highlighter-rouge\">blog.html</code> (should these\nbe <code class=\"language-plaintext highlighter-rouge\">.md</code>?), to the front matter:</p>\n<div class=\"language-plaintext highlighter-rouge\"><div class=\"highlight\"><pre class=\"highlight\"><code>---\nnavbar: true\n---\n</code></pre></div></div>\n<p>Then I returned to the <code class=\"language-plaintext highlighter-rouge\">header.html</code> file and changed the \nvariable controlling appearance in the header to \n<code class=\"language-plaintext highlighter-rouge\">my_page.navbar</code>.</p>\n<h3 id=\"problem-solved\">Problem solved!</h3>\n<p>And now back to the tagging problem…</p>\n\n<h3 id=\"haha\">Haha</h3>\n<p>Also joke’s on me when I attempted to cite the Liquid code, my\nserver raised an error…so I’ll have to figure that out too.</p>\n\n </div>\n <b>Tags: </b>\n \n \n <a href=\"/tag/jekyll/index.html\">jekyll </a>\n \n <a href=\"/tag/liquid/index.html\">liquid </a>\n \n <a href=\"/tag/navbar/index.html\">navbar </a>\n \n <a href=\"/tag/tags/index.html\">tags </a>\n \n <a class=\"u-url\" href=\"/jekyll/2020/08/30/Adventures-in-Navigation.html\" hidden></a</article>\n\n </div>\n </main><footer class=\"site-footer h-card\">\n <data class=\"u-url\" href=\"/\"></data>\n\n <div class=\"wrapper\">\n\n <h2 class=\"footer-heading\">LouLou Codes</h2>\n\n <div class=\"footer-col-wrapper\">\n <div class=\"footer-col footer-col-1\">\n <ul class=\"contact-list\">\n <li class=\"p-name\">LouLou Codes</li></ul>\n </div>\n\n <div class=\"footer-col footer-col-2\"><ul class=\"social-media-list\"><li><a href=\"https://github.com/louloucodes\"><svg class=\"svg-icon\"><use xlink:href=\"/assets/minima-social-icons.svg#github\"></use></svg> <span class=\"username\">louloucodes</span></a></li><li><a href=\"https://www.twitter.com/pretzelsbaby\"><svg class=\"svg-icon\"><use xlink:href=\"/assets/minima-social-icons.svg#twitter\"></use></svg> <span class=\"username\">pretzelsbaby</span></a></li></ul>\n</div>\n\n <div class=\"footer-col footer-col-3\">\n <p>Learning new skills at the tender age of &quot;none of your beeswax.&quot; </p>\n </div>\n </div>\n\n </div>\n\n</footer>\n</body>\n\n</html>\n" }, { "alpha_fraction": 0.7543749809265137, "alphanum_fraction": 0.7612500190734863, "avg_line_length": 87.88888549804688, "blob_id": "0b4599d30cc9452f6768f7ec1750c5e666af731f", "content_id": "a7a03ba521d3626e70af3d1ee72ac0275135917b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1600, "license_type": "no_license", "max_line_length": 339, "num_lines": 18, "path": "/_posts/2020-08-25-Not-so-easy-for-me.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntitle: Get your site up and running\ncategory: jekyll\ntags: meta jekyll github blog cli vi\n---\n\n# \"Get up and running in *seconds*,\" they said.\n\nTo be fair, I did get this site up and running in seconds...but only after \nnuking two other iterations of a blog. In total: a few hours plus a few seconds, this blog is up and running.\n\nI've learned a few things along the way:\n1. I'm not convinced that github's `gh-pages` is the way to go for blogging. 
It wasn't the obvious one for me: I couldn't figure out how to branch off of that stand-alone graph. My solution: create a repo with the name of the site. Not an elegant solution, but a solution.\n2. Re-acquainted with basic linux commands. It has been ... a few years since I last copied directories, used the vi text editor (please don't suggest emacs), or thought about config variables. It's not like riding a bike after taking some time off -- there's very little muscle memory.\n3. Creating a web page from scratch in 2020 is (not shockingly) a completely different experience from the same thing in 1995 at which point I marked up html by hand without an IDE to check for closing tags, and I was super impressed with myself when I made a tiled background from an image. It made the page unreadable, but I didn't care.\n\nSo...adventures in Jekyll and Liquid are in my future. Perhaps even some Ruby. I use Python in my day job: I'm a data scientist! And that's all I've got for now. I'm looking forward to playing with the layout of this site, and maybe helping my friends create their own static sites to host their own projects.\n" }, { "alpha_fraction": 0.7402088642120361, "alphanum_fraction": 0.7460835576057434, "avg_line_length": 33.818180084228516, "blob_id": "49b9d8d50da04d6acaf5616948f57133fae4381e", "content_id": "37c9b48e3775ff64d9250432fabee52f36c4ece8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 298, "num_lines": 44, "path": "/_posts/2020-10-18-automating_publishing_task.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\nauthor: louloucodes\ntitle: Automating my publishing workflow\ncategory: automation\ntags: automation shell git workflow tags\n---\n### Adventures in tagging, continued\n\nMy solution for tagging posts is the 'jekyll-tagging' plugin, which GitHub does not support. I discussed the workaround [in this post](https://louloucodes.github.io/2020/09/10/Adventures-in-Tagging-Part-2.html), and outlined the steps I needed to make ensure that pages for new tags appear.\n\nThere aren't that many steps, but they are kind of annoying, so... I wrote a script to automate them. \n\nThis is my first shell script and I am pleased with it :heart_eyes:\n\n```\n#! /bin/sh\n\n#automate workflow to publish blog post:\n# make sure new auto-generated tag pages are uploaded\n# add all, commit with message, and push to git\n\necho Enter commit message\nread commit_message\necho Your message is ${commit_message}\n\npub_dir=~/my_path #I hardcoded this with the actual path\n\n# remove current tag directory\nrm -R ${pub_dir}/tag\n\n# create tag directory\nmkdir ${pub_dir}/tag\n\n# copy everything from the _site/tag directory to tag\ncp -R ${pub_dir}/_site/tag/* ${pub_dir}/tag/\n\n# add, commit, and push\ngit add .\ngit commit -m \"${commit_message}\"\ngit push\n\n```\nAnd a side note: I discovered that the tag `CLI` isn't working as I expect it to because the tagging process turns it into lowercase `cli`. Which is great! But I have to figure out how to convert that. 
I'm sure it will be a straightforward, built-in function like Python's `lower()` except in Ruby.\n" }, { "alpha_fraction": 0.7358039617538452, "alphanum_fraction": 0.7358039617538452, "avg_line_length": 30, "blob_id": "74b0be871cda209a219b62713543c72b55f5fa82", "content_id": "7c941b6afe3558c3cfc1cb7a411b6ed91acf981e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 67, "num_lines": 54, "path": "/_posts/2020-08-30-Adventures-in-Navigation.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntitle: Adventures in Navigation\ncategory: jekyll\ntags: jekyll liquid navbar tags\n---\nAdd another item to the list of things that are not so easy for\nme: tags. \nSomething so straightforward as tagging posts and pages\nfor content should be easy to implement in a language that touts\nitself as easy and accessible. \n\nFor a rank beginner (hi!) this is not so easy. I'll save my \nmisadventures in linking to tags for another post, because the\ntrue subject here is ...\n### The navbar\nI encountered issues with the navbar in one of my attempts to\ncreate pages for the tags. All of a sudden, the navbar filled \nwith `Tag: aws` and `Tag: python` and all of the other tags \naccumulated through my posts. \n\n### What is going on?\nSome fumbling around -- remember, noob here -- led me to the\ndefault (I think?) header.html in the default minima theme. This\nis a reusable snippet of html that can be included on any page,\nand is conveniently found in the `_includes` folder.\n\nThe culprit was here:\n\n```\n...if my_page.title ...\n\n```\nSee that? `if my_page.title` meant that every single page that\nhad a title would appear in the navbar. Doh!\n\n### My solution\nI added a flag variable to the pages I want to appear in the \nnavbar. Specifically, in `about.html` and `blog.html` (should these\nbe `.md`?), to the front matter:\n```\n---\nnavbar: true\n---\n```\nThen I returned to the `header.html` file and changed the \nvariable controlling appearance in the header to \n`my_page.navbar`.\n### Problem solved! \nAnd now back to the tagging problem...\n\n### Haha\nAlso joke's on me when I attempted to cite the Liquid code, my\nserver raised an error...so I'll have to figure that out too." }, { "alpha_fraction": 0.5676172971725464, "alphanum_fraction": 0.5722171068191528, "avg_line_length": 34.6065559387207, "blob_id": "5b43c6d8f9c84de61a6824f47af02fc333dad4cb", "content_id": "67a6bdfa073ecc7ea230ce44a5c5342732542e20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2174, "license_type": "no_license", "max_line_length": 110, "num_lines": 61, "path": "/tree_cli/tree_explore.py", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "#! 
/Users/***/lib/anaconda3/envs/tree_cli/bin/python\n\n\"\"\"\n\nUsage:\n tree_explore.py [--dir DIR|-d DIR]\n [--level LEVEL|-l LEVEL]\n [--file_color FILE_COLOR|-f FILE_COLOR]\n [--dir_color DIR_COLOR|-r DIR_COLOR]\n tree_explore.py [-h|--help]\n\nOptions:\n -h --help What happens here?\n --dir -d DIR Optional directory argument [default: .]\n --level -l LEVEL Optional level argument [default: 3]\n --file_color -f FILE_COLOR Optional file color in hex (in quotes) or valid name [default: deep_pink_2]\n --dir_color -r DIR_COLOR Optional directory color in hex (in quotes) or valid name [default: thistle_1]\n\n\"\"\"\n\nimport os\nfrom docopt import docopt\nfrom colored import fg, attr, stylize\n\ndef recursive_list(pathdir, level, indent):\n '''\n :param pathdir: string of path\n :param level: how deep to map the tree\n :param indent: how far to indent\n :return: to get out of recursive loop\n '''\n if indent == 0:\n print(stylize(os.path.abspath(pathdir), fg(dir_color) + attr('bold')))\n if level < 0:\n return\n #consider creating an option to sort by date, or size, but keep default alphabetical\n dir_list = sorted(os.listdir(pathdir))\n for d in dir_list:\n for i in range(0, indent):\n print(stylize('| ', fg('#ffffff')), end='')\n print(stylize('|___', fg('#ffffff')), end='')\n if os.path.isdir(pathdir + '/' + d):\n print(stylize(d, fg(dir_color) + attr('bold')))\n else:\n print(stylize(d, fg(file_color)))\n if os.path.isdir(pathdir + '/' + d):\n try:\n recursive_list(pathdir + '/' + d, level - 1, indent + 1)\n except PermissionError:\n pass\n return\n\nif __name__=='__main__':\n # docopt saves arguments and options as key:value pairs in a dictionary\n args = docopt(__doc__)\n # turn the arguments into global variables\n level = int(args['--level'])\n dir_color = args['--dir_color']\n file_color = args['--file_color']\n path_dir = args['--dir']\n recursive_list(path_dir, level, 0)\n\n\n" }, { "alpha_fraction": 0.595652163028717, "alphanum_fraction": 0.5978260636329651, "avg_line_length": 25.538461685180664, "blob_id": "efc86b26191f8a9780cabb269a2cffbdacacd5b5", "content_id": "76d73e29f9fee773d63d4ff94b85a30436fbf989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1380, "license_type": "no_license", "max_line_length": 179, "num_lines": 52, "path": "/about.markdown", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: page\ntitle: About\npermalink: /about/\nnavigation: true\n---\nThis page tells you nothing about me! However ...\n\nAll the tags on this site:\n{% comment %}\nSimply list all the tags...\n{% endcomment %}\n{% for tag in tags %}\n <a href=\"#{{ tag | slugify }}\"> {{ tag }} </a>\n{% endfor %}\n\nAll posts with tags:\n{% comment %}\nSimply list all the posts that have a certain tag.\n{% endcomment %}\n\n{% for tag in tags %}\n <h2 id=\"{{ tag | slugify }}\">{{ tag }}</h2>\n <ul>\n {% for post in site.posts %}\n {% if post.tags contains tag %}\n <li>\n\t <h3>\n\t <a href=\"{{ post.url }}\">{{ post.title }}\n <small>{{ post.date | date_to_string }} </small>\n\t </a>\n {% for tag in post.tags %}\n\t\t<a class=\"tag\" href=\"/tag/#{{ tag | slugify }}\">{{ tag }}</a>\n\t {% endfor %}\n </li>\n {% endif %}\n {% endfor %}\n </ul>\n{% endfor %}\n\nThis is the base Jekyll theme. 
You can find out more info about customizing your Jekyll theme, as well as basic Jekyll usage documentation at [jekyllrb.com](https://jekyllrb.com/)\n\nYou can find the source code for Minima at GitHub:\n[jekyll][jekyll-organization] /\n[minima](https://github.com/jekyll/minima)\n\nYou can find the source code for Jekyll at GitHub:\n[jekyll][jekyll-organization] /\n[jekyll](https://github.com/jekyll/jekyll)\n\n\n[jekyll-organization]: https://github.com/jekyll\n" }, { "alpha_fraction": 0.7682119011878967, "alphanum_fraction": 0.7708609104156494, "avg_line_length": 49.266666412353516, "blob_id": "eaa8eb0ed78055480770d730308885fc1cbc703e", "content_id": "e51c576ea7daeb3dd80753690703e1651aa5579f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 755, "license_type": "no_license", "max_line_length": 205, "num_lines": 15, "path": "/_posts/2020-08-28-AWS-Lambda-Error.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntitle: AWS Lambda error fix\ncategory: server\ntags: aws lambda error env tip\n---\n\n# Everything was rolling along...\nWe have a handful of lambda functions running on AWS. Everything was running as it should until this morning.\n\n`ERROR: Could not install packages due to an EnvironmentError: [Errno 28] No space left on device`\n\nIt was just good luck that I was updating a lambda at the same time that this error showed up and propagated through all our lambdas. I should probably have some sort of alert set up for when this happens.\n\nOur tech lead figured it out quickly -- he referenced [this page](https://epsagon.com/tools/free-lambda-code-storage-exceeded/) and made some updates. I triggered the lambda to test it, and all was well.\n\n" }, { "alpha_fraction": 0.7457741498947144, "alphanum_fraction": 0.7545638680458069, "avg_line_length": 76.78947448730469, "blob_id": "5c2a550c2978e53548c5212b67682d76bc6c9264", "content_id": "4ecafa710d7cddfb4a24700a60a6d806998f311e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1479, "license_type": "no_license", "max_line_length": 289, "num_lines": 19, "path": "/_posts/2020-09-08-Adventures-in-Tagging-part-1.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntags: tags ruby git til\n---\nThe headline is\n### I got the tagging to work!!!\nDon't know why I expected tagging to have some sort of easy implementation. Clearly I don't know what I'm doing! \n\nAttempts included:\n* creating tag pages by hand. Clearly the wrong solution.\n* writing a python script to create tag pages. Turned out to be the wrong solution because (1) the way I wrote it require running the script every time I create a post and (2) the foundation for Jekyll is Ruby.\n* finding the ['jekyll-tagging'](https://github.com/pattex/jekyll-tagging) gem*, blindly following the readme, and getting super frustrated that \"it isn't working!\"\n* finding [this blog post](https://jameshfisher.com/2019/05/05/how-can-i-add-tags-to-a-jekyll-blog/) and copy-pasting the code. **This was actually *almost* there.**\n* deciding that I should really sit down and understand the code that I had just copy-pasted...never having looked at Ruby before, this was not a 5 minute exercise. Some odd syntax to learn here. I would like to shake James H. 
Fisher's hand for writing the tag generating code in the first place.\n* multiple branches in my github repo, and finally one frustrated reset master to at least 10 commits back.\n\nBut here I am! I have tags with links that work and pages that have information on them! Hooray! I'm going to bed.\n\nTIL from * above: There's an app for that! In Python, there's a package for that! And now I know that in Ruby, there's a gem for that! \n" }, { "alpha_fraction": 0.7824631929397583, "alphanum_fraction": 0.7918339967727661, "avg_line_length": 64, "blob_id": "66534f915f03618444d67b4753e12680889c387f", "content_id": "1a94687391fc54c58a3b6b4cad982647d70fab86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1494, "license_type": "no_license", "max_line_length": 278, "num_lines": 23, "path": "/_posts/2020-10-05-Real-Python-Project.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\nauthor: louloucodes\ntitle: Directory Tree Generator Challenge\ncategory: python\ntags: python challenge project portfolio CLI recursion\n---\n# No longer a beginner :snake:\n### I'll call myself an intermediate Pythonista\n\nThe [Real Python](https://realpython.com) website has been a go-to resource in my Python journey. I have landed on this page again and again as I google topics from OOP to data structures to pandas. \n\n[13 Project Ideas for Intermediate Python Developers](https://realpython.com/intermediate-python-project-ideas/) caught my eye. With less than a year of daily Pythoning at work (mostly data science with some development mixed in), could I call myself an intermediate developer? \n\nThe Directory Tree Generator looked like a project that I could actually use in real life -- and would be a good challenge, so I went for it.\n\nMy interpretation of the project turned out to have four challenges -- not all of which were Python, specifically:\n1. [Traversing the directory structure](https://louloucodes.github.io/python/2020/10/06/Recursive-traversing-directory-structure.html) \n2. Printing to the terminal in color\n3. Converting the script into a command line utility\n4. Making the script executable\n\nRead more about the mini-challenges in their respective blog posts, then check out the code in this GitHub repo -- it will be my first posted project for my programming portfolio. Such a satisfying feeling to create a command line tool to do just what I want it to do!" }, { "alpha_fraction": 0.7459604740142822, "alphanum_fraction": 0.757314145565033, "avg_line_length": 122.72222137451172, "blob_id": "3ec8aabfac746e49957ad9eaa020d2d2cb4ae39d", "content_id": "97e2b888b2b155c34c3b2ef134bf861041ff0eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2228, "license_type": "no_license", "max_line_length": 463, "num_lines": 18, "path": "/_posts/2020-09-06-Cron_job.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntitle: Cron Job\ntags: cron schedule automation AWS lambda\n---\n### Cron Job: how to make my life a little easier\nI've heard \"automation\" will take jobs away, and that sounds bad. In my job, some tasks are easy enough to do but they take my time and attention away from other, more interesting issues. For me, automation frees up time to get the real work done.\n\nMy first exposure to automating a programming task was AWS Lambda. 
Instead of running a script every day at noon to create a file to store in an AWS S3 bucket, I programmed a lambda function to do it for me. \nThis function is the same as the script I run from my machine plus a \"lambda handler\" method -- akin to a `main()` function -- that runs when triggered. For my purposes, the trigger is a time of day. [Read more about scheduling an AWS Lambda here.](https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchevents-expressions.html)\n\nTo create the trigger, I used a \"cron expression.\" Here is an example if you haven't seen one before. The cron expression to run every day at noon is `0 0 12 * * *`. There are a zillion sites that explain this, so I will not add to the pool. Instead I direct you to [the site I used to figure this out.](https://www.freeformatter.com/cron-expression-generator-quartz.html)\n\nBut what if you don't want to use AWS to schedule a repeating task? From my own (mine ... haha, NOT mine but my company's) server, I edit a cron table, or \"crontab.\" To do that, bring up terminal and type `crontab -e`. [This site has more information.](https://opensource.com/article/17/11/how-use-cron-linux) Add your cron expression followed by the path to the executable file that you are scheduling to run. And then...sit around and wait for it to do its job.\n\nIf you don't want to sit around and wait, test with a cron expression that will trigger in a couple of minutes. For example, if it is 3:19pm and I want to test the execution of my script, I'll write a five-field crontab expression to go off at 3:21pm with `21 15 * * *`. As soon as I confirm that it worked, I'll go back into the crontab and remove the line.\n\nNote that the executable should create a log! I'll cover that in another post -- separation of concerns and all that.\n\n" }, { "alpha_fraction": 0.7263083457946777, "alphanum_fraction": 0.7298443913459778, "avg_line_length": 41.84848403930664, "blob_id": "f0411069cff012752314f6b60116c1de852ea7a8", "content_id": "914f66ccfdee2018cd6ddacdf3eb12c2c91dbb5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 397, "num_lines": 33, "path": "/_posts/2020-08-25-Python-max.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\nauthor: louloucodes\ntitle: Python tip of the day\ncategory: python\ntags: python til tip documentation example max lambda\n---\n\n# Today I Learned: Python tip\n### The little lambda that could\n\nI needed to find the maximum of values in a dictionary. I had an old school (and inefficient) solution in mind:\n```\nmax = 0\nmax_key = ''\nfor entry in my_dictionary:\n    if len(my_dictionary[entry]) > max:\n        max = len(my_dictionary[entry])\n        max_key = entry\n```\nAnd then I made the wise choice to see if there is a better way, and of course there is. *Always check the documentation.* Documentation is still a bit of a slog for me, so often I have to search for examples too.\nHere is the [documentation](https://docs.python.org/3/library/functions.html#max).\n\nI don't remember what the example was, so I will record my own! Here it is for posterity:\n```\nmax(my_dictionary.keys(), key = (lambda entry: len(my_dictionary[entry])))\n```\n\nHow wonderful!\n\nI have not unleashed the complete power of the [`max` function](https://docs.python.org/3/library/functions.html#max) as described in the documentation, but I now have a taste of it. 
Also can be used with [`min`](https://docs.python.org/3/library/functions.html#min) and [`sorted`](https://docs.python.org/3/library/functions.html#sorted) and maybe even other yet-to-be-discovered-by-me functions.\n\nPython :snake:, I think I :sparkling_heart: you.\n" }, { "alpha_fraction": 0.7079579830169678, "alphanum_fraction": 0.7087087035179138, "avg_line_length": 31.487804412841797, "blob_id": "0aec4bd8a94c6b5b6579f2befeaf7b886bb4a7bc", "content_id": "d475dbf207a788ede2c6faf331d9f8588b83e015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1332, "license_type": "no_license", "max_line_length": 66, "num_lines": 41, "path": "/_posts/2020-08-31-TIL-join.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntitle: TIL .join\ntags: til python\n---\n### Code is read more often than it is written.\nWho said it first? Does it matter? It is a point of fact. \n\nI know my code is inefficient, non-pythonic, and medieval. I know\nthis even as I write it! And yet I do not know what I do not know.\nUntil I read someone else's code -- and then I say, \"I didn't \nknow you could do that!\" and then I put it in my (hand-written)\nnotes with hearts and stars. \n\nToday I learned about `.join`. I had a list of email addresses to\nturn into a string, with the addresses separated by commas.\n \nHere is what I was about to do:\n```\nemail_str = ''\nemail_list = [ ... ] # list of email addresses\nfor i in range(len(email_list) - 1):\n    # purposefully leaving off the last address\n    email_str += email_list[i] + ', '\nemail_str += email_list[-1]\n```\nThis...this is old school thinking. A little pythonic thinking\nsnuck in there with the negative indexing to get the last \nelement of the list.\n\nThen I read a snippet written by a coder I admire...and did a \ndouble take. What magic is this??? I tried it myself.\n\n`', '.join(email_list)`\n\nThat's it. That's what I needed to do. One line.\n\nI'm beginning to think\nthat when there's some functionality that should exist... it \nalready does. Python :snake:, every day I :sparkling_heart: you\na little more. " }, { "alpha_fraction": 0.7643758654594421, "alphanum_fraction": 0.7713884711265564, "avg_line_length": 88.0625, "blob_id": "bf0c418f1dfd4e3a6e6b4b4a5c2d0c0b0b0b0b0b", "content_id": "2b369d2f8c7e9a08609fe5c2a3e450ed092d563d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1426, "license_type": "no_license", "max_line_length": 455, "num_lines": 16, "path": "/_posts/2020-10-07-terminal-font-colors.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\nauthor: louloucodes\ntitle: Directory Tree Generator Challenge -- Customizing\ncategory: python\ntags: python challenge project portfolio CLI recursion\n---\n# Directory Tree Generator: Making it mine\n\nSo one advantage of [making this tool](https://louloucodes.github.io/python/2020/10/05/Real-Python-Project.html) for myself is that I can make it do what I want and look how I want, and what I wanted was colorful printing to the terminal to help distinguish the files from the directories.\n\nThe Real Python folks suggested the [pypi `colored` package](https://pypi.org/project/colored/), and I found it easy to implement. I used the `stylize`, `fg` (foreground), and `attr` (attribute) functions to print in color and boldface to the terminal. 
There's a great list of named colors -- I was taken by `thistle_1` and `deep_pink_2` -- but any hex value color will work. I'm a big fan of Google's color picker for finding hex values of shades I like.\n\nOf course, I won't always want pink and thistle for my tree, so the user can choose what values to use for the files and the directories.\n\nAfter using my CLI tree for a couple of days, I recognized another customization that I would like to implement: organization of the tree by level. Sometimes it would be useful to have this by date, or by size, or alphabetically. By the time I publish the code in my repo, I plan to have this customization implemented too.\n\n" }, { "alpha_fraction": 0.7539568543434143, "alphanum_fraction": 0.757314145565033, "avg_line_length": 55.35135269165039, "blob_id": "3ebedfbd7d312f7532d5793bb07f9d6789db7964", "content_id": "bd85d09e220651f53721fdf461ff1e19e77f3134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2085, "license_type": "no_license", "max_line_length": 343, "num_lines": 37, "path": "/_posts/2020-09-10-Adventures-in-Tagging-Part-2.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\ntags: tags ruby jekyll github workflow\n---\n\nIt turns out that celebrating successful tagging was premature. The links to the tags worked *only on my local machine*. \n\nThere were a couple of wrong turns in my search to find the solution...that I will not record. The final answer came from my husband*. He read the documentation -- and it made sense to him. [Here is the link to GitHub's page about approved plug-ins.](https://docs.github.com/en/github/working-with-github-pages/about-github-pages-and-jekyll). \n\nThe key information here is\n> GitHub Pages cannot build sites using unsupported plugins. If you want to use unsupported plugins, generate your site locally and then push your site's static files to GitHub.\n\nIs the `jekyll-tagging` plugin supported by GitHub? **No**, it is not. \nWhen I build my site locally with `bundle exec jekyll serve`, the tagging gem generates tag pages in the `_site` folder, from which the pages are served.\n\nGitHub does NOT serve pages from `_site` -- and there's no way to get around it. (I shouldn't say \"no way,\" rather, no way that I found.)\n\nThe solution is to change my workflow**.\n\n#### Workflow before:\n1. Write a blog post on my machine.\n2. Generate my site locally: `bundle exec jekyll serve`.\n3. Push changes to the GitHub repo.\n\n#### Workflow now:\n1. Write a blog post on my machine.\n2. Generate my site locally: `bundle exec jekyll serve`.\n3. Replace the existing `tag` directory in the home directory with the `tag` directory from `_site` (the static files referred to in the quote above).\n4. Push changes to the Github repo.\n\nI will probably end up writing a bash script to automate the workflow, but that will be another blog post.\n\nProof of concept: see the tag links below? Click on one of them! \n\nThe * above on husband because this one word does not do justice to all of his roles in my life: mentor, champion, git whisperer, programming wizard, to name just a few. 
He's generally awesome.\n\nI generally avoid the word workflow** because I dislike business jargon, but here it feels meaningful and descriptive.\n" }, { "alpha_fraction": 0.7810183167457581, "alphanum_fraction": 0.7810183167457581, "avg_line_length": 100, "blob_id": "089722f5020c41056127b56d8eb3b361a5091b27", "content_id": "cd5a3e377ba10feb053cdca30067b60eb33e974e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2023, "license_type": "no_license", "max_line_length": 487, "num_lines": 20, "path": "/_posts/2020-10-06-Recursive-traversing-directory-structure.md", "repo_name": "louloucodes/louloucodes.github.io", "src_encoding": "UTF-8", "text": "---\nlayout: post\nauthor: louloucodes\ntitle: Directory Tree Generator Challenge -- Recursive Solution\ncategory: python\ntags: python challenge project portfolio CLI recursion\n---\n# Application of recursion\n\nAt first glance, I thought the hardest part of the [Directory Tree Generator challenge](https://realpython.com/intermediate-python-project-ideas/#directory-tree-generator) would be traversing the directory structure. My instincts turned out to be wrong, as I solved the general problem with the os package and a single, recursive function.\n\nThe os package provides the functionality to explore the file structure, and in particular `os.listdir` and `os.path.isdir`. As I wrote it, the directory tree generator allows the user to choose which directory to be the root of the tree and how deep to traverse the file structure.\n\nI did not *intend* to solve this problem recursively, but it turned out to be the natural solution, as nested for-loops have their limits -- this would have been fine if I had been satisfied with hard-coding in the depth of the tree. But no, I wanted the user to determine that, not the coder. (The fact that I am both the user and the coder does not matter -- what I want tomorrow may not be what I want today.)\n\nMy biggest fear with recursion is getting stuck in an infinite loop. My habit is to put the kick-out condition [see note below] at the top of the function so that I won't forget to do it later. Also, when I say habit -- it's been a minute since the last time I wrote a recursive function. If I think about it, it was before I had children, back when I taught computer science to high school students. My own children are now both in high school, and yet this habit came right back to me.\n\nSatisfied with my solution to the base problem, the next challenge was to figure out how to visually distinguish the files from the directories. And that will be the next post.\n\nNote: the technical term, which did not come back to me but was easy enough to look up, is *base case*.\n\n\n\n" } ]
15
Theskyspace/PPDT
https://github.com/Theskyspace/PPDT
860af0ce18571229eabc79259e9654e382def505
4df697be7e6194b0b88904c2b54b227919623b71
75b84ed842da34310c2242274f292181bb84ef36
refs/heads/main
2023-08-09T11:07:41.108920
2021-09-10T05:21:28
2021-09-10T05:21:28
307,361,153
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6094069480895996, "alphanum_fraction": 0.685071587562561, "avg_line_length": 15.300000190734863, "blob_id": "e7938c1ccdec7cd2aa1779f0b6b4f5760d50090a", "content_id": "7f22b73cb3213a83b20039575a8b7b91498e2d2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 61, "num_lines": 30, "path": "/Python_script.py", "repo_name": "Theskyspace/PPDT", "src_encoding": "UTF-8", "text": "import tkinter\n\nimport os\nimport random \nimport time\nimport winsound\n\n\ntop = tkinter.Tk()\n\ncont = 1\n\npath=\"E:\\\\Coding zone\\\\PP&DT\\\\src\" #Path to your location\nfiles=os.listdir(path)\n\nfor i in range(10):\n\td=random.choice(files)\n\tprint(d)\n\tos.startfile(d)\n\tcont = int(input(\"Do you want to continue with this?(1/0)\"))\n\tif cont == 1:\n\t\ttime.sleep(30)\n\t\twinsound.Beep(300, 10000)\n\t\t\n\t\ttime.sleep(215)\n\t\twinsound.Beep(400, 10000)\n\t\ttime.sleep(10)\n\t\twinsound.Beep(500, 10000)\n\telse:\n\t\tcontinue\n" }, { "alpha_fraction": 0.7341772317886353, "alphanum_fraction": 0.7405063509941101, "avg_line_length": 30.600000381469727, "blob_id": "272f5351ec300b27cb06fa41f9def26dba1620c1", "content_id": "7c9f1f50302feff3c1d397c0fae8792944ac7267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 158, "license_type": "no_license", "max_line_length": 71, "num_lines": 5, "path": "/README.md", "repo_name": "Theskyspace/PPDT", "src_encoding": "UTF-8", "text": "# PPDT\nThis program stimulates the SSB PPDT test from timing to image sharing.\n\n## Build\n1. Get python in your PC. [Python Download](https://www.python.org/)\n" } ]
2
SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System
https://github.com/SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System
e508760eb7914042ae6f8a85ddc70856a0a3e93c
260bb58e20a52471579d24d73bbb8cd57fd7df34
bd09676403772922be5c9cd5a209a214fc24b76c
refs/heads/main
2023-07-26T13:43:55.902056
2021-08-30T13:58:41
2021-08-30T13:58:41
401,361,229
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7881041169166565, "alphanum_fraction": 0.7881041169166565, "avg_line_length": 37.42856979370117, "blob_id": "635cfc3fc05225c208494fda1a23e2c5b0379ad9", "content_id": "35522fc3bedc57b736cc042b157fb790929424a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 269, "license_type": "no_license", "max_line_length": 115, "num_lines": 7, "path": "/README.md", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "# Conceptual Framework\n\n![](https://github.com/SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System/blob/main/framework.PNG?raw=true)\n\n# Work Flow\n\n![](https://github.com/SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System/blob/main/workflow.PNG?raw=true)\n" }, { "alpha_fraction": 0.4943566620349884, "alphanum_fraction": 0.5286681652069092, "avg_line_length": 25.047618865966797, "blob_id": "b23ae05270309f4f00eedb770417a465c2c15276", "content_id": "b5946d1f98a6c64ffff1fddf41d0e969f2f51aca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2215, "license_type": "no_license", "max_line_length": 64, "num_lines": 84, "path": "/main_function.py", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO \nimport time \nfrom capture_image import * \nfrom process_db import *\nfrom process_iamge import * \nfrom voice_alert import * \nfrom send_message import * \nfrom metal_detect import * \n \nGPIO.setmode(GPIO.BCM) \n\nTRIG1 = 23 \nECHO1 = 24 \nTRIG2 = 14 \nECHO2 = 15 \nPOWER = 5 \n\nGPIO.setup(TRIG1,GPIO.OUT) \nGPIO.setup(ECHO1,GPIO.IN)\nGPIO.setup(TRIG2,GPIO.OUT) \nGPIO.setup(ECHO2,GPIO.IN) \nGPIO.setup(POWER,GPIO.OUT) \n\nwhile True:\n\n\tGPIO.output(TRIG1, False) \n\tprint \"Waitng For Sensor To Settle\"\n\ttime.sleep(2) \n\n\tGPIO.output(TRIG1, True) \n\ttime.sleep(0.00001) \n\tGPIO.output(TRIG1, False) \n\n\twhile GPIO.input(ECHO1)==0 : \n\t\tpulse_start1 = time.time() \n\n\twhile GPIO.input(ECHO2)==0 : \n\t\tpulse_start2 = time.time() \n\n\twhile GPIO.input(ECHO1)==1 : \n\t\tpulse_end1 = time.time()\n\n\twhile GPIO.input(ECHO2)==1 : \n\t\tpulse_end2 = time.time() \n\n\tpulse_duration1 = pulse_end1 - pulse_start1 \n\tpulse_duration2 = pulse_end2 - pulse_start2 \n\n\tdistance1 = pulse_duration1 * 17150 \n\tdistance1 = round(distance1, 2) \n\n\tdistance2 = pulse_duration2 * 17150 \n\tdistance2 = round(distance2, 2) \n\n\tif distance1 > 2 and distance1 < 400 && distance2 == 0 : \n\t\tprint \"person detected outside entrance\" \n\t\tcapture_image()\n\t\tprocess_db()\n\t\twho = process_image()\n\t\tvoice_alert_ID(who)\n\t\tmetal = metal_detect()\n\t\tmetal_alert(metal)\n\t\tif who != 'unknown' :\n\t\t\tvoice_alert_ask()\n\t\t\tcommand = def listen_command()\n\n\t\t\twhile (command != 'yes' && command != 'no')\n\t\t\t\tsay_again()\n\t\t\t\tcommand = def listen_command()\n\n\t\t\tif command == 'yes':\n\t\t\t\tGPIO.output(led_pin, GPIO.HIGH)\n\t\t\t\tvoice_alert_enter()\n\t\t\t\ttime.sleep(5)\n\t\t\t\tGPIO.output(led_pin, GPIO.LOW)\n\t\t\t\tvoice_alert_close()\n\t\t\t\tbreak\n\n\t\t\telif command == 'no':\n\t\t\t\tvoice_alert_deny()\n\t\t\t\tbreak\n\n\telif distance2 != 0 :\n\t\tsend_message()\t\t\t\n\t\t\n " }, { "alpha_fraction": 0.5970017910003662, "alphanum_fraction": 0.6181657910346985, "avg_line_length": 31.399999618530273, "blob_id": "20e057161e8fc434c919bfd85987fc41fe0ab983", "content_id": 
"25050e939fcf8eefcc9cfce6826e5b19435afe40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 88, "num_lines": 35, "path": "/process_image.py", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "import face_recognition\nimport imutils\nimport pickle\nimport time\nimport cv2\nimport os\n \ndef process_image():\n\tcascPathface = os.path.dirname(cv2.__file__) + \"/data/haarcascade_frontalface_alt2.xml\"\n\tfaceCascade = cv2.CascadeClassifier(cascPathface)\n\tdata = pickle.loads(open('face_enc', \"rb\").read())\n\timage = cv2.imread('captured/saved_img.jpg')\n\trgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tfaces = faceCascade.detectMultiScale(gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(60, 60),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\tcv2.imshow(\"Frame\", image)\n\tcv2.waitKey(0)\n \n\tencodings = face_recognition.face_encodings(rgb)\n\tnames = []\n\tfor encoding in encodings:\n\t\tmatches = face_recognition.compare_faces(data[\"encodings\"],encoding)\n\t\tname = \"Unknown\"\n\t\tif True in matches:\n\t\t\tname = data[\"names\"]\n \t\tcounts[name] = counts.get(name, 0) + 1\n \t\tname = max(counts, key=counts.get)\n \n\t\tnames.append(name)\n\t\tbreak\n\treturn names\n" }, { "alpha_fraction": 0.665429413318634, "alphanum_fraction": 0.6932681202888489, "avg_line_length": 39.32653045654297, "blob_id": "bdd7cc6b4527b669bec2a996aea7db5bbab3ad74", "content_id": "334d152983568a6e313666b968de471888d51871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5929, "license_type": "no_license", "max_line_length": 243, "num_lines": 147, "path": "/screen_interface.py", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "import os\nimport tkinter \nimport tkinter.filedialog\nimport shutil\nimport integrated\nimport random\nimport time\nfrom tkinter.font import Font\nfrom tkinter import *\nfrom tkinter import messagebox\n\n\ntop = Tk()\nroot = tkinter.Tk()\n\nC = Canvas(top, bg=\"blue\", height=500, width=800)\nfilename = PhotoImage(file = \"background.png\")\nbackground_label = Label(top, image=filename)\nbackground_label.place(x=0, y=0, relwidth=1, relheight=1)\n\nC.pack()\n\ntop.title(\"SMART HOME SECURITY SYSTEM\")\n\n\n\ndef settings():\n tp = Toplevel()\n C = Canvas(tp, bg=\"pink\", height=500, width=800)\n filename = PhotoImage(file=\"background.png\")\n background_label = Label(tp, image=filename)\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\n C.pack()\n tp.title(\"SMART HOME SECURITY SYSTEM\")\n\n b1 = Button(tp, text=\" ADD PHOTO\", bg=\"pink\", fg=\"purple\", anchor='w', relief=RAISED, height=1, width=15,command=save_photo)\n b1_window = C.create_window(330, 115, anchor='nw', window=b1)\n b1.config(font=('helvetica', 15, 'bold'))\n\n #b2 = Button(tp, text=\"RENAME PHOTO\", bg=\"pink\", fg=\"purple\", anchor='w', relief=RAISED,\n # height=1, width=15)\n #b2_window = C.create_window(330, 145, anchor='nw', window=b2)\n #b2.config(font=('helvetica', 15, 'bold'))\n \n b3 = Button(tp, text=\"REMOVE PHOTO\", bg=\"pink\", fg=\"purple\", anchor='w', relief=RAISED,\n height=1, width=15,command=remove_photo)\n b3_window = C.create_window(330, 175, anchor='nw', window=b3)\n b3.config(font=('helvetica', 15, 'bold'))\n\n\n tp.mainloop()\n\n\ndef 
face_recognize():\n    exec(integrated.sonar_range())\n    \ndef save_photo():\n    f = tkinter.filedialog.askopenfilename(\n        parent=root, initialdir='/home',\n        title='Choose file',\n        filetypes=[('png images', '.png'),\n                   ('jpg images', '.jpg'),\n                   ('jpeg images', '.jpeg')]\n    )\n    \n    shutil.move(f, \"/home/pi/Desktop/final/images\")\n    \ndef remove_photo():\n    f = tkinter.filedialog.askopenfilename(\n        parent=root, initialdir='/home/pi/Desktop/final/images',\n        title='Choose file',\n        filetypes=[('png images', '.png'),\n                   ('jpg images', '.jpg'),\n                   ('jpeg images', '.jpeg')]\n    )\n    \n    os.remove(f)\n    \n\n\ndef open_window():\n    tp=Toplevel()\n    C = Canvas(tp, bg=\"pink\", height=500, width=800)\n    filename = PhotoImage(file=\"background.png\")\n    background_label = Label(tp, image=filename)\n    background_label.place(x=0, y=0, relwidth=1, relheight=1)\n    C.pack()\n    tp.title(\"SMART HOME SECURITY SYSTEM\")\n\n    one = Label(tp, text=\"ABOUT\", bg=\"purple\", fg=\"white\", anchor='w')\n    one_window = C.create_window(95, 20, anchor='nw', window=one)\n    one.config(font=(\"Bold\", 20))\n\n\n\n    S = Scrollbar(one)\n    T = Text(one, height=25, width=80,bg=\"pink\",fg=\"purple\")\n    T.config(font=(\"Bold\", 10))\n    S.pack(side=RIGHT, fill=Y)\n    T.pack(side=LEFT, fill=Y)\n    S.config(command=T.yview)\n    T.config(yscrollcommand=S.set)\n    quote = \"\"\"A report by World Health Organization (WHO) and International Agency for Prevention of Blindness (IAPB) stated that there are approximately 285 million people around the world who are\nvisually impaired. Among these individuals, there are 39 million who are totally blind. Africa and\nother developing countries represent 90 percent of this statistic. According to WHO and IAPB, the\nnumber of blind people will increase worldwide to reach double by 2020.\nIt is difficult for blind people to move or live without help. There have been several systems\ndesigned to support visually-impaired people and to improve the quality of their lives. Unfortunately,\nmost of these systems are limited in their capabilities. Almost 90 percent of blind people have to depend\non others to ensure their safety. So, here we have thought about a project that will\ngive comfort to the blind people so that they don't need others to ensure their safety. Here our device\nwill be attached to the door and it will contain some features to ensure safety. Through our system users\ncan detect people who came to their home and if they know the people then they can command the door to open through an android device. The door will open automatically after the command so that\nblind people don't need to move. This device can also detect suspicious activity and will give an alert in any kind of danger situation. Our device will collect data from the surrounding environment (via either laser scanner, camera sensors, or sonar)\nand transmit it to the user either via tactile, audio format, or both. 
Both hardware and software parts\nwill be integrated in our device. There are many devices integrated for blind people but most are for their walking and reading purposes,\nbut we mainly focus on their security purpose so that they can stay safe by themselves.\"\"\"\n    T.insert(END, quote)\n\n    tp.mainloop()\n\n\none=Label(top,text=\"SMART HOME SECURITY SYSTEM\",bg=\"purple\",fg=\"white\",anchor='w')\none_window=C.create_window(80,5,anchor='nw',window=one)\none.config(font=(\"Bold\",30))\n\n\nb1=Button(top,text=\" START\",bg=\"pink\",fg=\"purple\",anchor='w', relief=RAISED, height =1, width =15,command=face_recognize)\nb1_window=C.create_window(330,115,anchor='nw',window=b1)\nb1.config(font=('helvetica',15, 'bold'))\n\n\nb2=Button(top,text=\" ABOUT\",bg=\"pink\",fg=\"purple\",anchor='w', relief=RAISED,command=open_window, height =1, width =15)\nb2_window=C.create_window(330,185,anchor='nw',window=b2)\nb2.config(font=('helvetica', 15, 'bold'))\n\n\nb3=Button(top,text=\" VIDEO TUTORIAL\",bg=\"pink\",fg=\"purple\",anchor='w', relief=RAISED, height =1, width =15)\nb3_window=C.create_window(330,255,anchor='nw',window=b3)\nb3.config(font=('helvetica', 15, 'bold'))\n\n\nb4=Button(top,text=\" SETTINGS\",bg=\"pink\",fg=\"purple\",anchor='w', relief=RAISED, height =1, width =15,command=settings)\nb4_window=C.create_window(330,325,anchor='nw',window=b4)\nb4.config(font=('helvetica', 15, 'bold'))\n\ntop.mainloop()" }, { "alpha_fraction": 0.6498316526412964, "alphanum_fraction": 0.7373737096786499, "avg_line_length": 37.25, "blob_id": "d0785b02aed8b1df7426ab5a3c914f6aad04226e", "content_id": "d09cc4567505e25daab4755ccf3df49851c50b73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 129, "num_lines": 8, "path": "/send_message.py", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "from twilio.rest import Client\n\ndef send_message():\n\taccount_sid = 'tw7923648'\n\tauth_token = 'abdtmol973wls'\n\tclient = Client(account_sid, auth_token)\n\tmessage = client.messages.create(body='Mr. 
X is in distress and needs immediate assistance!', from_='012836321', to='0817362791')\n\tprint(message.sid)" }, { "alpha_fraction": 0.6571224331855774, "alphanum_fraction": 0.667859673500061, "avg_line_length": 21.532258987426758, "blob_id": "b0c1bad1c086e793343d3a6b3cb4fc2df74ba6b2", "content_id": "a0ed10a6d0d5397bb00be36d1020eb89a7b519ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1397, "license_type": "no_license", "max_line_length": 50, "num_lines": 62, "path": "/voice_alert.py", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "from gtts import gTTS\nimport os\n\n\ndef voice_alert_ID(who):\n\tlanguage = 'en'\n\tif who == 'unknown':\n\t\talert = 'unknown person is at the door'\n\telse:\n\t\talert = who +' is at the door'\n\n\tmyobj = gTTS(text=alert, lang=language, slow=False)\n\tmyobj.save(\"alert.mp3\")\n\tos.system(\"alert.mp3\")\n\n\ndef voice_alert_ask():\n\tlanguage = 'en'\n\task = 'do you want to open the door?'\n\tmyobj = gTTS(text=ask, lang=language, slow=False)\n\tmyobj.save(\"ask.mp3\")\n\tos.system(\"ask.mp3\")\n\ndef voice_alert_enter():\n\tlanguage = 'en'\n\task = 'door is open'\n\tmyobj = gTTS(text=ask, lang=language, slow=False)\n\tmyobj.save(\"enter.mp3\")\n\tos.system(\"enter.mp3\")\t\n\ndef voice_alert_close():\n\tlanguage = 'en'\n\task = 'door is closed'\n\tmyobj = gTTS(text=ask, lang=language, slow=False)\n\tmyobj.save(\"close.mp3\")\n\tos.system(\"close.mp3\")\n\ndef voice_alert_deny():\n\tlanguage = 'en'\n\task = 'door will not be opened'\n\tmyobj = gTTS(text=ask, lang=language, slow=False)\n\tmyobj.save(\"deny.mp3\")\n\tos.system(\"deny.mp3\")\t\n\ndef say_again():\n\tlanguage = 'en'\n\task = 'command is not clear. please try again'\n\tmyobj = gTTS(text=ask, lang=language, slow=False)\n\tmyobj.save(\"say_again.mp3\")\n\tos.system(\"say_again.mp3\")\n\ndef metal_alert(metal):\n\tlanguage = 'en'\n\task = ''\n\tif metal == 1 :\n\t\task = 'this person is carrying metal'\n\telse : \n\t\task = 'this person is not carrying metal'\n\n\tmyobj = gTTS(text=ask, lang=language, slow=False)\n\tmyobj.save(\"metal.mp3\")\n\tos.system(\"metal.mp3\")\t" }, { "alpha_fraction": 0.6438848972320557, "alphanum_fraction": 0.6618704795837402, "avg_line_length": 15.411765098571777, "blob_id": "d47af474c4e7c729a1b64cd7b58187d19cf1e01a", "content_id": "9d2c35d4538e6661cadb472bf9400b03a95b0842", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "no_license", "max_line_length": 40, "num_lines": 17, "path": "/metal_detect.py", "repo_name": "SanzidaMojibLuna/IoT-Based-Automated-Door-Accessing-System", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO \nimport time \n\nGPIO.setmode(GPIO.BCM)\n\nIN = 17\nOUT = 27\n\nGPIO.setup(IN,GPIO.IN) \nGPIO.setup(OUT,GPIO.OUT)\n\ndef metal_detect():\n\tGPIO.output(OUT, GPIO.HIGH)\n\ttime.sleep(5)\n\tmetal = GPIO.input(IN)\n\tGPIO.output(OUT, GPIO.LOW)\n\treturn metal" } ]
7
DevWithMe/AlgoExpert-Clone
https://github.com/DevWithMe/AlgoExpert-Clone
8d95cc2cd90b0df69bf810420db973440b9cdb6e
a62ebca61e535259de314e945c1c7b212522326e
e56651ad3305af526d65b86c46c5fafe3999cca7
refs/heads/master
2022-11-06T08:40:50.953294
2020-06-24T18:27:18
2020-06-24T18:27:18
276,244,633
1
2
null
2020-07-01T01:12:15
2020-07-01T01:11:12
2020-06-25T01:23:26
null
[ { "alpha_fraction": 0.5869725942611694, "alphanum_fraction": 0.5951147079467773, "avg_line_length": 34.86725616455078, "blob_id": "4f60542c47f8c5e42326bcaf1f19bde5579bdeda", "content_id": "6bbe8174c12facc9885cf180fe0e2408fe47349b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4053, "license_type": "no_license", "max_line_length": 135, "num_lines": 113, "path": "/CodeEditor/views.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom subprocess import Popen\nimport subprocess\nfrom termcolor import colored\nfrom .models import Code, Passed\nfrom django.contrib.auth.decorators import login_required\nfrom api.views import PROBLEMS\nfrom main.models import Profile\nfrom django.views.decorators.csrf import csrf_protect\n\nimport random\nimport string\n\nBASE_URL = \"https://algoexpertclone.herokuapp.com\"\n\ntokens = []\n\ndef random_str(digits=70):\n ans = \"\"\n for i in range(digits+1):\n ans += random.choice(string.ascii_letters + string.digits)\n return ans\n\n# Create your views here.\n@login_required(redirect_field_name='login')\ndef index(request, problem_id):\n try:\n PROBLEMS[problem_id-1]\n except IndexError:\n return HttpResponse(\"Problem doesn't exist\")\n if Profile.objects.get(user_id=request.user.id).paid == \"false\" and PROBLEMS[problem_id-1][\"paid\"] == \"true\":\n return HttpResponse(\"Purchase it before accessing this question!\")\n\n token = random_str()\n tokens.append(token)\n\n try:\n existing_code = Code.objects.get(user_id=request.user.id, problem_id=problem_id)\n except:\n return render(request, \"CodeEditor/index.html\", {\n \"title\": PROBLEMS[problem_id-1][\"title\"],\n \"description\": PROBLEMS[problem_id-1][\"description\"],\n \"id\": PROBLEMS[problem_id-1][\"id\"],\n \"starter\": PROBLEMS[problem_id-1][\"starter\"],\n \"token\": token\n })\n return render(request, \"CodeEditor/index.html\", {\n \"title\": PROBLEMS[problem_id-1][\"title\"],\n \"description\": PROBLEMS[problem_id-1][\"description\"],\n \"id\": PROBLEMS[problem_id-1][\"id\"],\n \"exist\": existing_code,\n \"token\": token\n })\n\n\n@login_required(redirect_field_name='login')\ndef run_code(request):\n code = request.GET[\"code\"]\n problem = int(request.GET[\"problem\"])\n try:\n existing_code = Code.objects.get(user_id=request.user.id, problem_id=problem)\n existing_code = Code.objects.filter(user_id=request.user.id, problem_id=problem).update(code=code)\n except:\n code_ = Code(user_id=request.user.id, problem_id=problem, code=code)\n code_.save()\n\n solution = open(\"solution.py\", mode=\"w\")\n solution.write(code)\n solution.close()\n if problem == 1:\n out = Popen([\"python3\", \"-m\", \"unittest\", \"-q\", \"test.NthFib\"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n elif problem == 2:\n out = Popen([\"python3\", \"-m\", \"unittest\", \"-q\", \"test.PalindromeChecker\"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n stdout, stderr = out.communicate()\n data = str(stdout).split(\"======================================================================\")\n\n response = {\n \"error\": {}, # test_# : error message / passed,\n \"input\": {\"1\": 2, \"2\": 9, 3: \"random\", 4: \"random\", 5: \"random\", 6: \"random\", 7: \"random\"},\n \"stdout\": \"<br />\".join(str(stdout).split(\"\\\\n\"))\n }\n\n\n for i in range(1, len(data)):\n info = (\"<br 
/>\".join(data[i].split(\"\\\\n\")).split(\"----------------------------------------------------------------------\")[1])\n number = int(info.split(\"test_\")[1][0])\n response[\"error\"][number] = info\n for i in range(1, 8):\n try:\n response[\"error\"][i]\n except KeyError:\n response[\"error\"][i] = \"passed\"\n\n update = False\n if b\"Error\" in stdout:\n for i in range(1, 8):\n if response[\"error\"][i] == \"passed\":\n response[\"error\"][i] = \"Possible Syntax-related Errors, recommend to see raw output for more information\"\n\n passed = True\n for i in response[\"error\"]:\n if response[\"error\"][i] != \"passed\":\n passed = False\n\n if passed:\n try:\n exist = Passed.objects.get(user_id=request.user.id, problem_id=problem)\n except:\n passed = Passed(user_id=request.user.id, problem_id=problem)\n passed.save()\n\n return JsonResponse(data=response)\n" }, { "alpha_fraction": 0.6648044586181641, "alphanum_fraction": 0.6648044586181641, "avg_line_length": 24.714284896850586, "blob_id": "f3a645a551d03930f4aea218f31ec40d890324ee", "content_id": "f7978bb26d8b7bd5d845d2c505b3b3b765a51f35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 57, "num_lines": 7, "path": "/CodeEditor/urls.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"<int:problem_id>/\", views.index, name=\"index\"),\n path(\"compile/\", views.run_code, name=\"run_code\")\n]" }, { "alpha_fraction": 0.6660516858100891, "alphanum_fraction": 0.6660516858100891, "avg_line_length": 40.769229888916016, "blob_id": "876657a82c672ffbc191d0efc6a0386cde621ffd", "content_id": "185c11494451226ca76056622e81caf16448828d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 86, "num_lines": 13, "path": "/main/urls.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"info/<str:section>\", views.info, name=\"index\"),\n path(\"login/\", views.login_view, name=\"login\"),\n path(\"accounts/login/\", views.login_view, name=\"login_sys\"),\n path(\"github/authorized/\", views.github, name=\"github\"),\n path(\"logout/\", views.logout_view, name=\"logout\"),\n path(\"purchase/\", views.purchase, name=\"purchase\"),\n path(\"create-payment-intent\", views.create_payment, name=\"create-payment-intent\"),\n]" }, { "alpha_fraction": 0.8909090757369995, "alphanum_fraction": 0.8909090757369995, "avg_line_length": 8.333333015441895, "blob_id": "870f1b699f60857c6deab3738e1dacbfa553cdcb", "content_id": "f6b8cfbea9dbb047834225189e24c0f447b50232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 55, "license_type": "no_license", "max_line_length": 13, "num_lines": 6, "path": "/requirements.txt", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "Django\nrequests\ntermcolor\nstripe\ngunicorn\ndjango-heroku" }, { "alpha_fraction": 0.5857142806053162, "alphanum_fraction": 0.5928571224212646, "avg_line_length": 22.5, "blob_id": "ed03c4b5047e1878db16e96f7df3febf70979ad3", "content_id": "d8a88306c57e376d941e39200320b95f2562e2ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/solution.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "def palindrome(string):\n\tnew_str = \"\"\n\tfor i in string:\n\t\tif i.isalnum():\n\t\t\tnew_str += (i)\n\treturn new_str.lower()[::-1] == new_str.lower()" }, { "alpha_fraction": 0.6363263726234436, "alphanum_fraction": 0.6498565077781677, "avg_line_length": 54.45454406738281, "blob_id": "e74cb563f8de5d8839f0824db366366f3a3de917", "content_id": "fefa382c0657a71b37815760d36f89f6b1ca149f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2439, "license_type": "no_license", "max_line_length": 714, "num_lines": 44, "path": "/api/views.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponseRedirect, HttpRequest\nfrom django.urls import reverse\nfrom CodeEditor.models import Passed\nfrom main.models import Profile\n\n# Create your views here.\nPROBLEMS = [\n {\n \"id\": 1,\n \"title\": \"Nth Fibonacci Sequence\",\n \"description\": \"<p>Write a program to compute nth number of a <a href=\\\"https://en.wikipedia.org/wiki/Fibonacci_number\\\">fibonacci number sequence</a>.</p><br>Starting Numbers: <code>nth_fib(1) = 0</code>, <code>nth_fib(2) = 1</code><br><br><p>Example Input & Output: </p><ul><li><code>nth_fib(2) = 1</code></li><li><code>nth_fib(6) = 5</code></li></ul><br><h4>Note:</h4><p>If you see a RecursionError, make sure you try again, the maximum number for input is 15.</p> <br><br><h4>Video Walkthrough</h4><iframe width='500' height='315' src='https://www.youtube.com/embed/Hq13p2I5UbY' frameborder='0' allow='accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture' allowfullscreen></iframe>\",\n \"starter\": \"def nth_fib(n):\\n\\tpass\",\n \"paid\": \"false\"\n },\n {\n \"id\": 2,\n \"title\": \"Palindrome Checker\",\n \"description\": \"Return True if the string is a <a 
href='https://en.wikipedia.org/wiki/Palindrome'>palindromic string</a> (same as you read it backward as forward), given with following constraints: <ul><li>Only check the alphanumeric part of a string,</li><li>This is case insensitive, means that RaCeCar is a palindromic string.</li></ul> <br><br> <h4>Examples: </h4><ul><li><code>palindrome(\\\"race CAR\\\") = True</code></li><li><code>palindrome(\\\"2_A3*3#A2\\\") = True</code></li><li><code>palindrome(\\\"This is not palindrome string\\\") = False</code></li></ul>.\",\n \"starter\": \"def palindrome(n):\\n\\tpass\",\n \"paid\": \"true\"\n }\n]\n\n\ndef problems(request):\n pros = PROBLEMS.copy()\n for i in pros:\n try:\n Passed.objects.get(user_id=request.user.id, problem_id=i[\"id\"])\n i[\"complete\"] = \"True\"\n except Passed.DoesNotExist:\n i[\"complete\"] = \"False\"\n return JsonResponse(pros, safe=False)\n\n\ndef paid(request):\n try:\n request.META[\"HTTP_REFERER\"]\n Profile.objects.filter(user_id=request.user.id).update(paid=\"True\")\n request.session[\"paid\"] = \"True\"\n return JsonResponse(data={\"message\": \"updated\"})\n except:\n return JsonResponse(data={\"message\": \"invalid\"})" }, { "alpha_fraction": 0.6773840188980103, "alphanum_fraction": 0.6828644275665283, "avg_line_length": 34.10256576538086, "blob_id": "3e36eeb65f7ade79d2ef37f89aa812289798193b", "content_id": "997cbc609b334cb8bba36c2f6c52a00b49ca5ec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2737, "license_type": "no_license", "max_line_length": 195, "num_lines": 78, "path": "/main/views.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import Profile\nimport requests\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nimport stripe\n\n\nGITHUB_OAUTH_ID = \"9bbd97ddc2cda3583dcd\"\nGITHUB_OAUTH_SECRET = \"08bdac333b3c1fea74115e8551ffd96649a39890\"\nstripe.api_key = \"sk_test_z4lk3dEgEep4vHPlUqgV29fy00uhnUn8TF\"\n\n# Create your views here.\ndef index(request):\n return render(request, \"main/index.html\")\n\ndef info(request, section):\n if section != \"problem\":\n return HttpResponse(\"Hello World\")\n return HttpResponse(\"Hello Problem!\")\n\ndef login_view(request):\n return HttpResponseRedirect(f\"https://github.com/login/oauth/authorize?client_id={GITHUB_OAUTH_ID}\")\n\ndef github(request):\n code = request.GET[\"code\"]\n info = requests.post(f\"https://github.com/login/oauth/access_token?client_id={GITHUB_OAUTH_ID}&client_secret={GITHUB_OAUTH_SECRET}&code={code}\", headers={\"Accept\": \"application/json\"}).json()\n print(info)\n access_token = info[\"access_token\"]\n info = requests.get(\"https://api.github.com/user\", headers={\"Authorization\": f\"token {access_token}\"}).json()\n # print(info)\n # is user in the database\n try:\n user = (User.objects.get(username=info[\"login\"]))\n profile = Profile.objects.get(user=user)\n login(request, user)\n request.session[\"avator\"] = profile.avator\n request.session[\"paid\"] = profile.paid\n return HttpResponseRedirect(reverse(\"index\"))\n except User.DoesNotExist:\n user = User.objects.create_user(username=info[\"login\"], password=info[\"node_id\"], email=info[\"email\"])\n user.save()\n profile = Profile(user=user, paid=\"false\", 
avator=info[\"avatar_url\"])\n        profile.save()\n        login(request, user)\n        request.session[\"avator\"] = profile.avator\n        request.session[\"paid\"] = \"false\"\n        return HttpResponseRedirect(reverse(\"index\"))\n\n    return HttpResponse(str(info))\n\ndef logout_view(request):\n    logout(request)\n    return HttpResponseRedirect(reverse(\"index\"))\n\n\n@login_required(redirect_field_name='login')\ndef purchase(request):\n    if Profile.objects.get(user_id=request.user.id).paid == \"True\":\n        return HttpResponse(\"Well, I guess you don't have to pay twice.\")\n    return render(request, \"main/purchase.html\")\n\ndef create_payment(request):\n    try:\n        intent = stripe.PaymentIntent.create(\n            amount=9900,\n            currency='usd'\n        )\n\n        return JsonResponse({\n            'clientSecret': intent['client_secret']\n        })\n\n    except Exception as e:\n        return JsonResponse({'error': str(e)}, status=403)" }, { "alpha_fraction": 0.6963083457946777?, "alphanum_fraction": 0.7077363729476929, "avg_line_length": 25.714284896850586, "blob_id": "4e89dcf3f9cc28c60ea004b1a860362cb7f4d1be", "content_id": "fadb033566e6c91598adf82f598d7afd7cc94d64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/CodeEditor/models.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Code(models.Model):\n    # user_id, problem, code\n    user_id = models.IntegerField()\n    problem_id = models.IntegerField()\n    code = models.TextField()\n\nclass Passed(models.Model):\n    # user_id, problem\n    user_id = models.IntegerField()\n    problem_id = models.IntegerField()" }, { "alpha_fraction": 0.6725146174430847, "alphanum_fraction": 0.6725146174430847, "avg_line_length": 23.571428298950195, "blob_id": "9ed41e542b23ca49e7a850ce965ab7dd968ac8e1", "content_id": "157a5fdc4dab763a390c36859123f39e5fdb34c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 59, "num_lines": 7, "path": "/api/urls.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"problems/\", views.problems, name=\"problems_api\"),\n path(\"paid/\", views.paid, name=\"paid\"),\n]" }, { "alpha_fraction": 0.6962750554084778, "alphanum_fraction": 0.7077363729476929, "avg_line_length": 30.81818199157715, "blob_id": "74b9e5eebbdcc656117abf5a1d319d0534ca9091", "content_id": "49bb66ccf4829adc709fb2a38847ca93a2591c10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/main/models.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n paid = models.CharField(max_length=5)\n avator = models.CharField(max_length=255)\n\n def __str__(self):\n return f\"{self.user} - {self.paid}\"" }, { "alpha_fraction": 0.6376709938049316, "alphanum_fraction": 0.6552087068557739, "avg_line_length": 27.808080673217773, "blob_id": "8fa63b0776f7791d5c99e6388f245d6e483e76f2", "content_id": "12a7d7848874ccc157e887f4ab2fcbf50727299f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2851, "license_type": "no_license", "max_line_length": 192, "num_lines": 99, "path": "/test.py", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "import unittest\nfrom solution import *\nimport random\nimport string\n\n\ndef correct_fib(n):\n\t\"\"\"correct implementation for this test\"\"\"\n\tif n == 2:\n\t\treturn 1\n\telif n == 1:\n\t\treturn 0\n\telse:\n\t\treturn correct_fib(n-1) + correct_fib(n-2)\n\ndef correct_palindrome(string):\n\tnew_str = \"\"\n\tfor i in string:\n\t\tif i.isalnum():\n\t\t\tnew_str += (i)\n\treturn new_str.lower()[::-1] == new_str.lower()\n\ndef random_num(n):\n\treturn random.choice([i for i in range(n+1)])\n\ndef random_str(n):\n\tans = \"\"\n\tfor i in range(n):\n\t\tans += random.choice(string.ascii_letters)\n\treturn ans\n\nclass NthFib(unittest.TestCase):\n\tdef test_1(self):\n\t\t\"\"\"Test for fib when n = 2\"\"\"\n\t\tassert 1 == nth_fib(2)\n\tdef test_2(self):\n\t\t\"\"\"Test for fib when n = 6\"\"\"\n\t\tassert 5 == nth_fib(6)\n\tdef test_3(self):\n\t\tnum = random_num(15)\n\t\t\"\"\"random test 1, expected {num}, actual {nth_fib(num)}\"\"\"\n\t\tassert correct_fib(num) == nth_fib(num)\n\tdef test_4(self):\n\t\tnum = random_num(15)\n\t\t\"\"\"random test 2, expected {num}, actual {nth_fib(num)}\"\"\"\n\t\tassert correct_fib(num) == nth_fib(num)\n\tdef test_5(self):\n\t\tnum = random_num(15)\n\t\t\"\"\"random test 3, expected {num}, actual {nth_fib(num)}\"\"\"\n\t\tassert correct_fib(num) == nth_fib(num)\n\tdef test_6(self):\n\t\tnum = random_num(15)\n\t\t\"\"\"random test 4, expected {num}, actual {nth_fib(num)}\"\"\"\n\t\tassert correct_fib(num) == nth_fib(num)\n\tdef test_7(self):\n\t\tnum = random_num(15)\n\t\t\"\"\"random test 5, expected {num}, actual {nth_fib(num)}\"\"\"\n\t\tassert correct_fib(num) == nth_fib(num)\n\n# Testcase and question description are from: https://www.freecodecamp.org/learn/javascript-algorithms-and-data-structures/javascript-algorithms-and-data-structures-projects/palindrome-checker\nclass PalindromeChecker(unittest.TestCase):\n\tdef test_2(self):\n\t\t\"\"\"palindrome(\"eye\") should return true.\"\"\"\n\t\tassert palindrome(\"eye\") == True\n\n\tdef 
test_3(self):\n\t\t\"\"\"palindrome(\"_eye\") should return true.\"\"\"\n\t\tassert palindrome(\"_eye\") == True\n\n\tdef test_4(self):\n\t\t\"\"\"palindrome(\"race car\") should return true.\"\"\"\n\t\tassert palindrome(\"race car\") == True\n\n\tdef test_5(self):\n\t\t\"\"\"palindrome(\"not a palindrome\") should return false.\"\"\"\n\t\tassert palindrome(\"not a palindrome\") == False\n\n\tdef test_6(self):\n\t\t\"\"\"palindrome(\"A man, a plan, a canal. Panama\") should return true.\"\"\"\n\t\tassert palindrome(\"A man, a plan, a canal. Panama\") == True\n\n\tdef test_7(self):\n\t\t\"\"\"palindrome(\"never odd or even\") should return true.\"\"\"\n\t\tassert palindrome(\"never odd or even\") == True\n\n\tdef test_8(self):\n\t\t\"\"\"palindrome(\"nope\") should return false.\"\"\"\n\t\tassert palindrome(\"nope\") == False\n\n\tdef test_9(self):\n\t\t\"\"\"palindrome(\"almostomla\") should return false.\"\"\"\n\t\tassert palindrome(\"almostomla\") == False\n\n\tdef test_10(self):\n\t\t\"\"\"palindrome(\"My age is 0, 0 si ega ym.\") should return true.\"\"\"\n\t\tassert palindrome(\"My age is 0, 0 si ega ym.\") == True\n\n\"\"\"if __name__ == \"__main__\":\n\tunittest.main()\"\"\"" }, { "alpha_fraction": 0.7785466909408569, "alphanum_fraction": 0.7785466909408569, "avg_line_length": 47.33333206176758, "blob_id": "206c453928f6e236e5548bf64c5d96c633d54938", "content_id": "37b7dd5f0f67d72c21cc9f285a76b6025108ba5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 289, "license_type": "no_license", "max_line_length": 235, "num_lines": 6, "path": "/README.md", "repo_name": "DevWithMe/AlgoExpert-Clone", "src_encoding": "UTF-8", "text": "# AlgoExpert Clone\n\nThis is an AlgoExpert Clone, you can visit the real AlgoExpert at https://algoexpert.io. This is my attempt to create a clone similar to that site. Later this project may move to this new Repository. But you can still access this URL. \n\n# Files\nThis is a Django Project" } ]
12
doanuoc99/b-i-t-p
https://github.com/doanuoc99/b-i-t-p
f57ab33111fef7e1e127b249f18a2aa7901e84d2
457f3ea961e8ab161d7846fdd44dec914a3be99d
116076e5b312bcd9894173b88b8a4bfa7ec39db6
refs/heads/master
2020-04-27T17:19:10.292366
2019-04-21T14:00:10
2019-04-21T14:00:10
174,513,295
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.7441860437393188, "avg_line_length": 19.5, "blob_id": "d5145f3c16343e1a42490e4cdf14626a498211c6", "content_id": "fb714ec6af3bea47faa83341591a7c0e3fbed227", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/số thập phân ngẫu nhiên.py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "import random\r\nprint(random.random()*100)\r\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6547619104385376, "avg_line_length": 40, "blob_id": "982663a1f7fd2c07b3929be517238a67d737c660", "content_id": "d2302dc6fe6e3bb8164895918465f919cddf0341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 67, "num_lines": 2, "path": "/module chia hết cho 5, 7.py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "import random\r\nprint (random.choice([i for i in range(201) if i%5==0 and i%7==0]))\r\n" }, { "alpha_fraction": 0.649350643157959, "alphanum_fraction": 0.649350643157959, "avg_line_length": 13.800000190734863, "blob_id": "7d613eab147f94641208d31d440209005fe3d938", "content_id": "f79308ab7c5c8a22ec1141d002f7cf575b0cdb26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/my_string.py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "my_string=\"doan van phuc uoc\"\r\nl=len(my_string)\r\nprint(l)\r\nh=my_string.upper()\r\nprint(h)" }, { "alpha_fraction": 0.5194805264472961, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 37.5, "blob_id": "d77371fed38cdc425293d7ddb85a7bf7bacfcc7a", "content_id": "9b9c057ad74a10bf4a89b9f8cc85f7ce68d8f5b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 55, "num_lines": 2, "path": "/hiển thị đơn giản trong python.py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "print(\"hello,world\")\r\nprint(\"Đoàn Văn Phúc Ước,Mssv: 1755251030100031, ngành CNKT Đ-ĐT\")" }, { "alpha_fraction": 0.4794520437717438, "alphanum_fraction": 0.4794520437717438, "avg_line_length": 13, "blob_id": "eb6832800e038bf67c062260132ae8f898add54e", "content_id": "aeef8947926e69614d3ce9beda1aa1d5e200c8c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 73, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/hàm in hoa.py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "def inhoa(s):\r\n l=[]\r\n l=s.upper()\r\n print(l)\r\ninhoa(\"doan uoc\")" }, { "alpha_fraction": 0.4868420958518982, "alphanum_fraction": 0.5, "avg_line_length": 13.600000381469727, "blob_id": "694d1e1f0641140354ae36f232c981fdde43719e", "content_id": "e11d3aa915b372d6b519ed5fd1cec855c4d458b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/dictionary (i,i^2).py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "n=int(input('nhập n: '))\r\nd={}\r\nfor i in range(1,n):\r\n d[i]=i*i\r\nprint(d)" }, { "alpha_fraction": 
0.5147929191589355, "alphanum_fraction": 0.5147929191589355, "avg_line_length": 19.375, "blob_id": "4ce601971db15bff4aa7974e9249e2ab94eabccb", "content_id": "1a43612763e05c98d2cc1adccd2fb33e0a7f52ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/ngoại lệ a chia b.py", "repo_name": "doanuoc99/b-i-t-p", "src_encoding": "UTF-8", "text": "try:\r\n a=int(input('nhập a: '))\r\n b=int(input('nhập b: '))\r\n kq=a/b\r\nexcept Exception as ex:\r\n print('lỗi '+str(ex))\r\nelse:\r\n print('kq của a/b là:', kq)" } ]
7
JECINTA534521/News-Highligts-project
https://github.com/JECINTA534521/News-Highligts-project
5fb99cc42ee667ff837bb0e7d6b2382564ea09de
00634f8eb0fc972d28f1ec911b83fbdea838ebd5
18e8b5b87b18ff38bb72f96751c6798cf840e19a
refs/heads/master
2021-06-25T06:31:39.999820
2019-10-17T12:51:13
2019-10-17T12:51:13
214,462,327
0
0
null
2019-10-11T14:52:33
2019-10-17T12:53:14
2021-03-20T01:55:17
Python
[ { "alpha_fraction": 0.5977011322975159, "alphanum_fraction": 0.8045976758003235, "avg_line_length": 42, "blob_id": "b4b050286fbc0bda3885e8021721d5f0346f5832", "content_id": "998e90a25b561e582e3727275f6220959f4b9213", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 87, "license_type": "permissive", "max_line_length": 56, "num_lines": 2, "path": "/start.sh", "repo_name": "JECINTA534521/News-Highligts-project", "src_encoding": "UTF-8", "text": " export NEWS_API_KEY = '40a81ce19baa427bb0bfecd3e66b647e'\n python3.6 manage.py server" }, { "alpha_fraction": 0.7494744062423706, "alphanum_fraction": 0.7592852115631104, "avg_line_length": 27.808080673217773, "blob_id": "f21ef8055b9bebabfb77087fb176926eda37205e", "content_id": "3f0ebb88f0902fb601ba6de1e34558a199e5520f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2854, "license_type": "permissive", "max_line_length": 112, "num_lines": 99, "path": "/README.md", "repo_name": "JECINTA534521/News-Highligts-project", "src_encoding": "UTF-8", "text": "# News Highlights Project\n\n## Description\nAn application that allows users to get news articles form multiple news sources online.\n\n#### Link to deployed site\n\n\n## Table of content\n1. [Description](#description)\n2. [Setup and installations](#setup-and-installations)\n3. [Deployment](#deployment)\n4. [Contributing](#contributing)\n5. [Bugs](#bugs)\n6. [Licensing](#license)\n\n\n## Setup and installations\n\n#### Prerequisites\n1. Python 3.6\n2. Pip\n3. virtualenv\n\n\n## Technologies Used\n* Python 3.6.5\n* Flask Framework\n* HTML/CSS\n* JavaScript\n\n#### Clone the Repo and checkout into the project folder.\n```bash\ngit clone https://github.com/JECINTA534521/News-Highligts-project && cd News-highlights\n```\n\n#### Setting up environment variables\nCreate a file to start the application. 
`touch start.sh` \nInside the start file, input the environment variables and start command below.\n```\nexport NEWS_API_KEY=<your API key from newsapi.org>\n\n\npython3.6 manage.py server\n\n```\n\n#### Create and activate the virtual environment\n```bash\npython3.6 -m virtualenv env\n```\n\n```bash\nsource env/bin/activate\n```\n\n#### Install dependencies\nWhile in the activated virtual environment, install dependencies needed to run the application.\n```bash\n(env)$ pip install -r requirements.txt\n```\n\n#### Run the app\nWhile in the activated virtual environment, export the environment variables and run the app with the commands below.\n\n```bash\n(env)$ export NEWS_API_KEY=<Your api key>\n(env)$ export SECRET_KEY=<Your secret key>\n(env)$ ./start.sh\n```\n## Deployment\nThis is the live link to the project: https://news-highlights-project.herokuapp.com/\n## Contributing\nFeel free to contribute to this repository by making pull requests.\n\n## Bugs\nNo known bugs. If a bug is found, feel free to contact me at: [email protected]\n\n## [LICENSE](LICENSE)\n\nCopyright (c) [2019] [JECINTA WANJIRU]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\n" }, { "alpha_fraction": 0.6402714848518372, "alphanum_fraction": 0.6447963714599609, "avg_line_length": 17.41666603088379, "blob_id": "f142d5479cb285a528c34d3b67b4646977858c50", "content_id": "e81d8bef6eb4fe784322385d4d786e8e721211ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "permissive", "max_line_length": 85, "num_lines": 24, "path": "/config.py", "repo_name": "JECINTA534521/News-Highligts-project", "src_encoding": "UTF-8", "text": "import os\n\n\nclass Config:\n '''\n General configuration parent class\n '''\n\n NEWS_API_KEY = os.environ.get('NEWS_API_KEY')\n NEWS_API_BASE_URL = 'https://newsapi.org/v1/{}?apiKey={}'\n NEWS_ARTICLES_BASE_URL = 'https://newsapi.org/v2/everything?sources={}&apiKey={}'\n\nclass ProdConfig(Config):\n pass\n\n\nclass DevConfig(Config):\n DEBUG = True\n\n\nconfig_options = {\n 'development': DevConfig,\n 'production': ProdConfig\n}\n" }, { "alpha_fraction": 0.47757256031036377, "alphanum_fraction": 0.6833773255348206, "avg_line_length": 15.52173900604248, "blob_id": "c978cdb2fd90675c3766c07ed4a27ddbb5f84baf", "content_id": "091ef3726de6f0ffd845b4313989cd76c05bf645", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 379, "license_type": "permissive", "max_line_length": 24, "num_lines": 23, "path": "/requirements.txt", "repo_name": "JECINTA534521/News-Highligts-project", "src_encoding": "UTF-8", "text": "astroid==2.3.1\nautopep8==1.4.4\nClick==7.0\ndominate==2.4.0\nFlask==1.1.1\nFlask-Bootstrap==3.3.7.1\nFlask-Script==2.0.6\nFlask-WTF==0.14.2\ngunicorn==19.9.0\nisort==4.3.21\nitsdangerous==1.1.0\nJinja2==2.10.3\nlazy-object-proxy==1.4.2\nMarkupSafe==1.1.1\nmccabe==0.6.1\npycodestyle==2.5.0\npylint==2.4.2\nsix==1.12.0\ntyped-ast==1.4.0\nvisitor==0.1.3\nWerkzeug==0.16.0\nwrapt==1.11.2\nWTForms==2.2.1" } ]
4
jgomo3/leopy
https://github.com/jgomo3/leopy
4b99575cf89c62725e1e9f4df3bbbc0df47c7c97
eedeead166594223b0292c0e3c202681688360a1
247dfa6d19a1b077a004619ef585c141467167e4
refs/heads/master
2021-01-11T09:40:02.984344
2016-12-17T01:21:06
2016-12-17T01:21:06
77,494,379
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4827713966369629, "alphanum_fraction": 0.49648016691207886, "avg_line_length": 27.723403930664062, "blob_id": "a47c2b901b24d8a89661180adacbebb64e7ee63c", "content_id": "fa7138a92f03a74d8f64b2908f381f479ff48730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2699, "license_type": "no_license", "max_line_length": 120, "num_lines": 94, "path": "/rain.py", "repo_name": "jgomo3/leopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport math\nimport random\nimport tkinter as tk\n\nclass Drop():\n\n def __init__(self, canvas, left, top, depth, color, bottom=400, right=400):\n self.__canvas = canvas\n self.__left = left\n self.__top = top\n self.__depth = depth\n self.__color = color\n self.__bottom = bottom\n self.__right = right\n\n self.__initial_vx = 0\n self.__initial_vy = 5\n self.__initial_height = 5\n self.__initial_width = 1\n self.__initial_depth = 0\n\n self.__scale()\n\n self.__id = canvas.create_line(\n self.__left,\n self.__top,\n self.__left,\n self.__top + self.__height,\n width=self.__width,\n fill=self.__color\n )\n\n def __scale(self):\n delta = self.__depth - self.__initial_depth\n\n # Affected\n factor = (delta - 5)**2 / 10\n self.__height = factor * self.__initial_height\n self.__vy = factor * self.__initial_vy\n self.__width = factor * self.__initial_width\n\n # Constants -> 1\n self.__vx = 1 * self.__initial_vx\n\n def move(self):\n if self.__canvas.coords(self.__id)[1] > self.__bottom:\n self.__canvas.coords(self.__id, self.__left, 0, self.__left, self.__height)\n else:\n self.__canvas.move(self.__id, self.__vx, self.__vy)\n\nclass App():\n\n def __init__(self, master, width=400, height=400, max_depth=5, population=50):\n self.__master = master\n self.__width = width\n self.__height = height\n self.__max_depth = max_depth\n self.__population = population\n self.__canvas = tk.Canvas(\n self.__master,\n width=self.__width,\n height=self.__height)\n self.__objects = self.__load_objects(canvas=self.__canvas)\n self.__canvas.pack()\n self.__master.after(0, self.__animation)\n\n def __animation(self):\n for _object in self.__objects:\n _object.move()\n self.__master.after(120, self.__animation)\n\n def __load_objects(self, canvas):\n return [\n Drop(canvas=canvas, left=left, top=top, depth=depth, color='blue', bottom=self.__height, right=self.__width)\n for left, top, depth in\n (\n (\n random.randrange(0, upper)\n for upper in (\n self.__width,\n self.__height,\n self.__max_depth\n )\n )\n for _ in range(self.__population)\n )\n ]\n\n\nroot = tk.Tk()\napp = App(master=root, population=100)\nroot.mainloop()" } ]
1
litt1eseven/python-project
https://github.com/litt1eseven/python-project
0364f9bf42ef58f1ed34c7e0b2aa5af5b5c46021
882eef5cc489dc723800426af003c07aba04dd50
33ea64736fde843d76638fde32c6236fcf99a4ec
refs/heads/master
2019-05-25T22:11:00.248331
2019-01-19T09:44:26
2019-01-19T09:44:26
85,579,486
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.546012282371521, "alphanum_fraction": 0.6134969592094421, "avg_line_length": 17.22222137451172, "blob_id": "e1b42c85645ef19dbd27540d2a68e48da9152a23", "content_id": "a9d9caa002d0bd49746984a119f8e1bcfba77aff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 58, "num_lines": 9, "path": "/shiyanlou-flask/simpledu/handlers/course.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/11/4 上午10:37'\n\nfrom flask import Blueprint\n\ncourse = Blueprint('course',__name__,url_prefix='/course')" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 10, "blob_id": "436878c322a9171dff7f925487e8f49a19a4cc9b", "content_id": "1a1ec873549a9ce5828625d2384cd434194dcf30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "no_license", "max_line_length": 25, "num_lines": 11, "path": "/Company-project/python-code/moudle1/c10.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\n替换字符串\n\"\"\"\nimport re\n\na = 'PythonC#PHP'\n# 1 最多替换1个,默认全部\nr = re.sub('C#','GO',a,1)\n\nprint(r)" }, { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 19.264705657958984, "blob_id": "9ab5643a3ef163293f7ba3fd3f252ea606bcd60e", "content_id": "be7775e8f9ffa696ebca9fac8d1b2076cc12fee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 735, "license_type": "no_license", "max_line_length": 84, "num_lines": 34, "path": "/Company-project/docker-python3/Dockerfile", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# 引入基础镜像\nFROM alpine:latest\n\n# 作者信息\nMAINTAINER little seven <https://www.soo9s.com>\n\n# 使用中国安装源\nRUN echo http://mirrors.ustc.edu.cn/alpine/v3.6/main > /etc/apk/repositories; \\\n echo http://mirrors.ustc.edu.cn/alpine/v3.6/community >> /etc/apk/repositories \\\n && mkdir /run/nginx\n\n# 安装相关依赖\nRUN apk update \\\n && apk upgrade \\\n && apk add --no-cache \\\n wget \\\n curl \\\n gcc \\\n g++ \\\n nginx \\\n mysql-client \\\n py-mysqldb \\\n libc-dev \\\n libffi-dev \\\n openssl-dev \\\n py-curl \\\n libxml2-dev \\\n libxslt \\\n libxslt-dev \\\n bash \\\n vim \\\n linux-headers \\\n pcre-dev \\\n python3\n" }, { "alpha_fraction": 0.540645182132721, "alphanum_fraction": 0.6154838800430298, "avg_line_length": 24.032258987426758, "blob_id": "4fb21d4c5f133bac25ce3d7b9086d275b6dd912e", "content_id": "6c40cbc0917caf495f7ad6c1fc905ffe9a88ed0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 789, "license_type": "no_license", "max_line_length": 152, "num_lines": 31, "path": "/Company-project/BeautifulGril/spider.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '4/21/17 3:04 PM'\n\nimport requests,urllib\n\nfrom lxml import etree\n\n\ndef get_image(url):\n\n headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'}\n\n req = requests.get(url,headers=headers).text\n\n res = etree.HTML(req)\n\n links = res.xpath('//img/@data-original')\n\n for link in links:\n\n 
imgName = link.split('/')[-1]\n print u\"\\033[95m正在下载{link}的图片\\033[0m\".format(link=link)\n urllib.urlretrieve(link,'pic/{}'.format(imgName))\n\nif __name__ == '__main__':\n for i in range(1,52):\n url = 'https://www.zhihu.com/collection/78172986?page={page}'.format(page=str(i))\n get_image(url)" }, { "alpha_fraction": 0.6582278609275818, "alphanum_fraction": 0.6835442781448364, "avg_line_length": 14.800000190734863, "blob_id": "d3aee12260c39ed25a94056d9ec2034bd0e19d45", "content_id": "2dee3179b5561a6adc6367ca6d5a970200843d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 109, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/Company-project/docker-python3/README.md", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "**Docker部署python3项目,打包一个超级小的镜像**\n\n- `cd docker-python3`\n\n- `docker build -t .`\n" }, { "alpha_fraction": 0.4354838728904724, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 9.416666984558105, "blob_id": "01d78fa596996a49072095371ae14831d9909f6e", "content_id": "2ff10ce91555857dcfbca18ef7c93f4a3355d305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 31, "num_lines": 12, "path": "/Company-project/python-code/moudle1/c8.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\n边界符匹配\n\"\"\"\nimport re\n\nqq = '5663946'\n\n# 匹配符合5-10位的QQ号\nr = re.findall('^\\d{5,10}$',qq)\n\nprint(r)" }, { "alpha_fraction": 0.7686874866485596, "alphanum_fraction": 0.7916421294212341, "avg_line_length": 19, "blob_id": "27b63563de694634a446a3c52212ed8558f12120", "content_id": "21a1da07e038e0f4b1b2bbb99d312c3bb1ca96bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3641, "license_type": "no_license", "max_line_length": 192, "num_lines": 85, "path": "/Company-project/python简历.md", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# 联系方式\n\n- 手机:132\n- Email:[email protected]\n- QQ/微信号:-\n\n---\n\n# 个人信息\n\n - 徐家琪/男/1995 \n - 本科/南通大学计算机系 \n - 工作年限:2年\n - 技术博客:https://www.soo9s.com (主要记录学习的东西)\n - Github:https://github.com/litt1eseven/\n\n---\n\n# 工作经历\n\n## 南京商数信息技术有限公司 ( 2016年12月 ~ 2017年7月 )\n\n### 政府网站数据挖掘项目\n(实习期间)\n我在此项目负责了文章采集,我最困难的问题是遇到一些滑动验证的验证码或类似的反爬措施,我首先采取了手动验证的方式,先打断点然后手动的去滑动,最后实现功能,后面我就寻找一些打码平台提供的接口去实现复杂的验证方式。这个项目中,我最自豪的技术细节是学习到了深度拷贝,因为在之前,采集的数据就算是做了去重,还是会导致数据重复的添加,使用深度拷贝顺利解决问题。\n\n### 网站监控项目\n维护了一个Django的项目(4个月),使用Redis和ElasticSearch配合Django实现对电力数据进行实时存储和历史查看的功能。\n遇到的难题:\nes语法不熟悉 不知道如何整合到项目中\n解决问题的过程:\n在各大搜索引擎搜索关于任何关于es的文章,并且尝试在代码中去实现es的增删改查,后面了解到了ElasticSearch-dsl这个强大的第三方,使用它解决了项目中的需求。\n\nRedis遇到的难题:\n在存储中文的时候,存储进去就是乱码。在程序中怎么转换都不能正常存储。\n解决问题的过程:\n将要写入的数据先通过base64进行加密,然后在取出数据的时候在进行解密,问题解决。\n\n\n我在此项目负责监管各大主流平台(Ali/JD..)的数据分析,数据包的更新,\n使用NLP技术采集医学方面的语料库,然后整理归档。\n\n### 其他项目\n\n其余的项目和时间是给数据做简单的清洗工作,最多的学习还是对自然语言处理方向的研究。\n\n \n## 盐城国信优易数据有限公司 ( 2017年7月 ~ 2018年6月 )\n\n### 大数据平台项目 \n我在此项目负责了对整个项目的管理(使用Flask),主要负责整个项目的代码重构和优化,在空闲时间研究ANT-V,给整个项目整合一个大数据可视化展示的功能,这个项目中,我最自豪的技术细节是不断的重构代码优化代码,学习到了一个思想宁可定义复杂也不要调用复杂,优化最明显的效果就是重复性很多的代码少了很多,也能进一步感受到API的优秀强大之处。项目是长期的项目,目前一直在维护和代码重构中。\n\n\n### 其他兴趣\n\n- 目前想更深入学习Flask,所以在尝试读Flask的源代码(效果不佳),也在看Miguel的博客。\n\n- 在使用ANT-V写一些小的数据可视化的demo,也在学习VUE的一些相关知识。\n\n- 其余时间帮助数据采集组的同事解决一些些爬虫代码中遇到的一些问题。\n\n\n---\n\n## 开源项目\n\n - 
[Ycyy-bigdata](https://github.com/litt1eseven/Ycyy):项目初期的成果,目前在重构优化中。\n \n - [jobplus](https://github.com/litt1eseven/jobplus):当时\n 在其余平台学习后协同开发的项目\n\n - [python-project](https://github.com/litt1eseven/python-project/tree/master/Company%20project):一些学习的代码,爬虫和web相关的\n\n\n# 技能清单\n\n以下均为我熟练使用的技能\n\n- Web开发:Python/PHP\n- Web框架:Flask/Django/ThinkPHP\n- 前端框架:Bootstrap/vue\n\n- 数据库相关:MySQL/MongoDB/Redis/ElasticSearch\n- 版本管理:Git/Svn\n- 引擎容器: Docker" }, { "alpha_fraction": 0.5191308259963989, "alphanum_fraction": 0.5304676294326782, "avg_line_length": 28.41666603088379, "blob_id": "ccbc95090b1121a092ad83713b302b29a2645779", "content_id": "d92b33080b1970e7b9f02be78f1f40785e7c267e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2333, "license_type": "no_license", "max_line_length": 112, "num_lines": 72, "path": "/Company-project/meizitu-2/meizitu/spiders/myspider.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\nimport requests\nfrom meizitu.mongodb import TMongo\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor ##配合Rule进行URL规则匹配\nfrom scrapy.selector import Selector\nfrom meizitu.items import MeizituItem\n\n\nclass Myspider(CrawlSpider):\n name = 'meizitu'\n allowed_domains = ['meizitu.com']\n start_urls = [\n 'http://meizit.com/home/index/index?', # 遍历所有分页\n 'http://meizit.com/one/' # 遍历所有文章页\n ]\n\n rules = (\n Rule(LxmlLinkExtractor(allow=(r'p=([0-9]+)'))),\n Rule(LxmlLinkExtractor(allow=(r'\\.html')),callback='parse_item'),\n )\n\n def parse_item(self,response):\n mongo = TMongo()\n\n '''给获取到的url连接添加一个状态码,0未获取,200获取,200的将不再读取内容'''\n post = {\n 'url':response.url,\n 'code':0 # 未读取\n }\n\n '''如果获取到的连接不存在数据库,则存入'''\n mzturl = mongo.get_more()\n cz = mongo.get_info(response.url)\n if cz == 0:\n mongo.add_one(post)\n for meizi in mzturl:\n '''链接未获取,处理链接获取内容'''\n if meizi[\"code\"] == 0:\n mei = requests.get(meizi[\"url\"])\n yield self.getCon(mei,meizi[\"_id\"])\n\n def getCon(self,response,oid):\n mongo = TMongo()\n item = MeizituItem()\n res = Selector(response)\n '''文章链接'''\n item[\"url\"] = response.url\n\n re = requests.get(response.url)\n\n '''请求的连接返回200将状态码更新'''\n if re.status_code == 200:\n mongo.update(oid=oid)\n\n\n pic = res.xpath('/html/body/div[1]/div[1]/article/div/img/@src').extract()\n post = {\n 'pic':pic\n }\n res_count = mongo.get_count(post)\n if res_count == 0:\n\n '''标题'''\n title = res.xpath('/html/body/div[1]/div[1]/article/header/a/text()').extract()\n item[\"title\"] = \"\".join(title).replace('\\r\\n','').replace('\\t','').replace(' ','').replace('?','').\\\n replace('!','').replace('?','')\n\n '''图片'''\n pics = [pics for pics in pic]\n item[\"pic\"] = pics\n return item" }, { "alpha_fraction": 0.6465116143226624, "alphanum_fraction": 0.6720930337905884, "avg_line_length": 24.352941513061523, "blob_id": "3bb9d96ebbd079c780ff304b7d62da3528638a82", "content_id": "f590c9b3a4c1e94341f26330406a96c288344016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 65, "num_lines": 17, "path": "/shiyanlou-flask/simpledu/handlers/user.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/11/4 上午10:37'\n\nfrom flask import Blueprint,render_template\nfrom simpledu.models import User,Course\n\nuser = Blueprint('user', 
__name__)\n\n\[email protected]('/user/<username>', methods=[\"GET\", \"POST\"])\ndef index(username):\n users = User.query.filter_by(name=username).first()\n course = Course.query.all()\n return render_template(\"user.html\",users=users,course=course)" }, { "alpha_fraction": 0.5941176414489746, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 12.15384578704834, "blob_id": "9e2b12859452ca7a2e0fa909acebaf6cb01602b5", "content_id": "a4c77a139a334efe8db6492757ba0ad562d942ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/Company-project/python-code/moudle1/c9.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\n忽略大小写\n多个参数使用管道命令\nre.findall('(python)',a,re.I | re.S)\n\"\"\"\nimport re\n\na = 'python Python PyThon'\n\nr = re.findall('(Python)',a,re.I)\n\nprint(r)" }, { "alpha_fraction": 0.5258427262306213, "alphanum_fraction": 0.5348314642906189, "avg_line_length": 19.227272033691406, "blob_id": "c8f65051c4efd894619253fbdd3edbb8acbb9233", "content_id": "8fb8a8605fbfbc9ff2e5135c41ac6d9524ecfd10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 527, "license_type": "no_license", "max_line_length": 43, "num_lines": 22, "path": "/Company-project/go/gogogo.go", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "package main \n\nimport \"fmt\"\n\nfunc main() {\n var first_name string = \"学习\"\n var last_name string = \"go 语言\" // 必须双引号\n fmt.Println(first_name+last_name)\n // `:=` 语句是申明并初始化变量的简写,例如\n // 这个例子中的 `var f string = \"short\"`。\n f := \"short\"\n fmt.Println(f)\n}\nfunc plus() {\n if num := 9; num < 0 {\n fmt.Println(num,\"数字大了\")\n }else if num < 10 {\n fmt.Println(num,\"数字小了\")\n }else{\n fmt.Println(num,\"end\")\n }\n}\n" }, { "alpha_fraction": 0.7395023107528687, "alphanum_fraction": 0.7480559945106506, "avg_line_length": 14.876543045043945, "blob_id": "91a42fa0c6a54a1d3183530e0ecdb15617058b70", "content_id": "3b6d4cd3c82b9892329fd76a8d11e510d503f5e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2090, "license_type": "no_license", "max_line_length": 80, "num_lines": 81, "path": "/.gtihub/issues_template.md", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "> 提问前确保你做过了的\n\n```\n当遇到了Flask问题/Python问题时,请遵循以下的步骤尝试解决:\n\n检查代码是否有语法错误。\n查看相应的源码和文档。\n使用Google/StackOverflow搜索\n在github中查找过类似功能,或者项目\n\ngithub 检索方法 stars:>10000 python\n\n```\n\n---\n> 提问的技巧\n\n```\n如果问题主要涉及扩展,请在标题内包含扩展名称\n最小化的、完整的、可复现相关问题的代码\n期望效果\n实际效果(完整的错误堆栈信息)\n你的操作步骤和尝试过的解决办法\n操作系统和语言、库等的版本\n最后在代码仓库中 提交 issue,内容格式和上面一致\n```\n\n```\n例如:\n\n**期望的行为**\n\n期望在网站运行的同时,每隔一段时间运行一个价格检测程序(即flushpirce),然后发送邮件。\n\n# -*- coding=utf-8 -*-\nfrom app import create_app, db\nfrom flask_script import Manager, Shell\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom app.admin.views import flushprice\n\napp = create_app('default')\n\nsched = BackgroundScheduler()\nsched.add_job(flushprice, 'interval', seconds=1) //flushprice是定义在admin.view下的程序\nsched.start()\n\nif __name__ == '__main__':\n manager.run()\n \n**实际的行为**\n\n程序运行后,只有访问网站时才会触发flushprice的运行,如果不对网站进行访问就不会运行。\n\n**环境版本**\n\n操作系统:Mac\nPython:3.7\nFlask:apscheduler(非flask-apscheduler)\n如果问题和扩展相关,请一并写出扩展的版本。\n\n```\n> 可以先看那些awesome\n\n```\n建议观看: https://github.com/Nyloner/Nyspider # 
关于爬虫\n\n可以以后看: https://github.com/vinta/awesome-python # 所有优秀的开源项目\n```\n\n> 前期准备\n\n```\npip # python开发者必用的包管理工具.\nvirtualenv # 虚拟环境,固然强大,但是太臃肿.\npipenv # Python官方推荐的虚拟环境管理工具,requests作者的产物.\n\npip install -i https://pypi.douban.com/simple pipenv 即可安装pipenv\n\n\npython version 3.7 # 稳定,功能强大.\n```\n" }, { "alpha_fraction": 0.6994535326957703, "alphanum_fraction": 0.750455379486084, "avg_line_length": 25.80487823486328, "blob_id": "f9a7e70aa8cb22eb7ad1d3db792fcd592bff9dec", "content_id": "9fbcf393edd36303c287c4c0dccd9cea08dd4228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1190, "license_type": "no_license", "max_line_length": 114, "num_lines": 41, "path": "/Company-project/docker-tomcat/Dockerfile", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# 引入基础镜像\nFROM ubuntu:16.04\n# 运行shell脚本,更换安装源\nCOPY run.sh /\nRUN apt-get install bash -y\nRUN /bin/sh run.sh\n\n# 安装必要软件\nRUN apt-get update && apt-get install vim -y\n\n# 安装jdk\nRUN mkdir /home/java\nADD jdk1.8.0_151/ /home/java/jdk1.8.0_151\n\n# 安装maven\nRUN mkdir /var/tmp/maven\nADD apache-maven-3.5.2/ /var/tmp/maven\n\n# 安装tomcat\nRUN mkdir /usr/local/tomcat\nADD apache-tomcat-8.5.23/ /usr/local/tomcat\n\n# 配置环境变量\nENV MAVEN_HOME=/var/tmp/maven\nENV PATH=${PATH}:${MAVEN_HOME}/bin\nENV JAVA_HOME=/home/java/jdk1.8.0_151\nENV JRE_HOME=/home/java/jdk1.8.0_151/jre\nENV PATH=$JAVA_HOME/bin/:$JAVA_HOME=/jre/bin:$PATH:$HOME/bin\n# 创建日志文件\nRUN touch /usr/local/tomcat/bin/run.log\n\n# 部署项目环境\nADD freshstart-iec104/server.xml /usr/local/tomcat/conf/\nADD freshstart-iec104 /usr/local/tomcat/webapps/freshstart-iec104\nWORKDIR /usr/local/tomcat/webapps/freshstart-iec104/\nRUN mvn clean package\nRUN cd /usr/local/tomcat/webapps/freshstart-iec104/target/ && mv freshstart-iec104.war /usr/local/tomcat/webapps/\nRUN cd /usr/\nWORKDIR /usr/local/tomcat/webapps/\nRUN rm -rf freshstart-iec104/\nCMD /usr/local/tomcat/bin/catalina.sh run" }, { "alpha_fraction": 0.3471502661705017, "alphanum_fraction": 0.4715026021003723, "avg_line_length": 12.785714149475098, "blob_id": "5ecbbe626b0ba68a581e97a79955c2e247b240da", "content_id": "80d1bb527236c2ce936e8a0578d62a394242f556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 34, "num_lines": 14, "path": "/Company-project/python-code/moudle1/c1.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\n\"\"\"\n@ Created by Seven on 2018/06/19 \n\"\"\"\n# for i in range(1, 10, 2):\n# print(i, end=',')\n\n# 代替for循环\n\na = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nb = a[0:len(a):2]\n\nprint(b)\n" }, { "alpha_fraction": 0.8222923278808594, "alphanum_fraction": 0.8222923278808594, "avg_line_length": 72.15384674072266, "blob_id": "a99369d3480f266249fec26f0bd8666022ecddf8", "content_id": "01c52cb9f89aec07ec36548012b22d1ac9c42540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 951, "license_type": "no_license", "max_line_length": 94, "num_lines": 13, "path": "/Company-project/docker-tomcat/run.sh", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#/bin/bash\ncat <<EOF > /etc/apt/sources.list\ndeb http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ 
xenial-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse\nEOF\n" }, { "alpha_fraction": 0.48739495873451233, "alphanum_fraction": 0.5546218752861023, "avg_line_length": 9.909090995788574, "blob_id": "e5c00a569350e17424049dae68c8631a0f91611c", "content_id": "9d8651f354887c447e202159e1d5af34d01048a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 30, "num_lines": 11, "path": "/Company-project/python-code/moudle1/c5.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\n数量词\n\"\"\"\nimport re\n\na = 'python javaphp78907dfs'\n\nr = re.findall('[a-z]{3,6}',a)\n\nprint(r)" }, { "alpha_fraction": 0.5431165099143982, "alphanum_fraction": 0.5728063583374023, "avg_line_length": 16.16883087158203, "blob_id": "2ba5217dc848f293520d0172360c1e105e6d8d66", "content_id": "7230e89684fb9f0c691ec46cada7a7b5dfed8f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6224, "license_type": "no_license", "max_line_length": 90, "num_lines": 308, "path": "/Company-project/record.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/3/23 下午2:58'\n\n'''\nOS Record start\n'''\nimport os\n\nprint os.getcwd() # 获取当前路径\n\nprint os.listdir('/Users/xxx/Desktop/spider/pythontest/Company project/') # 获取路径下所有文件和文件夹\n\n# 将新的路径表示出来\n# /Users/xxx/Desktop/test\ndd = os.path.join(os.getcwd(), 'test')\n\nprint os.mkdir('test') # 创建一个名为test的文件夹\n\nprint os.rmdir('test') # 删除一个名为test的文件夹\n\nprint os.rename('test', 'test1') # 重命名一个文件名\n\n'''\nOS Record end\n'''\n\n'''\nColor Record start\n'''\nHEADER = '\\033[95m'\n\nOKBLUE = '\\033[94m'\n\nOKGREEN = '\\033[92m'\n\nWARNING = '\\033[93m'\n\nFAIL = '\\033[91m'\n\nENDC = '\\033[0m'\n\nBOLD = '\\033[1m'\n\nUNDERLINE = '\\033[4m'\n\n# 整体的是这样写的\nprint '\\033[93m 颜色测试 \\033[0m'\n# 上面把颜色全部给变量可以这样写\n\nprint HEADER + \"颜色测试\" + ENDC\n'''\nColor Record end\n'''\n\n'''\nPythonic \"很python的写法\" Record start\n'''\n# 使用zip创建键值对\n# 将两个list合成一个数组\nkeys = ['Name', 'Sex', 'Age']\n\nvalues = ['Se7en', 'Male', '22']\n\nd = {\"1\": \"321\", \"2\": \"123\"}\ndic = dict(zip(keys, values))\nprint dic\n\n# enumerate\nfor i, e in enumerate(keys):\n print i, e\n\nage = 61\n\nif 18 < age < 60:\n print 'young man'\nelse:\n print 'old man'\n\n# 遍历键值对\nfor key, value in d.items():\n print \"another keyword arg: %s=%s\" % (key, value)\n\n# 三元运算符\ngender = 'man'\ntext = '男' if gender == 'man' else '女'\n\n# 字符串格式化\n# 通常我们的写法是\na = 'test'\nprint '你好%s' % a\n\n# python 这样是符合pythonic的\n# 感觉没有比上一种方法减少多少代码,但是能很迅速的理解此方法的使用是干嘛的\na = 'test'\nprint '你好{name}'.format(name=a)\n\n# 字符串链接\n# 不python的写法\nnames = ['raymond', 'rachel', 'matthew', 'roger',\n 'betty', 'melissa', 
'judith', 'charlie']\n\ns = names[0]\nfor name in names[1:]:\n s += ',' + name\n\nprint s\n\n# pythonic的写法\n\nprint (','.join(names))\n\n# 序列解包\np = 'vttalk', 'female', 30, '[email protected]'\n\nname = p[0]\ngender = p[1]\nage = p[2]\nemail = p[3]\n\n# pythonic的写法\n\nname, gender, age, email = p\n\nprint u'邮箱:{email}'.format(email=email)\n\n# for 语句的写法\n# 判断里面是否存在某个字符\nnum = ['123', '567', '789', 'abc']\n# 1\nfor i in num:\n if \"123\" in num:\n print num\n# 2\nstr = ['123', '567', '789', 'abc']\nstr2 = ['123', '123', '890']\n\nfor y in str:\n if y in str2:\n print y\n# pythonic 的写法\n# 1\nx = [i for i in num if \"123\" in num]\n\n# 2\no = [y for y in str if y in str2]\n\n'''\nPythonic \"很python的写法\" Record end\n'''\n\n'''\n*args and **kwargs 使用 Record start\n'''\n\n\n# 当函数的参数不确定时,可以使用*args 和**kwargs,*args 没有key,**kwargs有key。\ndef func(name, *args, **kwargs):\n print \"name:\", name\n\n for i in args:\n print \"another value :value=%s\" % i\n # pythonic 写法\n for key, value in kwargs.items():\n print \"another keyword arg: %s=%s\" % (key, value)\n\n # 虽然也可以这么写\n\n for key in kwargs:\n print \"========%s=%s\" % (key, kwargs[key])\n\n\nfunc(1, 1, 7, 8, myarg1=1, myarg2=2, myarg3=3)\n\n\n#\ndef test(*args, **kwargs):\n # *args 是在不知道要传入多少值的时候使用。(未知数量) 它只有值没有键\n # **kwargs 有键和值\n for ar in args:\n # pythonic写法 pep8规范\n print u\"传入的args值为:{number}\".format(number=ar)\n\n # 也可以这样写,不过不直观\n\n # print u\"传入的args值为:{}\".format(i)\n\n\n # 不pythonic写法\n\n # print u\"传入的args值为:%s\" % ar\n\n # pythonic 写法\n for key, value in kwargs.items():\n print u\"输入的键是:{key} - 值是:{value}\".format(key=key, value=value)\n # 也可以这样写,不过不直观\n\n # print u\"输入的键是:{} - 值是:{}\".format(key,value)\n\n\ntest(1, 2, 3, 4, test1=1, test2=2, test3=3)\n\n'''\n*args and **kwargs 使用 Record end\n'''\n\n'''\n*装饰器带参数 Record start\n'''\n\n\ndef route(path): # 接收装饰器传来的参数\n\n def _wrapper(func): # 接收装饰器下面执行的方法\n\n def __wrapper(*args, **kwargs): # 接收方法中的变量\n\n print \"http://blog.soo9s.me{path}\".format(path=path)\n\n for i in args: # 取出index方法传来的参数\n print i\n\n for k, v in kwargs.items(): # 取出index方法传来的参数\n print k, v\n\n return func(*args, **kwargs) # 返回接收的方法和参数\n\n return __wrapper\n\n return _wrapper\n\n\n@route('/index')\ndef index(*args, **kwargs):\n print \"1\"\n\n\nif __name__ == '__main__':\n index(1, 1, 1, test=\"1\")\n\n'''\n装饰器带参数 Record end\n'''\n\n'''\n装饰器遍历目录内容 Record start\n'''\n#-*- encoding: utf-8 -*-\nimport os\ndef check(func):\n def _check(dir):\n for filename in os.listdir(dir):\n filename = os.path.join(dir,filename)\n filename = os.path.normcase(filename)\n try:\n with open(filename,'r') as f:\n func(f.read())\n except:\n print \"\\n\"+\"此文件打不开或不存在:{error}\".format(error=filename)\n\n return _check\n\n@check\ndef test(filename):\n print filename\n\n@check\ndef test2(filename):\n print filename\n\nif __name__ == '__main__':\n test('/Users/xujiaqi/Desktop/spider')\n test2('/Users/xujiaqi/Desktop/test')\n\n\n# python3 下这么写\n\n#-*- encoding: utf-8 -*-\nimport os\ndef check(func):\n def _check(dir):\n for filename in os.listdir(dir):\n filename = os.path.join(dir,filename)\n filename = os.path.normcase(filename)\n try:\n with open(filename,mode='r',encoding='utf-8') as f:\n func(f.read())\n except:\n print(\"\\n\"+\"此文件打不开或不存在:{error}\".format(error=filename))\n\n return _check\n\n@check\ndef test(filename):\n print(filename)\n\n@check\ndef test2(filename):\n print(filename)\n\nif __name__ == '__main__':\n test('/Users/xujiaqi/Desktop/spider')\n test2('/Users/xujiaqi/Desktop/test')\n'''\n装饰器遍历目录 Record end\n'''\n" }, { 
"alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.8780487775802612, "avg_line_length": 19.5, "blob_id": "ddfb83523e06b112802e3e50f1cf5192e36033b1", "content_id": "18abc7ba70ef261d775137d495f30529d2197aa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 176, "license_type": "no_license", "max_line_length": 46, "num_lines": 4, "path": "/Company-project/docker-tomcat/README.md", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "2017-11-01\n\n优化了一下Dockerfile,现在只要把几个包放在和Dockerfile同目录下就可以了。\n使用的话需要更改项目的名称,还有项目的路径!\n" }, { "alpha_fraction": 0.61654132604599, "alphanum_fraction": 0.6240601539611816, "avg_line_length": 25.799999237060547, "blob_id": "b0e1de62bc7b7b056be05a5374e3c442adec78d4", "content_id": "20fd63f61fb566bb5f4912af8946c16937477dab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 76, "num_lines": 5, "path": "/Company-project/python-code/moudle1/c2.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\ninfos = dir()\n\nprint(__file__)\nprint(__package__ or \"当前模块不属于任何包\") # 容错功能,如果__package__为None 输出 \"当前模块不属于任何包\"" }, { "alpha_fraction": 0.6188119053840637, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 17.363636016845703, "blob_id": "18ec7e2c1ef3725361cc4ea550724d64434e1744", "content_id": "5f0da5a83b17a063daa43e6f68341ac1007729e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 28, "num_lines": 11, "path": "/shiyanlou-flask/simpledu/handlers/__init__.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/11/4 上午10:37'\n\nfrom .admin import admin\nfrom .course import course\nfrom .front import front\nfrom .user import user\nfrom .seven import seven\n" }, { "alpha_fraction": 0.46679139137268066, "alphanum_fraction": 0.5257249474525452, "avg_line_length": 17.771930694580078, "blob_id": "242e0a27b5be8e44aa83e320d55028c34a1d6ce2", "content_id": "944e6f581397f3e0108ee3b4b28faae52287f197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 79, "num_lines": 57, "path": "/Company-project/crfp.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/3/20 下午7:08'\n\nline = {}\nj = []\ndef run(Totallist,Correctlist):\n\n tote = len(Totallist)\n\n total = len(Correctlist)\n\n for i in Correctlist:\n if Correctlist.count(i)>1:\n line[i] = Correctlist.count(i)\n\n if line:\n num = [g for g in Totallist if g in Correctlist]\n\n qu = len(num)\n\n dRmoval = qu\n else:\n num = [g for g in Totallist if g in Correctlist]\n\n qu = len(num)\n\n dRmoval = qu\n\n R = float(dRmoval) / float(total) * 100\n\n P = float(dRmoval) / float(tote) * 100\n\n F = 2 * P * R / (P+R)\n\n print totallist\n print correctlist\n print \"\\n\"\n\n print u\"准确率: %0.2f\" %(R)+\"%\"\n print u\"召回率: %0.2f\" %(P)+\"%\"\n print u\"F-测度值: %0.2f\" %(F)+\"%\"\n\n print \"\\n\"\n\n print u\"总实体数:%s\" % tote\n print u\"总实体识别数:%s\" % total\n print u\"正确识别数:%s\" % dRmoval\n\n\ntotallist = 
['123','abc','678','789','666','777','999','jjh','jjh','jjh','jjh']\n\ncorrectlist = ['123','123','002','003','000','000','022']\n\nrun(totallist,correctlist)" }, { "alpha_fraction": 0.6263577342033386, "alphanum_fraction": 0.6270818114280701, "avg_line_length": 27.79166603088379, "blob_id": "52bea642fe732f322a65989192312449ad8a3160", "content_id": "9f71332e39b2b5edbbe759c82f5b900b6134e51b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1419, "license_type": "no_license", "max_line_length": 60, "num_lines": 48, "path": "/Company-project/meizitu-2/meizitu/pipelines.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom pymongo import MongoClient\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.exceptions import DropItem\n\nclass MeizituPipeline(object):\n\n def __init__(self, mongo_uri, mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DATABASE')\n )\n\n def open_spider(self, spider):\n self.client = MongoClient(self.mongo_uri)\n self.database = self.client[self.mongo_db]\n self.database.authenticate('账号','密码')\n self.dbs = self.client[\"数据库名\"]\n self.db = self.dbs[\"mztu\"] # 表明\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n self.db.insert(dict(item))\n return item\n\n'''\n自定义下载图片管道\n'''\nclass MeizituImagesPipeline(ImagesPipeline):\n\n def get_media_requests(self, item, info):\n for image_url in item['pic']:\n yield scrapy.Request(image_url)\n\n def item_completed(self, results, item, info):\n image_paths = [x['path'] for ok, x in results if ok]\n if not image_paths:\n raise DropItem(\"Item contains no images\")\n item['image_paths'] = image_paths\n return item" }, { "alpha_fraction": 0.4688572883605957, "alphanum_fraction": 0.4919078052043915, "avg_line_length": 35.74774932861328, "blob_id": "731c14b4ef07df1d428b9ea44ec6f783a965fd87", "content_id": "45b0b2a13d7ea682bf74af415a160b50c132e689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4214, "license_type": "no_license", "max_line_length": 128, "num_lines": 111, "path": "/Company-project/circ/circ/spiders/myspider.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport re\n\nimport requests\nimport time\nfrom scrapy.spiders import CrawlSpider, Rule, Request\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor ##配合Rule进行URL规则匹配\nfrom scrapy.selector import Selector\nfrom circ.mongodb import CMongo\nfrom circ.items import CircItem\nfrom bs4 import BeautifulSoup as bs\n\n\nclass Myspider(CrawlSpider):\n name = 'circ'\n allowed_domains = ['www.circ.gov.cn/']\n start_urls = [\n 'http://www.circ.gov.cn/web/site0/tab5216/module14349/', # 遍历所有分页\n 'http://www.circ.gov.cn/web/site0/tab5216/', # 遍历所有文章地址\n ]\n\n rules = (\n Rule(LxmlLinkExtractor(allow=(r'page([0-9]+)'))),\n Rule(LxmlLinkExtractor(allow=(r'info([0-9]+)')), callback='parse_item'),\n )\n\n def parse_item(self, response):\n mongo = CMongo()\n post = {\n 'url':response.url,\n 'code':0\n }\n ciurl = mongo.get_more()\n ciurl_count = mongo.get_info(response.url)\n if ciurl_count == 0:\n mongo.add_one(post)\n for url in ciurl:\n '''文章遍历结果'''\n if url[\"code\"] == 0:\n ur = 
requests.get(url[\"url\"])\n ur.encoding = 'utf-8'\n yield self.parse_url(ur,url[\"_id\"])\n\n '''搜索内容'''\n for page in range(1, 368):\n mongo = CMongo()\n url = 'http://www.circ.gov.cn/dig/search.action?' \\\n 'siteId=&ty=&w=false&f=&dr=true&p={page}' \\\n '&sr=score+desc&rp=&advtime=&advrange=title&fq=' \\\n '&ext=siteid%3A0&firstq=%E4%BA%92%E8%81%94%E7%BD%91%E4%BF%9D%E9%99%A9' \\\n '&q=%E4%BA%92%E8%81%94%E7%BD%91%E4%BF%9D%E9%99%A9'.format(page=page)\n res = requests.get(url)\n soup = bs(res.text, 'lxml')\n con_list = soup.find('div', class_='cen_list').find_all('h3')\n for u in con_list:\n if 'http://news.xinhuanet.com' not in u.find('a')['href']:\n response = requests.get(u.find('a')['href'])\n response.encoding = 'utf-8'\n curl = mongo.get_more()\n ciurl_count = mongo.get_info(response.url)\n post = {\n 'url':response.url,\n 'code':0\n }\n if ciurl_count == 0:\n mongo.add_one(post)\n for url in curl:\n ur = requests.get(url[\"url\"])\n ur.encoding = 'utf-8'\n yield self.parse_url(ur,url[\"_id\"])\n\n def parse_url(self, response,oid):\n mongo = CMongo()\n item = CircItem()\n res = Selector(response)\n\n '''如果请求成功,给url添加状态码'''\n re = requests.get(response.url)\n if re.status_code == 200:\n mongo.update(oid=oid)\n\n\n '''文章url'''\n item['url'] = response.url\n\n '''当前位置'''\n place = res.xpath(\n '//*[@id=\"ess_ctr14264_ListC_Info_ctl00_ctl22_essSOURCEBREADCRUMB_BreadCrumbCtl_lblBreadCrumb\"]/a/text()').extract()\n if place != []:\n item[\"place\"] = \">\".join(place).replace('\\r\\n', \"\").replace('\\t', \"\")\n\n '''文号'''\n con_number = res.xpath('//*[@id=\"zoom\"]/p[1]/text()').extract()\n if con_number != []:\n item[\"number\"] = \"\".join(con_number).replace('\\r\\n', \"\").replace('\\t', \"\")\n\n '''标题'''\n title = res.xpath('//*[@id=\"tab_content\"]/tbody/tr[1]/td/text()').extract()\n item[\"title\"] = \"\".join(title).replace('\\r\\n', \"\").replace('\\t', \"\")\n\n '''文章内容'''\n con_list = res.xpath('//*[@id=\"zoom\"]/p/text()').extract()\n con = [con for con in con_list]\n item[\"con\"] = \"\".join(con).replace('\\r\\n', \"\").replace('\\t', \"\")\n\n '''发布时间'''\n release_time = res.xpath('//*[@id=\"tab_content\"]/tbody/tr[2]/td/text()[1]').re(\n r'发布时间:(\\d{0,4}-\\d{0,2}-\\d{0,2})')\n item[\"release_time\"] = \"\".join(release_time).replace('\\r\\n', \"\").replace('\\t', \"\")\n time.sleep(0.7)\n return item" }, { "alpha_fraction": 0.5844155550003052, "alphanum_fraction": 0.5844155550003052, "avg_line_length": 10.142857551574707, "blob_id": "13af4e6e902c95cf5747302f2cdfcdccddc3ef30", "content_id": "147a33d03c8ad4660f9a93ad92e10ac09a19f7b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/Company-project/python-code/moudle1/c4.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "import re\n \ns = \"c|c++|php|Python|Java\"\n\nr = re.findall('A-Za-z',s)\n\nprint(r)" }, { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.7169811129570007, "avg_line_length": 16.66666603088379, "blob_id": "a0fd6e001e5fd739176fb3714746dfe3318b1e57", "content_id": "b02316880f147b001fb7373774e6b9a633d73f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/README.md", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# 主要记录一些历史项目和配置\n\n我的博客 
[soo9s](https://www.soo9s.com)\n" }, { "alpha_fraction": 0.5075376629829407, "alphanum_fraction": 0.6030150651931763, "avg_line_length": 10.764705657958984, "blob_id": "1da4918dab1fb07568c6515f72145601f4895e4d", "content_id": "29648887aa83148f4e48be5477ab66afd242e230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/Company-project/jiepai/config.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '4/23/17 10:06 PM'\n\nMONGO_URL='mongodb://localhost:27017/'\n\nMONGO_DB='jiandan'\n\nMONGO_TABLE='toutiao'\n\nSTART = 1\n\nEND = 20\n\nKEYWORD = 'jiepai'" }, { "alpha_fraction": 0.3378378450870514, "alphanum_fraction": 0.4864864945411682, "avg_line_length": 14, "blob_id": "6997e29e12c3c234a866090e82f2ce5ad759c761", "content_id": "b7dcbf32c5626bed145999586e7f362a2113181c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/shiyanlou-flask/simpledu/__init__.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/11/2 上午11:04'" }, { "alpha_fraction": 0.6013824939727783, "alphanum_fraction": 0.6036866307258606, "avg_line_length": 21.842105865478516, "blob_id": "543bfc20aec7be1101b278d261c0c6365344accb", "content_id": "3dbeb414782170c28fbb64e6f1f476d0e2a48ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 488, "license_type": "no_license", "max_line_length": 51, "num_lines": 19, "path": "/Company-project/zhengce/zhengce/items.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy import Field, Item\n\n\nclass ZhengceItem(Item):\n url = Field() # 文章链接\n place = Field() # 当前位置\n con = Field() # 文章内容\n download = Field() # 下载链接\n title = Field() # 标题\n filename = Field() # 附件名称\n tax_number = Field() # 税务号\n time = Field() # 时间\n" }, { "alpha_fraction": 0.5636363625526428, "alphanum_fraction": 0.6181818246841431, "avg_line_length": 10.066666603088379, "blob_id": "eac3f60f48ae0733695a5ba9fab4888d6c46fe15", "content_id": "86de5a6bb0992d6b7c45fa359e28eb18886521ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 29, "num_lines": 15, "path": "/Company-project/python-code/moudle1/c7.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\n匹配一次和匹配N次\n\"\"\"\nimport re\n\n# * 匹配0次或者无限次\n# + 匹配1次或者无限次\n# ? 
匹配0次或者1次\n\na = 'pytho0npython11pythonn2'\n\nr = re.findall('python+',a)\n\nprint(r)" }, { "alpha_fraction": 0.536912739276886, "alphanum_fraction": 0.6107382774353027, "avg_line_length": 17.75, "blob_id": "05845736cc2ab64a8e9d47bccd556d39b2a0ceb7", "content_id": "18a9ce311f6e00489156944ab98a1875feadcdd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/Company-project/meizitu-2/run.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '8/30/17 11:05 AM'\n\nfrom scrapy.cmdline import execute\nexecute(['scrapy','crawl','meizitu'])" }, { "alpha_fraction": 0.5541740655899048, "alphanum_fraction": 0.5595026612281799, "avg_line_length": 30.27777862548828, "blob_id": "bd11a64f42d3d6192b224d4cc9af6ef7368345af", "content_id": "cdb412ec63762d8eb334043dd4a713001f309681", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 64, "num_lines": 18, "path": "/Company-project/ValidateFriendsUrl.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\ndef start_urls():\n url = 'https://www.soo9s.com/friends.py'\n res = requests.get(url)\n Soup = bs(res.text,'lxml')\n urls = Soup.find('div',class_='link-box').find_all('a')\n for friends_url in urls:\n f_url = friends_url['href']\n try:\n requests.get(f_url)\n print(\"网站 {f_url} 可以正常访问!\".format(f_url=f_url))\n except Exception:\n print('网站 {f_url} 不可以正常访问!'.format(f_url=f_url))\nif __name__ == '__main__':\n start_urls()\n" }, { "alpha_fraction": 0.5380710363388062, "alphanum_fraction": 0.5939086079597473, "avg_line_length": 13.142857551574707, "blob_id": "798050212d78afa3ad50e5ab0ea27f14d4d063fd", "content_id": "2db69540d4c28075c314014dae36b7ece4e74670", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 35, "num_lines": 14, "path": "/shiyanlou-flask/simpledu/handlers/seven.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/11/4 上午11:17'\n\nfrom flask import Blueprint\n\nseven = Blueprint('seven',__name__)\n\n\[email protected]('/seven')\ndef index():\n return \"seven\"" }, { "alpha_fraction": 0.5122525691986084, "alphanum_fraction": 0.5202639102935791, "avg_line_length": 33.225807189941406, "blob_id": "0baaf035efcc95672180817d105e40ad1fcf8577", "content_id": "f1eaa8d7250e4592ba48949fc417a8a49f11c72f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2134, "license_type": "no_license", "max_line_length": 88, "num_lines": 62, "path": "/Company-project/zhengce/zhengce/spiders/myspider.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport requests\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom scrapy.spiders import CrawlSpider, Request\nfrom scrapy.selector import Selector\nfrom zhengce.items import ZhengceItem\n\n\nclass MofgovSpider(CrawlSpider):\n name = \"zhengce\"\n allowed_domains = [\"url\"]\n\n startUrl = 'url'\n endurl = '.htm'\n\n def start_requests(self):\n 
for i in range(0, 23):\n url = self.startUrl + \"index_\" + str(i) + self.endurl\n yield Request(url, self.getUrl)\n yield Request(self.startUrl, self.getUrl)\n\n def getUrl(self, response):\n res = Selector(response)\n\n href = res.xpath('//*[@id=\"id_bl\"]/tr/td/a/@href').extract()\n\n for url in href:\n item = ZhengceItem()\n item[\"url\"] = url # 文章url\n html = requests.get(url)\n html.encoding = 'gb2312'\n res = bs(html.text, 'lxml')\n\n places = res.find('table', bgcolor=\"#EBF4FD\").find_all('a')\n item[\"place\"] = places[0].text + \">\" + places[1].text + \">\" + places[2].text\n\n title = res.find('td', class_='font_biao1')\n item[\"title\"] = \"\".join(title.text).strip()\n\n cons = res.find('div', class_=\"TRS_Editor\").find_all('p')\n tax_number = res.find('div', class_=\"TRS_Editor\").find('p')\n time = res.find('div', class_=\"TRS_Editor\").find_all('p')[-1].get_text()\n item[\"time\"] = \"\".join(time)\n item[\"tax_number\"] = tax_number.text\n con = [con.text for con in cons]\n item[\"con\"] = ''.join(con)\n try:\n downows = res.find('span', id=\"appendix\").find_all('a')\n if downows != []:\n\n filename = [down.get_text() for down in downows]\n item[\"filename\"] = filename # 附件名称\n\n url = \"/\".join(item[\"url\"].split('/')[:-1])\n file_url = [url + down[\"href\"][1:] for down in downows]\n item[\"download\"] = file_url\n except:\n pass\n\n yield item\n" }, { "alpha_fraction": 0.5707762837409973, "alphanum_fraction": 0.621004581451416, "avg_line_length": 14.714285850524902, "blob_id": "4575ccf3900dbf3ab64fc31b8cc0241b2d51d733", "content_id": "6ee7782544b340c49b5615c56d993ebd8928b093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/shiyanlou-flask/simpledu/handlers/admin.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\n__author__ = 'se7en'\n\n__date__ = '17/11/4 上午10:37'\n\nfrom flask import Blueprint,render_template\n\nadmin = Blueprint('admin',__name__)\n\n\[email protected]('/admin')\ndef admin_index():\n return \"admin\"" }, { "alpha_fraction": 0.6130790114402771, "alphanum_fraction": 0.6158038377761841, "avg_line_length": 20.58823585510254, "blob_id": "538dd4290b114b6b788d2d15c456accfe7efcd25", "content_id": "8866cabf95abcec0461030a8684bb92ea3be2a20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/Company-project/circ/circ/items.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy import Item, Field\n\n\nclass CircItem(Item):\n url = Field() # 文章链接\n number = Field() # 文号\n place = Field() # 位置\n title = Field() # 标题\n con = Field() # 内容\n release_time = Field() # 发布时间\n" }, { "alpha_fraction": 0.7350343465805054, "alphanum_fraction": 0.7438665628433228, "avg_line_length": 13.557143211364746, "blob_id": "292f41df15eb4487883c999afbff71894a46208e", "content_id": "81c11548a47e2838b3c1e3908e86ee0cc28bce32", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Markdown", "length_bytes": 1695, "license_type": "no_license", "max_line_length": 80, "num_lines": 70, "path": "/.github/ISSUE_TEMPLATE/---------.md", 
"repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "---\nname: 请认真描述您的问题\nabout: 根据下面的格式总结问题\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n> 提问前确保你做过了的\n\n```\n当遇到了Flask问题/Python问题时,请遵循以下的步骤尝试解决:\n\n检查代码是否有语法错误。\n查看相应的源码和文档。\n使用Google/StackOverflow搜索\n在github中查找过类似功能,或者项目\n\ngithub 检索方法 stars:>10000 python\n\n```\n\n---\n> 提问的技巧\n\n```\n如果问题主要涉及扩展,请在标题内包含扩展名称\n最小化的、完整的、可复现相关问题的代码\n期望效果\n实际效果(完整的错误堆栈信息)\n你的操作步骤和尝试过的解决办法\n操作系统和语言、库等的版本\n最后在代码仓库中 提交 issue,内容格式和上面一致\n```\n\n```\n例如:\n\n**期望的行为**\n\n期望在网站运行的同时,每隔一段时间运行一个价格检测程序(即flushpirce),然后发送邮件。\n\n# -*- coding=utf-8 -*-\nfrom app import create_app, db\nfrom flask_script import Manager, Shell\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom app.admin.views import flushprice\n\napp = create_app('default')\n\nsched = BackgroundScheduler()\nsched.add_job(flushprice, 'interval', seconds=1) //flushprice是定义在admin.view下的程序\nsched.start()\n\nif __name__ == '__main__':\n manager.run()\n \n**实际的行为**\n\n程序运行后,只有访问网站时才会触发flushprice的运行,如果不对网站进行访问就不会运行。\n\n**环境版本**\n\n操作系统:Mac\nPython:3.7\nFlask:apscheduler(非flask-apscheduler)\n如果问题和扩展相关,请一并写出扩展的版本。\n\n```\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 4, "blob_id": "15dbe08c2ebc4c648b4e6b47c4070c2a3086febb", "content_id": "6ed2909481a890a80c70b7e55761ab63dc1df9ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 13, "num_lines": 5, "path": "/Company-project/README.md", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "在这里:\n\n还是主要以纪录和学习为主。\n\n加油!" }, { "alpha_fraction": 0.568493127822876, "alphanum_fraction": 0.6027397513389587, "avg_line_length": 10.307692527770996, "blob_id": "a77b18ff2255a67373c17b2381747fff6fa090d4", "content_id": "20c40d09ed4b004161a06c085cb71290e815d658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 32, "num_lines": 13, "path": "/Company-project/python-code/moudle1/c6.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n\n\"\"\"\n贪婪模式和非贪婪模式\npython 默认是贪婪模式\n\"\"\"\nimport re\n\na = 'python javabphp678php sdsd'\n\nr = re.findall('[a-z]{6,}?',a)\n\nprint(r)" }, { "alpha_fraction": 0.5678790211677551, "alphanum_fraction": 0.5768815279006958, "avg_line_length": 21.770492553710938, "blob_id": "9c576e8b01dac12640efd704df628e055c674a2a", "content_id": "327e9874cca3318f5c3f4afa59c7d17c70cca67e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2819, "license_type": "no_license", "max_line_length": 70, "num_lines": 122, "path": "/Company-project/jiepai/spider.py", "repo_name": "litt1eseven/python-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom requests.exceptions import RequestException\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nfrom urllib import urlretrieve\nfrom multiprocessing import Pool\nimport requests\nimport json\nimport re\nimport os\n\n\n\nfrom config import *\n\nclient = MongoClient(MONGO_URL,connect=False)\n\ndb = client[MONGO_DB]\n\n\n\n__author__ = 'se7en'\n\n__date__ = '4/23/17 4:13 PM'\n\n\ndef get_index(offset, keyword):\n data = {\n 'offset': offset,\n 'format': 'json',\n 'keyword': keyword,\n 'autoload': 'true',\n 'count': '20',\n 'cur_tab': 1,\n }\n\n url 
= 'http://www.toutiao.com/search_content/?'\n    try:\n\n        response = requests.get(url, params=data)\n\n        if response.status_code == 200:\n            return response.json()\n\n        return None\n\n    except RequestException:\n        print u\"Page request failed!\"\n        return None\n\n\ndef parse_page_index(html):\n    if html and 'data' in html.keys():\n        for item in html.get('data'):\n            yield item.get('article_url')\n\ndef get_page_detail(url):\n    try:\n\n        response = requests.get(url)\n\n        if response.status_code == 200:\n            return response.text\n\n        return None\n\n    except RequestException:\n        print u\"Page request failed! {}\".format(url)\n        return None\n\n\ndef parse_page_detail(url, imgurl):\n    soup = BeautifulSoup(url,'lxml')\n    title = soup.select('title')[0].get_text()\n    # print title\n    images_pattern = re.compile('var gallery = (.*?);', re.S)\n    result = re.search(images_pattern,url)\n    if result:\n        data = json.loads(result.group(1))\n        if data and 'sub_images' in data.keys():\n            sub_images = data.get('sub_images')\n            images = [item.get('url') for item in sub_images]\n            for image in images:download_images(image)\n            return {\n                'title': title,\n                'url' : imgurl,\n                'images': images,\n            }\n\n\ndef save_to_mongo(result):\n    if result is not None:\n\n        if db[MONGO_TABLE].insert(result):\n            print u\"Saved to MongoDB - {}\".format(result)\n            return True\n    return False\n\ndef download_images(html):\n    print u\"Downloading \" + html\n    path = os.getcwd() + '/images'\n    if not os.path.exists(path):\n        os.mkdir(path)\n    name = html.split('/')[-1]\n    urlretrieve(html, '{path}/{name}.jpg'.format(path=path, name=name))\n\n\ndef main(offset):\n    html = get_index(offset, KEYWORD)\n    for url in parse_page_index(html):\n        html = get_page_detail(url)\n        if html:\n            result = parse_page_detail(html,url)\n            save_to_mongo(result)\n\nif __name__ == '__main__':\n    groups = [x*20 for x in range(START,END)]\n    try:\n        pool = Pool()\n        pool.map(main,groups)\n    except Exception:\n        # keep the crawl alive if a single worker raises\n        pass" } ]
39
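The jiepai/spider.py record above pulls gallery image URLs out of an inline "var gallery = ...;" JavaScript variable by regex-matching it and feeding the capture group to json.loads. Below is a minimal, self-contained sketch of just that extraction step; the demo_html sample page is invented for illustration and is not part of the repository.

import json
import re

# Made-up stand-in for a scraped gallery page (an assumption, not real Toutiao markup).
demo_html = """
<html><head><title>demo gallery</title></head>
<body><script>
var gallery = {"sub_images": [{"url": "http://example.com/a.jpg"},
                              {"url": "http://example.com/b.jpg"}]};
</script></body></html>
"""

# Same pattern the spider compiles: non-greedy up to the first ';',
# with re.S so '.' also matches the newlines inside the script block.
match = re.search(r'var gallery = (.*?);', demo_html, re.S)
if match:
    data = json.loads(match.group(1))
    images = [item.get('url') for item in data.get('sub_images', [])]
    print(images)  # ['http://example.com/a.jpg', 'http://example.com/b.jpg']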
rijulg/fafml
https://github.com/rijulg/fafml
c53da06f3b8201a641a01b65f8f61791f02d46a9
31985d636ea05e42862eb516e324d200719c4600
da1b977b7846469a92b71ee8164a9594163111ca
refs/heads/master
2020-04-03T05:58:40.939699
2018-10-30T04:03:19
2018-10-30T04:03:19
155,061,585
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.596969723701477, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 26.5, "blob_id": "0c0a88b7c71f1b929a0bd250c3f26e7a53a21543", "content_id": "6c5920dcafa3b999e892b516dacb5dbd92d912c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 330, "license_type": "permissive", "max_line_length": 77, "num_lines": 12, "path": "/convertTifToJpg.bash", "repo_name": "rijulg/fafml", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Needs imagemagick, libjpeg-progs\n\nfor f in **/*.tif **/*.tiff\ndo \n basename=${f%.*}\n echo \"Converting ${f} TIFF image to JPEG image (compression level 100%)\"\n convert \"${f}\" \"${basename}.jpeg\"\n djpeg -pnm \"${basename}.jpeg\" | cjpeg -quality 100 > \"${basename}-z80.jpeg\"\n chmod 644 \"${basename}-z80.jpeg\"\ndone " }, { "alpha_fraction": 0.7776735424995422, "alphanum_fraction": 0.7861163020133972, "avg_line_length": 61.764705657958984, "blob_id": "52b9038a48ba1c297b451fb25fb85be22e18f209", "content_id": "a273f03f03deb984c94d275192faf03abec4328c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1066, "license_type": "permissive", "max_line_length": 249, "num_lines": 17, "path": "/README.md", "repo_name": "rijulg/fafml", "src_encoding": "UTF-8", "text": "# Feasibility Analysis Framework for Machine Learning\n\nThis project serves as a framework to quickly test a range of machine learning models on given data set with segmentation of test data into sets of increasing amount of data partitioned from the original dataset.\n\nThe framework works based on the retraining algorithm implementation available in tensorflow\n\n## Executing tests\n\n1. Make a modules file and write down all the models you want to use for training (ex. modules.csv)\n2. Load all your files in a test folder (the framework generates a .temp folder in that folder for storing segments/partitions of data, intermediate and final results)\n3. Execute the tests as\n\n ```bash\n python analyse.py --image_dir=D:/Temp/flower_photos --modules=D:/Temp/modules.csv --trainingSteps=100 --segmentSize=50\n ```\n\n4. The results can then be obtained from \"D:\\Temp\\flower_photos\\\\.temp\" folder. The results folder will contain the final models for different segment size and algorithms, and the .logs folder will contain the logs of the training of all the models." 
}, { "alpha_fraction": 0.6112711429595947, "alphanum_fraction": 0.6140263080596924, "avg_line_length": 25.9797306060791, "blob_id": "5e5cbb4d53784d8079c75efaa7e26a4b92a126b4", "content_id": "8eefbacf8a5c71366d3f80ef559b7717e4dc6541", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7985, "license_type": "permissive", "max_line_length": 85, "num_lines": 296, "path": "/analyse.py", "repo_name": "rijulg/fafml", "src_encoding": "UTF-8", "text": "# ###########################################\n# Analyse.py\n# An analysis framework for feasibility of application of Machine Learning Algorithms\n# Currently limited to test feasibility of image classification problems.\n# @author Rijul Gupta\n# @since 28 October 2018\n\nimport argparse\nfrom subprocess import Popen, PIPE, STDOUT\nimport os\nimport shutil\nimport re\nimport matplotlib.pyplot as plt\n\nDATAPATH = ''\n\"\"\"\nstr: DATAPATH\nThe basepath where to get the data from\n\"\"\"\n\nBASEPATH = ''\n\"\"\"\nstr: BASEPATH\nThe basepath where to work on the data used for analysis\n\"\"\"\n\nMODELSPATH = ''\n\"\"\"\nstr: MODELSPATH\nThe path where to store the models information\n\"\"\"\n\nSEGMENTSPATH = ''\n\"\"\"\nstr: SEGMENTSPATH\nThe path where to store the segments\n\"\"\"\n\nRESULTSPATH = ''\n\"\"\"\nstr: RESULTSPATH\nThe path where to store the results\n\"\"\"\n\nSEGMENTSIZE = 100\n\"\"\"\nint: SEGMENTSIZE\nThe minimum size of one segment\n\"\"\"\n\nSEGMENTS = []\n\"\"\"\narray: SEGMENTS\nSegments of data to be tested\n\"\"\"\n\nSEGMENTSIZES = []\n\"\"\"\narray: SEGMENTSIZES\nSizes of the segments of data to be tested\n\"\"\"\n\nMODULESPATH = './modules.csv'\n\"\"\"\nstr: MODULESPATH\nThe path of the file that contains the modules that should be used for analysis\n\"\"\"\n\nMODULES = []\n\"\"\"\narray: MODULES\nModules to test feasibility with\n\"\"\"\n\nRESULTS = []\n\"\"\"\narray: RESULTS\nThe results of the analysis\n\"\"\"\n\nTRAININGSTEPS = 0\n\"\"\"\nint: TRAININGSTEPS\nThe number of steps to train for\n\"\"\"\n\ndef createTemp():\n \"\"\"\n creates a temp directory for working on the analysis\n \"\"\"\n global BASEPATH, SEGMENTSPATH, MODELSPATH, RESULTSPATH\n BASEPATH = os.path.join(DATAPATH,'.temp')\n SEGMENTSPATH = os.path.join(BASEPATH,'segments')\n MODELSPATH = os.path.join(BASEPATH,'models')\n RESULTSPATH = os.path.join(BASEPATH,'results')\n '''Cleaning old files'''\n if os.path.exists(BASEPATH) and os.path.isdir(BASEPATH):\n shutil.rmtree(BASEPATH)\n '''Preparing folders for new analysis'''\n os.mkdir(BASEPATH)\n os.mkdir(SEGMENTSPATH)\n os.mkdir(MODELSPATH)\n os.mkdir(RESULTSPATH)\n\ndef getDirs(dir):\n \"\"\"\n retrieves directories from given directory\n \"\"\"\n return [name for name in os.listdir(dir)\n if os.path.isdir(os.path.join(dir, name))]\n\ndef getDataDirs():\n \"\"\"\n retrieves the data directories from DATAPATH\n \"\"\"\n dirs = getDirs(DATAPATH)\n dirs.remove('.temp')\n return dirs\n\ndef getFiles(dir):\n \"\"\"\n retrieves files from a given directory\n \"\"\"\n dir = os.path.join(DATAPATH, dir)\n return [name for name in os.listdir(dir)\n if os.path.isfile(os.path.join(dir, name))]\n\ndef getNumFiles(dir):\n \"\"\"\n retrieves the number of files in a given directory\n \"\"\"\n return len(getFiles(dir))\n\ndef segmentData():\n \"\"\"\n rearranges the data into segments for subsequent testing\n \"\"\"\n global SEGMENTS\n datadirs = getDataDirs()\n for dir in datadirs:\n numFiles = getNumFiles(dir)\n if ('minSegment' not in locals() or numFiles < 
minSegment):\n minSegment = numFiles\n segments = (minSegment // SEGMENTSIZE) + 1\n for segment in range(segments-1, -1, -1):\n segmentPath = os.path.join(SEGMENTSPATH,'segment'+str(segment))\n os.mkdir(segmentPath)\n SEGMENTSIZES.append(minSegment // (segment+1))\n for dir in datadirs:\n files = getFiles(dir)\n dirPath = os.path.join(segmentPath,dir)\n os.mkdir(dirPath)\n for num in range(minSegment // (segment+1)):\n file = files[num]\n src = os.path.join(DATAPATH, dir, file)\n dst = os.path.join(dirPath, file)\n shutil.copy(src, dst)\n SEGMENTS.append(segmentPath)\n\ndef loadModules():\n \"\"\"\n loads the modules to test for feasibility\n \"\"\"\n global MODULES\n with open(MODULESPATH) as file:\n lines = [line.rstrip('\\n') for line in file]\n MODULES = lines\n\ndef analyseData(segment = None, module = None):\n \"\"\"\n analyses the given data for given module\n\n Args:\n segment (str): Path of the segment to test with\n module (str): Identifier of the module to test with\n \n Returns:\n object: result of the analysis\n \"\"\"\n modName = re.sub('[^A-Za-z0-9]+', '', module)\n key = modName + '_' + os.path.basename(segment)\n bottleneckDir = os.path.join(BASEPATH, '.bottleneck', key)\n graphDir = os.path.join(MODELSPATH, key)\n segModDir = os.path.join(RESULTSPATH, key)\n summariesDir = os.path.join(segModDir, 'summary')\n modelsDir = os.path.join(segModDir, 'model')\n logDir = os.path.join(BASEPATH, '.logs')\n logfile = os.path.join(logDir, key+'.out.log')\n if not os.path.exists(logDir):\n os.makedirs(logDir)\n log = open(logfile, 'a+')\n\n cmd = [ 'python', 'retrain.py',\n '--image_dir', segment,\n '--tfhub_module', module,\n '--summaries_dir', summariesDir,\n '--bottleneck_dir', bottleneckDir,\n '--saved_model_dir', modelsDir,\n '--output_graph', graphDir\n ]\n if (TRAININGSTEPS != 0):\n cmd.append('--how_many_training_steps')\n cmd.append(str(TRAININGSTEPS))\n return Popen(cmd, stdout=log, stderr=STDOUT)\n\ndef processResults():\n \"\"\"\n Parses and stores the results\n \"\"\"\n logDir = os.path.join(BASEPATH, '.logs')\n logs = getFiles(logDir)\n results = [['Model', 'Segment Size', 'Accuracy', 'N']]\n accuracies = []\n for index, log in enumerate(logs):\n modelName = log.split('_')[0]\n file = open(os.path.join(logDir, log), 'r')\n for line in file:\n if re.search('^.*Final test accuracy.*', line):\n match = re.match(r'.* = (.*)\\% \\(N=(.*)\\)', line)\n accuracy = match.group(1)\n N = match.group(2)\n result = [modelName, SEGMENTSIZES[index], accuracy, N]\n accuracies.append(accuracy)\n results.append(result)\n csvString = '\\n'.join([','.join(str(v) for v in x) for x in results])\n resultsDir = os.path.join(BASEPATH, 'results')\n resultsFile = os.path.join(resultsDir, 'result.csv')\n file = open(resultsFile, 'w+')\n file.write(csvString)\n plotFile = os.path.join(resultsDir, 'results.png')\n plt.xlabel('$Segments$ $(number of items)$')\n plt.ylabel('$Accuracy$ $(%)$')\n plt.grid(False)\n plt.plot(SEGMENTSIZES, accuracies, 'r-')\n plt.savefig(plotFile)\n\ndef prepareData():\n \"\"\"\n prepares the data for testing\n \"\"\"\n createTemp()\n segmentData()\n\ndef main():\n \"\"\"\n main function to start the analysis\n \"\"\"\n prepareData()\n loadModules()\n processes = []\n for segment in SEGMENTS:\n for module in MODULES:\n processes.append(analyseData(segment, module))\n print('total processes running: '+str(len(processes)))\n for index, process in enumerate(processes):\n print('waiting for process '+str(index))\n process.wait()\n processResults()\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\n type=str,\n default='',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--modules',\n type=str,\n default='',\n help='Path to file that contains modules that should be used.'\n )\n parser.add_argument(\n '--trainingSteps',\n type=int,\n default='',\n help='No of steps to be used for training.'\n )\n parser.add_argument(\n '--segmentSize',\n type=int,\n default='',\n help='Minimum size of segments to be used.'\n )\n flags, unparsed = parser.parse_known_args()\n if not flags.image_dir:\n raise Exception('Must set flag --image_dir.')\n if flags.modules:\n MODULESPATH = flags.modules\n if flags.trainingSteps:\n TRAININGSTEPS = flags.trainingSteps\n if flags.segmentSize:\n SEGMENTSIZE = flags.segmentSize\n DATAPATH = os.path.abspath(flags.image_dir)\n main()" } ]
3
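analyse.py above derives its data partitions from the rarest class: with minSegment images in the smallest class it creates (minSegment // SEGMENTSIZE) + 1 segments whose per-class sizes are minSegment // (segment + 1). A standalone sketch of just that arithmetic follows; the example numbers are illustrative, not taken from the repository.

# Reproduces the segment-size arithmetic from segmentData() in analyse.py.
def segment_sizes(min_segment, segment_size):
    # One segment per full multiple of segment_size, plus the full data set.
    segments = (min_segment // segment_size) + 1
    # Counting down mirrors the range(segments-1, -1, -1) loop, so the
    # returned sizes grow from the smallest partition up to min_segment.
    return [min_segment // (segment + 1) for segment in range(segments - 1, -1, -1)]

# e.g. 175 images in the rarest class with --segmentSize=50:
print(segment_sizes(175, 50))  # [43, 58, 87, 175]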
testsite3984/markdowntohtml
https://github.com/testsite3984/markdowntohtml
6b6ee9831903559b7be2b301d7be3bc8fa198aeb
8fed0dbcde96a30564d28009858e07293c881d84
9747692b83af6a0bda833aa7fa8c0d2f358de330
refs/heads/master
2022-12-18T13:14:59.806960
2020-09-16T23:14:11
2020-09-16T23:14:11
296,166,902
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5165863633155823, "alphanum_fraction": 0.5264968872070312, "avg_line_length": 65.26851654052734, "blob_id": "b189f0917ce408003def9d7852223b30eadd6d01", "content_id": "0a0ea4c6d0844b6052c72a04033439eda359a79b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7265, "license_type": "no_license", "max_line_length": 195, "num_lines": 108, "path": "/main.py", "repo_name": "testsite3984/markdowntohtml", "src_encoding": "UTF-8", "text": "import markdown\r\nimport os\r\nimport sys\r\nimport fileinput\r\n\r\n\r\ndef parseMarkdown(markdownTableFilePath, firstHTMLfilePath):\r\n with open(markdownTableFilePath) as infile, open(firstHTMLfilePath, 'w') as outfile:\r\n oldContent = infile.read()\r\n outfile.write(\"B_E_G_I_N\\n\")\r\n newContent = markdown.markdown(oldContent, extensions=['tables'])\r\n outfile.write(newContent)\r\n outfile.write(\"E_N_D\\n\")\r\n infile.close()\r\n\r\n\r\ndef parseHTML(firstHTMLfilePath, bootstrapHTMLfilePath):\r\n replacements = {'B_E_G_I_N': \"<!doctype html>\\n\" +\r\n \" <!--\\n\" +\r\n \" Material Design Lite\\n\" +\r\n \" Copyright 2015 Google Inc. All rights reserved.\\n\" +\r\n \" Licensed under the Apache License, Version 2.0 (the \\\"License\\\"); you may not use this file except in compliance with the License.\\n\" +\r\n \" You may obtain a copy of the License at\\n\" +\r\n \" https://www.apache.org/licenses/LICENSE-2.0\\n\" +\r\n \" Unless required by applicable law or agreed to in writing, software\\n\" +\r\n \" distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n\" +\r\n \" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n\" +\r\n \" See the License for the specific language governing permissions and\\n\" +\r\n \" limitations under the License\\n\" +\r\n \" NOTICE in compliance with the License - modified by max\\n\" +\r\n \"-->\\n\" +\r\n \"<html lang=\\\"en\\\">\\n\" +\r\n \" <head>\\n\" +\r\n \"<title>Toddios</title>\\n\" +\r\n \"<!--MDL-->\\n\" +\r\n \"<link rel=\\\"stylesheet\\\" href=\\\"https://fonts.googleapis.com/icon?family=Material+Icons\\\">\\n\" +\r\n \"<link rel=\\\"stylesheet\\\" href=\\\"https://code.getmdl.io/1.3.0/material.blue_grey-orange.min.css\\\">\\n\" +\r\n \" <!--bootstrap-->\\n\" +\r\n \"<link rel=\\\"stylesheet\\\" href=\\\"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css\\\" integrity=\\\"sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm\\\" crossorigin=\\\"anonymous\\\">\\n\" +\r\n \" <!--fixes and customization-->\\n\" +\r\n \"<link rel=\\\"stylesheet\\\" href=\\\"stylishtoddios.scss\\\">\\n\" +\r\n \"\\n\" +\r\n \"<meta charset=\\\"utf-8\\\">\\n\" +\r\n \"<meta name=\\\"viewport\\\" content=\\\"width=device-width, initial-scale=1, shrink-to-fit=no\\\">\\n\" +\r\n \"</head>\\n\" +\r\n \"<body class=\\\"mdl-demo mdl-color--grey-100 mdl-color-text--grey-700 mdl-base\\\">\\n\" +\r\n \"<div class=\\\"mdl-layout mdl-js-layout\\\">\\n\" +\r\n \"<header class=\\\"mdl-layout__header mdl-layout__header--scroll\\\">\\n\" +\r\n \" <div class=\\\"mdl-layout__header-row\\\">\\n\" +\r\n \" <!-- title - left empty for now -->\\n\" +\r\n \"<span class=\\\"mdl-layout-title\\\"></span>\\n\" +\r\n \" <!-- navigation to the right -->\\n\" +\r\n \"<div class=\\\"mdl-layout-spacer\\\"></div>\\n\" +\r\n \" <!-- extra navigation -->\\n\" +\r\n \"<nav class=\\\"mdl-navigation\\\">\\n\" +\r\n \" <a class=\\\"mdl-navigation__link\\\" href=\\\"index.html\\\">Home</a>\\n\" +\r\n \" <a 
class=\\\"mdl-navigation__link\\\" href=\\\"https://notwhorosethinks.com/faq.html\\\">FAQ</a>\\n\" +\r\n \" <a class=\\\"mdl-navigation__link\\\" href=\\\"https://www.patreon.com/notwhorosethinks\\\">Patreon</a>\\n\" +\r\n \" <a class=\\\"mdl-navigation__link\\\" href=\\\"https://docs.google.com/forms/d/16pvofg2HoMDkAh2JWW_nXkaJGnuVNhC7H5JHFje4Cj0/viewform?edit_requested=true\\\">NonConditional Love</a>\\n\" +\r\n \" </nav>\\n\" +\r\n \"</div>\\n\" +\r\n \"</header>\\n\" +\r\n \"<!-- drawer sidebar -->\\n\" +\r\n \"<div class=\\\"mdl-layout__drawer\\\">\\n\" +\r\n \"<span class=\\\"mdl-layout-title\\\">Toddios</span>\\n\" +\r\n \"<nav class=\\\"mdl-navigation\\\">\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"megatable.html\\\">Entire Table</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"1patreonExclusives.html\\\">Patreon Exclusives</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"2syrinExclusives.html\\\">Syrin Exclusives</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"3publicAudios.html\\\">Public Audios</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"4youtube.html\\\">YouTube</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"5subExclusives.html\\\">Subreddit Exclusives</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"6candid.html\\\">Candid</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"7reading.html\\\">Reading</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"8shitposts.html\\\">Shitposts</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"9nonconLove.html\\\">NonConditional Love</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"10unrec.html\\\">Old (And Unrecommended) Audios</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"11TSC.html\\\">Todd's Story Cubes Series</a>\\n\" +\r\n \"<a class=\\\"mdl-navigation__link\\\" href=\\\"12bucket.html\\\">Bucket</a>\\n\" +\r\n \" <a class=\\\"mdl-navigation__link\\\" href=\\\"13graveyard.html\\\">Graveyard</a>\\n\" +\r\n \" </nav>\\n\" +\r\n \"</div>\\n\" +\r\n \"<!-- table opens -->\\n\" +\r\n \"<center>\\n\", '<h2>': '<h4 style=\\\"padding-bottom: 10px; padding-top: 10px;\\\">',\r\n '</h2>': '</h4>', '<table>': '<div class=\\\"table-responsive\\\">\\n<table class=\\\"table table-bordered\\\">',\r\n '<th>': '<th scope=\"col\">', 'E_N_D': ' </div>\\n' +\r\n \"</div>\\n\" +\r\n \" </center>\\n\" +\r\n \" <!-- end table -->\\n\" +\r\n \"<!--MDL-->\\n\" +\r\n \"<script defer src=\\\"https://code.getmdl.io/1.3.0/material.min.js\\\"></script>\\n\" +\r\n \"<!--bootstrap-->\\n\" +\r\n \"<script src=\\\"https://code.jquery.com/jquery-3.2.1.slim.min.js\\\" integrity=\\\"sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN\\\" crossorigin=\\\"anonymous\\\"></script>\\n\" +\r\n \"<script src=\\\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js\\\" integrity=\\\"sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q\\\" crossorigin=\\\"anonymous\\\"></script>\\n\" +\r\n \"<script src=\\\"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js\\\" integrity=\\\"sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl\\\" crossorigin=\\\"anonymous\\\"></script>\\n\" +\r\n \"</body></html>\"}\r\n\r\n with open(firstHTMLfilePath) as infile, open(bootstrapHTMLfilePath, 'w') as outfile:\r\n for line in infile:\r\n for src, target in replacements.items():\r\n line = line.replace(src, target)\r\n 
outfile.write(line)\r\n\r\n\r\n# parseMarkdown(os.path.join('S:', 'truetestytable.txt'), os.path.join('S:', 'HTMLinput.txt'))\r\n# parseHTML(os.path.join('S:', 'HTMLinput.txt'), os.path.join('S:', 'testsite3984', 'test-site', 'htmltest.html'))\r\n# 'C:\\\\' (with the trailing backslash) keeps the path absolute; a bare 'C:' is drive-relative\r\nparseMarkdown(input(\"Markdown table's file path: \"), os.path.join('C:\\\\', 'HTMLinput.txt'))\r\nparseHTML(os.path.join('C:\\\\', 'HTMLinput.txt'), input(\"File path of HTML to overwrite: \"))\r\n" } ]
1
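main.py above relies on the markdown package's 'tables' extension to turn a pipe table into HTML before splicing it into the Bootstrap page. Here is a minimal sketch of that conversion step, assuming the markdown package (the repository's own dependency) is installed; the sample table is invented.

import markdown

# A tiny pipe table in the same Markdown dialect parseMarkdown() consumes.
sample = """| Title | Link |
| ----- | ---- |
| Demo | http://example.com |
"""

# The 'tables' extension emits <table>/<thead>/<tbody> markup,
# which main.py then post-processes into Bootstrap classes.
html = markdown.markdown(sample, extensions=['tables'])
print(html)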
A5308Y/Dorfsim
https://github.com/A5308Y/Dorfsim
b8c3d86eaab8663f918c449d297c870fa47fc853
0ac525c2903cd48d1b6839a5c64d520a6854a3b0
b0ffd0f81a6cd9a01a1d30c70e02a223e82685b2
refs/heads/master
2020-04-11T05:59:29.985464
2011-06-29T12:06:54
2011-06-29T12:06:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6141512989997864, "alphanum_fraction": 0.6657372117042542, "avg_line_length": 43.84375, "blob_id": "985faaeb9063a3a76f21c05ec3326b94c3adae8b", "content_id": "7557544d4587ede7f78ad0d0656301dd6266bd5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2869, "license_type": "no_license", "max_line_length": 114, "num_lines": 64, "path": "/Panel.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport pygame\n\nclass StatusPanel(object):\n def __init__(self, world):\n self.screen_font = pygame.font.SysFont(\"umpush\", 16)\n self.world = world\n self.height = 166\n self.menu_width = 150\n self.menu_start = self.world.SCREEN_SIZE[0]-self.menu_width\n self.displayed_entities = []\n self.status_display_mode = \"list\"\n self.displayed_type = \"animal\"\n self.displayed_entity_number = 0\n\n def blitrendertext(self, text, row, screen):\n statustext = self.screen_font.render(text, True, (0,0,0))\n screen.blit(statustext, (10, 20*row, 15, 15))\n\n def define_displayed_entities(self):\n self.displayed_entities = []\n for entity in self.world.entities.itervalues():\n entity.indicated = False\n if entity.typestring == self.displayed_type:\n\tself.displayed_entities.append(entity)\n\n def draw_menu(self, screen):\n menu_start = self.menu_start\n pygame.draw.rect(screen, (180, 200, 210), pygame.Rect((menu_start,0),(self.world.SCREEN_SIZE[0],self.height)))\n screen.blit(self.screen_font.render(\"Quit\", True, (0,0,0)), (menu_start, 0, 15, 15))\n screen.blit(self.screen_font.render(\"Animals\", True, (0,0,0)), (menu_start, 20, 15, 15))\n screen.blit(self.screen_font.render(\"Sites\", True, (0,0,0)), (menu_start, 40, 15, 15))\n screen.blit(self.screen_font.render(\"Humans\", True, (0,0,0)), (menu_start, 60, 15, 15))\n screen.blit(self.screen_font.render(\"List/Single\", True, (0,0,0)), (menu_start, 80, 15, 15))\n screen.blit(self.screen_font.render(\"<\", True, (0,0,0)), (menu_start, 100, 15, 15))\n screen.blit(self.screen_font.render(\">\", True, (0,0,0)), (menu_start + 20, 100, 15, 15))\n\n def draw_panel(self, screen):\n rectangle = pygame.Rect((0,0),(self.world.SCREEN_SIZE[0],self.height))\n pygame.draw.rect(screen, (180, 210, 210), rectangle)\n self.define_displayed_entities()\n if self.status_display_mode == \"list\":\n for entity in self.displayed_entities:\n\tentity.indicated = True\n\tstatus_text = entity.__str__()\n\tself.blitrendertext(status_text, self.displayed_entities.index(entity), screen)\n\n elif self.status_display_mode == \"single\":\n for entity in self.displayed_entities:\n\tif self.displayed_entities.index(entity) == self.displayed_entity_number:\n\t entity.indicated = True\n\t description_list = entity.description_list()\n\t for entry in description_list:\n\t self.blitrendertext(entry, description_list.index(entry), screen)\n \n self.draw_menu(screen)\n\n #time_of_day_symb = screen_font.render(time_of_day, True, (0,0,0))\n #screen.blit(time_of_day_symb, (700, 20, 15, 15))\n #time_pic = screen_font.render(str(round(time_o_clock,2)), True, (0,0,0))\n #screen.blit(time_pic, (750, 20, 15, 15))\n #zeitraffer_pic = screen_font.render(\"Zeitraffer(Click): \" + str(zeitraffer), True, (0,0,0))\n #screen.blit(zeitraffer_pic, (700, 40, 15, 15))" }, { "alpha_fraction": 0.6471264362335205, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 22.54054069519043, "blob_id": "9b5c57ff3ef8b6df555c5f9861d462e035dc0063", "content_id": 
"31e139f407a6411d0de65b9d9b42b789103fc211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 56, "num_lines": 37, "path": "/Dorfsim.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport pygame, random, dorflib, Panel, Input\nfrom dorflib import *\nfrom pygame.locals import *\n\nSCREEN_SIZE = (1000, 600)\n\ndef run():\n pygame.init()\n screen = pygame.display.set_mode(SCREEN_SIZE, 0, 32)\n pygame.display.set_caption('Dorfsimulator')\n world = World(SCREEN_SIZE)\n panel = Panel.StatusPanel(world)\n input_checker = Input.InputChecker()\n clock = pygame.time.Clock()\n world.populate()\n\n while True:\n\n for event in pygame.event.get():\n if event.type == QUIT:\n\treturn\n\n time_passed = clock.tick(30)\n \n input_checker.check_input(world, panel, time_passed)\n world.process(time_passed)\n\n screen.set_clip(0, panel.height, *SCREEN_SIZE)\n world.render(screen)\n screen.set_clip(0, 0, SCREEN_SIZE[0], panel.height)\n panel.draw_panel(screen)\n pygame.display.update()\n\nif __name__ == \"__main__\":\n run()" }, { "alpha_fraction": 0.6283501982688904, "alphanum_fraction": 0.6515783071517944, "avg_line_length": 31.30769157409668, "blob_id": "0ffcc265ec2b678e46db1cddebb505eadd62e2ef", "content_id": "c01f26cd65d3c891574a40fb2b59e99a8075c472", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1679, "license_type": "no_license", "max_line_length": 98, "num_lines": 52, "path": "/Input.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport pygame, sys\nfrom pygame.locals import *\n\nclass InputChecker():\n def __init__(self):\n self.last_pressed_cooldown = 0\n\n def check_input(self, world, panel, time_passed):\n pressed_keys = pygame.key.get_pressed()\n if pressed_keys[K_UP]:\n world.camera_y += 10\n if pressed_keys[K_DOWN]:\n world.camera_y -= 10\n if pressed_keys[K_LEFT]:\n world.camera_x += 10\n if pressed_keys[K_RIGHT]:\n world.camera_x -= 10\n if self.last_pressed_cooldown > 0:\n self.last_pressed_cooldown -= time_passed\n if pygame.mouse.get_pressed()[0] and self.last_pressed_cooldown <= 0:\n self.last_pressed_cooldown = 100\n mouse_x = pygame.mouse.get_pos()[0]\n mouse_y = pygame.mouse.get_pos()[1]\n if mouse_x >= panel.menu_start:\n\tif mouse_y <= 20:\n\t exit_game()\n\telif mouse_y <= 40:\n\t panel.displayed_type = \"animal\"\n\t panel.displayed_entity_number = 0\n\telif mouse_y <= 60:\n\t panel.displayed_type = \"site\"\n\t panel.displayed_entity_number = 0\n\telif mouse_y <= 80:\n\t panel.displayed_type = \"human\"\n\t panel.displayed_entity_number = 0\n\telif mouse_y <= 100:\n\t if panel.status_display_mode == \"list\":\n\t panel.status_display_mode = \"single\"\n\t else:\n\t panel.status_display_mode = \"list\"\n\telif mouse_y <= 120:\n\t if mouse_x <= panel.menu_start + 15:\n\t panel.displayed_entity_number -= 1 \n\t panel.displayed_entity_number = panel.displayed_entity_number % len(panel.displayed_entities)\n\t else:\n\t panel.displayed_entity_number += 1 % len(panel.displayed_entities)\n\t panel.displayed_entity_number = panel.displayed_entity_number % len(panel.displayed_entities)\n\t \ndef exit_game():\n sys.exit()" }, { "alpha_fraction": 0.6480622887611389, "alphanum_fraction": 0.6610632538795471, "avg_line_length": 31.462366104125977, "blob_id": "42b15d79a01c7600d639f9244fa5aef70fa3ef36", "content_id": 
"38b7e823173ab28e1f2daf0f929b6c9b500d2eda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12076, "license_type": "no_license", "max_line_length": 126, "num_lines": 372, "path": "/dorflib.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": " # -*- coding: utf-8 -*-\n\nimport pygame, random, sys, Vector2, Panel\nfrom Vector2 import *\nimport Statemachine\nfrom Statemachine import *\nimport Projects\nfrom Projects import *\n\nclass World(object):\n def __init__(self, SCREEN_SIZE):\n self.entities={}\n self.add_entity_cache=[]\n self.entity_id = 0\n self.SCREEN_SIZE = SCREEN_SIZE\n self.background = pygame.surface.Surface(SCREEN_SIZE).convert()\n self.background.fill((255,255,255))\n self.camera_x = self.camera_y = 0\n self.indicator_symb = pygame.image.load(\"pics/indicator.png\").convert_alpha()\n\n def add_entity(self, entity):\n self.entities[self.entity_id] = entity\n entity_id = self.entity_id\n self.entity_id += 1\n\n def remove_entity(self, entity):\n del self.entities[entity_id]\n\n def get(self, entity_id):\n if entity_id in self.entities:\n return self.entities[entity_id]\n else:\n return None\n\n def process(self, time_passed):\n self.time_passed_seconds = time_passed / 1000.0\n self.time_passed = time_passed\n for entity in self.entities.itervalues():\n entity.process(self.time_passed_seconds)\n for entity in self.add_entity_cache:\n self.add_entity(entity)\n self.add_entity_cache=[]\n\n def render(self, surface):\n surface.blit(self.background, (0,0))\n for entity in self.entities.itervalues():\n entity.render(surface)\n if entity.indicated == True:\n\tself.indicate(entity, surface, self.indicator_symb)\n\n def indicate(self, entity, surface, indicator):\n x = entity.location.x + entity.world.camera_x\n y = entity.location.y + entity.world.camera_y\n w, h = self.indicator_symb.get_size()\n surface.blit(indicator, (x-w/2, y-h/2 - h))\n\n def get_close_entity(self, name, location, range=100.):\n for entity in self.entities.itervalues():\n if entity.typestring == name:\n\tdistance = location.get_distance_to(entity.location)\n\tif distance < range:\n\t return entity\n return None\n\n def closest_entity_in_list(self, animal, entity_list):\n nearest_entity = None\n min_distance = -1\n for entity in entity_list:\n distance = entity.location.get_distance_to(animal.location)\n if distance <= min_distance or min_distance == -1:\n\tmin_distance = distance\n\tnearest_entity = entity\n return nearest_entity\n\n def entities_of_type(self, type):\n returnlist = []\n for entity in self.entities.itervalues():\n if entity.typestring == type:\n\treturnlist.append(entity)\n return returnlist\n\n def populate(self):\n w, h = self.SCREEN_SIZE\n picfolder=\"pics/\"\n animal_image = pygame.image.load(picfolder + \"bear.png\").convert_alpha()\n site_image = pygame.image.load(picfolder + \"ziel.png\").convert_alpha()\n human_image = pygame.image.load(picfolder + \"person.png\").convert_alpha()\n\n for site in xrange(1):\n site = Farm(self)\n site.location = Vector2(random.randint(0,w), random.randint(0,h))\n self.add_entity(site)\n\n for site in xrange(3):\n site = Woods(self, site_image)\n site.location = Vector2(random.randint(0,w), random.randint(0,h))\n self.add_entity(site)\n\n for animal_no in xrange(3):\n animal = Animal(self, animal_image)\n animal.location = Vector2(random.randint(0,w), random.randint(0,h))\n animal.brain.set_state(\"exploring\")\n self.add_entity(animal)\n\n for human_name in ['Bob', 'Mary', 'Paula', 
'Ariel', 'Moore']:\n human = Human(self, human_image, human_name)\n human.location = Vector2(random.randint(0,w), random.randint(0,h))\n human.brain.set_state(\"idle\")\n self.add_entity(human)\n\nclass Perception(World):\n def __init__(self, animal):\n SCREEN_SIZE=(100, 60)\n #perception_size = (self.animal.sightrange)\n World.__init__(self, SCREEN_SIZE)\n self.animal = animal\n self.possible_targets = []\n\n def am_attacked(self):\n if self.am_seeked_by() == None:\n return False\n return True\n\n def am_seeked_by(self):\n for entity in self.entities.itervalues():\n different = entity.typestring != self.animal.typestring\n aggressive = entity.appearance == \"aggressive\"\n if entity != self and different and aggressive:\n\treturn entity\n return None\n\n def percieve(self):\n self.entities = {}\n entity_names = []\n self.possible_targets = []\n for entity in self.animal.world.entities.itervalues():\n if self.percieves_entity(entity):\n\tself.add_entity(entity)\n\tentity_names.append(entity.name)\n\tif entity.typestring != self.animal.typestring and entity.alive:\n\t self.possible_targets.append(entity)\n return entity_names\n #self.percieved_time = time_of_day\n\n def setmood(self, being):\n if being.hunger <= 2:\n being.mood = Mood(\"content\")\n if being.hunger >= 12:\n being.mood = Mood(\"hungry\")\n for other in being.percieved_objects:\n if other.appearance.name == \"aggressive\" and other.name != being.name:\n\tbeing.interrupted = True\n\tbeing.percieved_threats.append(other)\n\tif being.personality.name == \"aggressive\":\n\t being.mood = Mood(\"aggressive\")\n\telse:\n\t being.mood = Mood(\"in fear\")\n\n def percieves_entity(self, entity):\n if self.animal.location.get_distance_to(entity.location) <= 50.:\n return True\n else:\n return False\n\nclass GameEntity(object):\n def __init__(self, world, name, image):\n self.world = world\n self.name = name\n self.image = image\n self.state_icon = None\n self.location = Vector2(0,0)\n self.destination = Vector2(0,0)\n self.speed = 0\n self.brain = StateMachine(self)\n self.perception = Perception(self)\n self.id = 0\n self.indicated = False\n self.appearance = \"\"\n self.alive = False\n self.attack_cooldown = 200\n self.attack_cooldown_timer = 0\n\n def __str__(self):\n return self.name + \" \" + str((round(self.location.x), round(self.location.y)))\n\n def description_list(self):\n return [self.__str__()]\n\n def render(self, surface):\n x = self.location.x + self.world.camera_x\n y = self.location.y + self.world.camera_y\n w, h = self.image.get_size()\n surface.blit(self.image, (x-w/2, y-h/2))\n\n if self.state_icon:\n surface.blit(self.state_icon, (x-w , y-h))\n\n def process(self, time_passed):\n\n self.perception.percieve()\n self.brain.think()\n \n if self.attack_cooldown_timer > 0:\n self.attack_cooldown_timer -= time_passed * 100\n if self.speed > 0 and self.location != self.destination:\n vec_to_destination = self.destination - self.location\n distance_to_destination = vec_to_destination.get_magnitude()\n heading = vec_to_destination.normalize()\n travel_distance = min(distance_to_destination, time_passed * self.speed)\n self.location += heading * travel_distance\n\nclass Animal(GameEntity):\n def __init__(self, world, image, *name):\n GameEntity.__init__(self, world, \"animal\", image)\n exploring_state = AnimalStateExploring(self)\n seeking_state = AnimalStateSeeking(self)\n going_home_state = AnimalStateGoingHome(self)\n hunting_state = AnimalStateHunting(self)\n dead_state = AnimalStateDead(self)\n idle_state = 
AnimalStateIdle(self)\n\n self.brain.add_state(exploring_state)\n self.brain.add_state(seeking_state)\n self.brain.add_state(going_home_state)\n self.brain.add_state(hunting_state)\n self.brain.add_state(dead_state)\n self.brain.add_state(idle_state)\n\n self.mood = \"content\"\n self.appearance = \"calm\"\n self.home = Vector2(10,10)\n self.carried = {}\n self.carry_image = None\n self.alive = True\n self.normal_speed = 80\n\n self.typestring = \"animal\"\n\n self.target = None\n\n self.attack_modifier = 1\n self.attack_damage_dice_type = 6\n self.attack_damage_dice_count = 1\n self.attack_damage_plus = 0\n self.attack_cooldown = 200\n self.attack_target = None\n self.defence = 10\n self.hp = self.maxhp = 10\n\n def render(self, surface):\n GameEntity.render(self, surface)\n\n #------------- draw health bar -------------\n x = self.location.x + self.world.camera_x\n y = self.location.y + self.world.camera_y\n w, h = self.image.get_size()\n bar_x = x - w/2\n bar_y = y + h/2\n surface.fill( (255, 0, 0), (bar_x, bar_y, w, 3))\n surface.fill( (0, 255, 0), (bar_x, bar_y, w * self.hp / self.maxhp, 3))\n\n if self.carry_image:\n surface.blit(self.carry_image, (x-w , y -h))\n \n def __str__(self):\n return self.name + \" \" + str((round(self.location.x), round(self.location.y))) + \" state: \" + self.brain.active_state.name\n\n def description_list(self):\n if self.target == None:\n target_name = \"None\"\n else:\n target_name = self.target.name\n string0 = self.__str__()\n string1 = str(self.hp) + \"/\" + str(self.maxhp) + \" hp \"\n string2 = \"State: \" + self.brain.active_state.name + \" Target: \" + target_name\n string3 = \"carried: \" + str(self.carried)\n string4 = \"percieves: \" + str(self.perception.percieve())\n return [string0, string1, string2, string3, string4]\n\nclass Human(Animal):\n def __init__(self, world, image, name):\n Animal.__init__(self, world, image, name)\n self.name = name\n self.typestring = \"human\"\n\n self.defence = 17 #testzwecke\n self.hp = self.maxhp = 20\n\n self.projectmachine = ProjectMachine()\n working_state = HumanStateWorking(self)\n going_to_work_state = HumanStateGoingToWork(self)\n idle_state = HumanStateIdle(self)\n\n build_project = BuildProject(self, Farm(self.world))\n finding_constr_place_state = HumanStateFindingConstructionPlace(self)\n constructing_state = HumanStateConstructing(self)\n bringing_ressources_state=HumanStateBringingRessources(self)\n\n self.brain.add_state(working_state)\n self.brain.add_state(going_to_work_state)\n self.brain.add_state(idle_state)\n\n self.projectmachine.add_project(build_project)\n self.brain.add_state(finding_constr_place_state)\n self.brain.add_state(constructing_state)\n self.brain.add_state(bringing_ressources_state)\n\n self.home = Vector2(100,100)\n\n def __str__(self):\n return self.name + \" \" + str((round(self.location.x), round(self.location.y))) + \" state: \" + self.brain.active_state.name\n\n def pick_up_ressource(self, ressource, site):\n self.carried[ressource] = 1\n site.ressources[ressource] -= 1\n self.carry_image = pygame.image.load(\"pics/paket.png\").convert_alpha()\n\n def drop_all_ressources(self, site):\n for ressource in self.carried.iterkeys():\n site.increase_ressource(ressource,1)\n self.carried = {}\n self.carry_image = None\n\nclass Site(GameEntity):\n def __init__(self, world, image):\n GameEntity.__init__(self, world, \"site\", image)\n self.typestring = \"site\"\n self.finished = True\n self.completion = 100\n self.produced_ressources = \"\"\n self.ressources ={}\n 
self.production_delay = 0.0\n\n def __str__(self):\n return self.name + \" \" + str((round(self.location.x), round(self.location.y))) + \" Ressources: \" + str(self.ressources)\n\n def description_list(self):\n string0 = self.__str__()\n string1 = \"Completion: \" + str(self.completion) + \"/\" + str(100)\n string2 = \"Ressources at Site: \" + str(self.ressources)\n return [string0, string1, string2]\n\n def check_ressource(self, ressource):\n if self.ressources.keys().count(ressource) != 0:\n return self.ressources[ressource]\n else:\n return 0\n\n def increase_ressource(self, ressource, value):\n if self.ressources.keys().count(ressource) == 0:\n self.ressources[ressource] = 0\n self.ressources[ressource] += value\n\nclass Farm(Site):\n def __init__(self, world):\n self.image = pygame.image.load(\"pics/ziel.png\").convert_alpha()\n Site.__init__(self, world, self.image)\n self.produced_ressources = \"wheat\"\n self.name = \"Farm\"\n self.production_delay = 200\n self.needed_ressources = {\"lumber\":10, \"wheat\":2}\n self.finished = True\n\nclass Woods(Site):\n def __init__(self, world, image):\n Site.__init__(self, world, image)\n self.produced_ressources = \"lumber\"\n self.ressources ={\"lumber\":2}\n self.image = pygame.image.load(\"pics/woods.png\").convert_alpha()\n self.production_delay = 300\n self.finished = True\n self.name = \"Woods\"" }, { "alpha_fraction": 0.5985915660858154, "alphanum_fraction": 0.6056337952613831, "avg_line_length": 19.428571701049805, "blob_id": "4fcdaa36b6b4b9ef156f2d587180748aead6376f", "content_id": "3e1e6efd0886c7d9d6bcdf36e18ecf3cbd100351", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 31, "num_lines": 7, "path": "/knowledge.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nclass Knowledge(object):\n def __init__(self):\n pass\nclass Building_Plan(Knowledge):\n def __init__(self):\n pass" }, { "alpha_fraction": 0.6569122076034546, "alphanum_fraction": 0.6639757752418518, "avg_line_length": 25.105262756347656, "blob_id": "c1dc802c88396b2c73eae88962ccac1228d07b52", "content_id": "ef1ca6faaedccfd73e8a20be4bc2719ebf7ebab8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 991, "license_type": "no_license", "max_line_length": 76, "num_lines": 38, "path": "/perception.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport dorflib, Vector2\n\nclass Perception(World):\n def __init__(self, human):\n self.human = human\n\n def percieve(self):\n pass\n\n def is_interrupted(self):\n pass\n\n def set_perception(self):\n for entity in self.human.world.enitities.itervalues():\n if self.percieves_entity(entity):\n\tself.add_entity(entity)\n #self.percieved_time = time_of_day\n\n def setmood(self, being):\n if being.hunger <= 2:\n being.mood = Mood(\"content\")\n if being.hunger >= 12:\n being.mood = Mood(\"hungry\")\n for other in being.percieved_objects:\n if other.appearance.name == \"aggressive\" and other.name != being.name:\n\tbeing.interrupted = True\n\tbeing.percieved_threats.append(other)\n\tif being.personality.name == \"aggressive\":\n\t being.mood = Mood(\"aggressive\")\n\telse:\n\t being.mood = Mood(\"in fear\")\n\n def percieves_entity(self, entity):\n if self.human.location.get_distance(entity.location) <= 50.:\n return True\n else:\n return False" }, { "alpha_fraction": 0.680907130241394, 
"alphanum_fraction": 0.6877744793891907, "avg_line_length": 32.308509826660156, "blob_id": "4bb73382a5ffb6d2283c77e9e5e1ad5a20cacf93", "content_id": "8d7e39183526a02bfd6069c79f9b6e28235405ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12523, "license_type": "no_license", "max_line_length": 116, "num_lines": 376, "path": "/Statemachine.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport Vector2, random, dorflib, pygame\nfrom Vector2 import *\n\nclass State(object):\n def __init__(self, name):\n self.name = name\n\n def do_actions(self):\n pass\n \n def check_conditions(self):\n pass\n\n def entry_actions(self):\n pass\n\n def exit_actions(self):\n pass\n\nclass StateMachine(object):\n def __init__(self, animal):\n self.states = {}\n self.active_state = None\n self.animal = animal\n \n def add_state(self, state):\n self.states[state.name] = state\n\n def think(self):\n if self.active_state is None:\n return\n self.active_state.do_actions()\n new_state_name = self.active_state.check_conditions()\n if new_state_name is not None:\n self.set_state(new_state_name)\n\n def set_state(self, new_state_name):\n if self.active_state is not None:\n self.active_state.exit_actions()\n self.active_state = self.states[new_state_name]\n self.active_state.entry_actions()\n\n#-------------- DEFINED STATES --------------------\n\nclass AnimalStateIdle(State):\n def __init__(self, animal):\n State.__init__(self, \"idle\")\n self.animal = animal\n\n def check_conditions(self):\n return \"exploring\"\n\nclass AnimalStateExploring(State):\n def __init__(self, animal):\n State.__init__(self, \"exploring\")\n self.animal = animal\n\n def random_destination(self):\n w, h = self.animal.world.SCREEN_SIZE\n self.animal.destination = Vector2(random.randint(0,w), random.randint(0,h))\n\n def entry_actions(self):\n self.animal.speed = 80\n self.random_destination()\n\n def do_actions(self):\n if random.randint(1,20) == 1:\n self.random_destination()\n\n def check_conditions(self):\n if self.animal.perception.am_attacked():\n self.animal.target = self.animal.perception.am_seeked_by()\n return \"hunting\"\n\n if len(self.animal.perception.possible_targets) > 0:\n target = random.choice(self.animal.perception.possible_targets)\n if not target == None and target != self.animal:\n\tself.animal.target = target\n\treturn \"seeking\"\n\nclass AnimalStateSeeking(State):\n def __init__(self, animal):\n State.__init__(self, \"seeking\")\n self.animal = animal\n self.target = None\n\n def entry_actions(self):\n self.animal.speed = 110\n self.target = self.animal.target\n self.animal.appearance = \"aggressive\"\n self.animal.destination = self.target.location\n\n def check_conditions(self):\n if self.animal.location.get_distance_to(self.animal.destination) <= 10:\n return \"hunting\"\n\n def do_actions(self):\n self.animal.destination = self.target.location\n\nclass AnimalStateGoingHome(State):\n def __init__(self, animal):\n State.__init__(self, \"going home\")\n self.animal = animal\n\n def entry_actions(self):\n self.animal.speed = 60\n self.animal.destination = self.animal.home\n\n def check_conditions(self):\n if self.animal.perception.am_attacked():\n\tself.animal.target = self.animal.perception.am_seeked_by()\n\treturn \"hunting\"\n\nclass AnimalStateDelivering(State):\n pass\n\nclass AnimalStateDead(State):\n def __init__(self, animal):\n State.__init__(self, \"dead\")\n self.animal = animal\n\n def 
entry_actions(self):\n        self.animal.speed = 0\n        self.animal.appearance = \"dead\"\n        self.animal.image = pygame.transform.flip(self.animal.image, 0, 1)\n        print str(self.animal.name) + \" died.\"\n        self.animal.alive = False\n\nclass AnimalStateHunting(State):\n    def __init__(self, animal):\n        State.__init__(self, \"hunting\")\n        self.animal = animal\n\n    def entry_actions(self):\n        self.animal.speed = 180\n        self.target = self.animal.target\n        self.animal.destination = self.target.location\n        self.animal.appearance = \"aggressive\"\n        self.animal.state_icon = pygame.image.load(\"pics/hunting.png\")\n\n    def do_actions(self):\n        self.animal.destination = self.target.location\n        if self.animal.location.get_distance_to(self.target.location) <= 5.0:\n            if self.animal.attack_cooldown_timer <= 0:\n\tself.attack(self.target)\n    \n    def check_conditions(self):\n        if not self.target.alive:\n            return \"idle\"\n\n    def exit_actions(self):\n        self.animal.appearance = \"calm\"\n        self.animal.target = None\n        self.animal.speed = self.animal.normal_speed\n        self.animal.state_icon = None\n\n    def attack(self, target):\n        self.animal.attack_cooldown_timer = self.animal.attack_cooldown\n        damage = self.animal.attack_damage_plus\n        if self.animal.location.get_distance_to(self.target.location) <= 5.0:\n            if random.randint(1,20) + self.animal.attack_modifier >= self.target.defence:\n\tfor i in range(self.animal.attack_damage_dice_count):\n\t    damage += random.randint(1,self.animal.attack_damage_dice_type)\n\ttarget.hp -= damage\n\tprint self.animal.name + \" hits \" + target.name + \" for \" + str(damage) + \" HP. (\" + str(target.hp) + \" HP left.) \"\n\tif target.hp <= 0:\n\t    target.brain.set_state(\"dead\")\n            else:\n\tprint self.animal.name + \" misses \" + target.name\n        else:\n            print \"target out of range.\"\n\n#-------------------- HUMAN STATES ---------------------------\n#------------------ Project States -------------------------\n\nclass HumanStateWorking(State):\n    def __init__(self, human):\n        State.__init__(self, \"working\")\n        self.human = human\n        self.production_increase_timer = 0\n\n    def entry_actions(self):\n        self.human.speed = 0\n        self.human.state_icon = pygame.image.load(\"pics/working.png\").convert_alpha()\n        #self.human.mood = \"annoyed\"\n        self.production_increase_timer = self.human.workplace.production_delay\n        self.product = self.human.workplace.produced_ressources\n\n    def do_actions(self):\n        self.production_increase_timer -= self.human.world.time_passed_seconds * 100\n        if self.production_increase_timer <= 0:\n            self.production_increase_timer = self.human.workplace.production_delay\n            self.human.workplace.increase_ressource(self.product, 1)\n\n    def check_conditions(self):\n        if self.human.perception.am_attacked():\n            self.human.target = self.human.perception.am_seeked_by()\n            return \"hunting\"\n        if self.human.workplace.check_ressource(self.product) >= 5:\n            return \"idle\"\n\n    def exit_actions(self):\n        self.human.state_icon = None\n\nclass HumanStateGoingToWork(State):\n    def __init__(self, human):\n        State.__init__(self, \"going to work\")\n        self.human = human\n\n    def entry_actions(self):\n        self.project = self.human.projectmachine.active_project\n        self.human.speed = 100\n        self.human.workplace = random.choice(self.human.world.entities_of_type(\"site\"))\n\n    def do_actions(self):\n        if self.project != None:\n            if self.project.name == \"build project\":\n\tself.project.set_ressource_producing_sites()\n\tsites = self.project.ressource_producing_sites\n\tworld = self.human.world\n\tself.human.workplace =
world.closest_entity_in_list(self.human, sites)\n self.human.destination = self.human.workplace.location\n\n def check_conditions(self):\n if self.human.perception.am_attacked():\n self.human.target = self.human.perception.am_seeked_by()\n return \"hunting\"\n if self.human.mood == \"content\" and self.human.location.get_distance_to(self.human.destination) <= 10:\n if self.project is not None:\n\tfor ressource in self.human.projectmachine.active_project.needed_ressources.iterkeys():\n\t if self.human.workplace.check_ressource(ressource) >= 1:\n\t return \"bringing ressources\"\n return \"working\"\n\nclass HumanStateIdle(State):\n def __init__(self, human):\n State.__init__(self, \"idle\")\n self.human = human\n\n def entry_actions(self):\n self.project = self.human.projectmachine.active_project\n\n def check_conditions(self):\n if self.human.perception.am_attacked():\n self.human.target = self.human.perception.am_seeked_by()\n return \"hunting\"\n if self.human.mood == \"content\":\n if self.project is None:\n\tif random.randint(1,100) <= 50:\n\t self.human.projectmachine.set_project(\"build project\")\n if self.human.projectmachine.active_project is not None:\n\treturn self.human.projectmachine.active_project.needed_actions[0]\n\n #def decide_project #!!!\n # pass\n\nclass HumanStateFindingConstructionPlace(State):\n def __init__(self, human):\n State.__init__(self, \"finding construction place\")\n self.human = human\n\n def random_destination(self):\n w, h = self.human.world.SCREEN_SIZE\n self.human.destination = Vector2(random.randint(0,w), random.randint(0,h))\n\n def entry_actions(self):\n self.human.speed = 80\n self.random_destination()\n self.project = self.human.projectmachine.active_project\n self.site = self.project.site\n\n def check_conditions(self):\n if self.human.perception.am_attacked():\n self.human.target = self.human.perception.am_seeked_by()\n return \"hunting\"\n if self.human.location.get_distance_to(self.human.destination) <= 1.0:\n self.project.set_construction_site(self.human.destination)\n return \"constructing\"\n\nclass HumanStateConstructing(State):\n def __init__(self, human):\n State.__init__(self, \"constructing\")\n self.human = human\n\n def entry_actions(self):\n self.project = self.human.projectmachine.active_project\n self.site = self.project.site\n self.human.destination = self.site.location\n\n def do_actions(self):\n arrived = self.human.location.get_distance_to(self.human.destination) <= 1.0\n if arrived and self.project.ressources_delivered():\n self.site.completion += 0.02 * self.human.world.time_passed\n if self.site.completion >= 100:\n self.site.completion = 100\n self.site.finished = True\n self.human.projectmachine.active_project = None\n\n def check_conditions(self):\n if self.human.perception.am_attacked():\n self.human.target = self.human.perception.am_seeked_by()\n return \"hunting\"\n if self.site.finished:\n self.site.ressources = {} # TODO: this still needs a different solution\n return \"going to work\"\n arrived = self.human.location.get_distance_to(self.human.destination) <= 1.0\n if arrived and not self.project.ressources_delivered():\n return \"bringing ressources\"\n\nclass HumanStateBringingRessources(State):\n def __init__(self, human):\n State.__init__(self, \"bringing ressources\")\n self.human = human\n\n def entry_actions(self):\n self.human.speed = self.human.normal_speed\n self.project = self.human.projectmachine.active_project\n self.site = self.project.site\n self.ressource_site = None\n\n self.choose_ressource_site()\n #if len(self.human.carried) == 0:\n #self.human.destination = self.ressource_site.location\n #else:\n #self.human.destination = self.site.location\n\n def choose_ressource_site(self):\n self.project.set_next_ressource()\n self.project.define_ressource_sites(self.project.next_ressource)\n self.ressource_site = self.human.world.closest_entity_in_list(self.human, self.project.ressource_sites)\n\n def do_actions(self):\n distance_to_site = self.human.location.get_distance_to(self.site.location)\n\n if self.ressource_site is not None:\n if self.ressource_site.check_ressource(self.project.next_ressource) <= 0:\n\tself.project.ressource_sites.remove(self.ressource_site)\n\tself.choose_ressource_site()\n\n if len(self.human.carried) >= 1:\n self.human.destination = self.site.location\n if distance_to_site <= 1:\n\tself.human.drop_all_ressources(self.site)\n\n if len(self.human.carried) <= 0:\n self.choose_ressource_site()\n if self.ressource_site is not None:\n\tdistance_to_ressource = self.human.location.get_distance_to(self.ressource_site.location)\n\tself.human.destination = self.ressource_site.location\n\tif distance_to_ressource <= 1 and self.ressource_site.check_ressource(self.project.next_ressource) >= 1:\n\t self.human.pick_up_ressource(self.project.next_ressource, self.ressource_site)\n\n def check_conditions(self):\n if self.human.perception.am_attacked():\n self.human.target = self.human.perception.am_seeked_by()\n return \"hunting\"\n if self.project.ressources_delivered():\n return \"constructing\"\n if len(self.human.carried) <= 0 and len(self.project.ressource_sites) == 0:\n return \"going to work\"\n\n#not implemented\nclass HumanStateFindingRessources(State):\n def __init__(self, human):\n State.__init__(self, \"checking ressources\")\n self.human = human\n\n def entry_actions(self):\n # keep the requirements on self so check_conditions can read them\n self.ressource_name = self.human.projectmachine.active_project.needed_ressource_name\n self.ressource_count = self.human.projectmachine.active_project.needed_ressource_count\n\n def check_conditions(self):\n # sketch: fetch more if the project's site holds less than needed\n site = self.human.projectmachine.active_project.site\n if site.check_ressource(self.ressource_name) < self.ressource_count:\n return \"bringing ressources\"" },
{ "alpha_fraction": 0.6967324614524841, "alphanum_fraction": 0.6987746953964233, "avg_line_length": 30.26595687866211, "blob_id": "136942fa837978a19b024fab122e1043d34b02b2", "content_id": "63eee91a671139fed41816ec342c56bd22426f2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2938, "license_type": "no_license", "max_line_length": 95, "num_lines": 94, "path": "/Projects.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport dorflib, pygame\n\nclass ProjectMachine(object):\n def __init__(self):\n self.projects = {}\n self.active_project = None\n\n def add_project(self, project):\n self.projects[project.name] = project\n\n def set_project(self, new_project_name):\n if self.active_project is not None:\n self.active_project.exit_actions()\n self.active_project = self.projects[new_project_name]\n self.active_project.entry_actions()\n\nclass Project(object):\n def __init__(self, name):\n self.needed_ressources = {}\n self.name = name\n self.site = None\n\n def entry_actions(self):\n pass\n\n def exit_actions(self):\n pass\n\n def add_needed_ressource(self):\n pass\n\nclass BuildProject(Project):\n def __init__(self, human, building):\n Project.__init__(self, \"build project\")\n picfolder = \"pics/\"\n self.site_image = pygame.image.load(picfolder + \"construction_site.png\").convert_alpha()\n self.human = human\n self.building = building\n self.site = None\n self.ressource_sites = []\n self.ressource_producing_sites = []\n self.next_ressource = \"\"\n\n def entry_actions(self):\n self.needed_actions = [\"finding construction place\", \"bringing ressources\", \"constructing\"]\n self.needed_ressources = self.building.needed_ressources\n\n def ressources_delivered(self):\n if self.site is not None:\n enough_ressources = True\n for ressource in self.needed_ressources.iterkeys():\n\tif self.needed_ressources[ressource] > self.site.check_ressource(ressource):\n\t enough_ressources = False\n return enough_ressources\n\n def set_construction_site(self, destination):\n self.site = dorflib.Site(self.human.world, self.site_image)\n self.site.location = destination # build at the chosen spot, not wherever the builder happens to stand\n self.site.name = \"construction site\"\n self.site.completion = 0\n self.site.finished = False\n self.human.world.add_entity_cache.append(self.site)\n del self.needed_actions[0]\n\n def finish_building(self):\n pass\n\n def set_next_ressource(self):\n for ressource in self.needed_ressources.iterkeys():\n if self.needed_ressources[ressource] > self.site.check_ressource(ressource):\n\tif self.ressource_available(ressource):\n\t self.next_ressource = ressource\n\n def define_ressource_sites(self, ressource):\n for site in self.human.world.entities_of_type(\"site\"):\n if site.check_ressource(ressource) >= 1 and site != self.site and site.finished:\n\tself.ressource_sites.append(site)\n\n def set_ressource_producing_sites(self):\n for site in self.human.world.entities_of_type(\"site\"):\n if site.produced_ressources in self.needed_ressources:\n\tself.ressource_producing_sites.append(site)\n\n def ressource_available(self, ressource):\n added_ressources = 0\n for site in self.human.world.entities_of_type(\"site\"):\n if ressource in site.ressources:\n\tadded_ressources += site.ressources[ressource]\n if added_ressources >= 5:\n return True\n else:\n return False" },
{ "alpha_fraction": 0.6678500771522522, "alphanum_fraction": 0.6721893548965454, "avg_line_length": 39.90322494506836, "blob_id": "74a55bf503679a4e4dd4d2197091b66ce6291784", "content_id": "7f2ef010235fb2eed8352d227073ee54790212b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2535, "license_type": "no_license", "max_line_length": 96, "num_lines": 62, "path": "/Statmachineadditives.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# loose method fragments for a state machine; this module is not importable as-is\n def define_state(self, being, time_of_day, dorfactive):\n if being.present_project.name == \"go to sleep\":\n being.go_to(being.home)\n if being.arrived():\n\tbeing.state = \"sleeping\"\n\tbeing.mood = Mood(\"content\")\n elif being.present_project.name == \"hide\":\n being.go_to(being.home)\n being.mood.cooldown = 50\n if being.arrived():\n\tbeing.state = \"hiding\"\n elif being.present_project.name == \"working\":\n being.go_to(being.workplace)\n if being.arrived():\n\tbeing.state = \"working\"\n elif being.present_project.name == \"get food\":\n being.go_to(being.home)\n if being.arrived():\n\t being.state = \"eating\"\n elif being.present_project.name == \"wandering\" and being.arrived():\n being.go_to(random.choice(dorfactive.locdict.values()))\n elif being.present_project.name == \"build windmill\":\n being.go_to(Vector2.Vector2(*being.present_project.destination))\n if being.arrived():\n\tbeing.state = \"constructing windmill\"\n elif being.present_project.name == \"attack threat\":\n if len(being.percieved_threats) >= 1:\n\tbeing.attack_target = being.percieved_threats[random.randint(0,len(being.percieved_threats)-1)]\n being.go_to(being.attack_target)\n if being.arrived():\n\tbeing.state = \"fighting\"\n\n #valid next_actions as planned: go_to, find, get, persuade, pursue, ...\n def decide_present_project(self, being, time_of_day, dorfactive):\n if time_of_day == \"night\" and len(being.percieved_threats) == 0:\n being.present_project = Project(\"go to sleep\")\n elif time_of_day == \"day\" and being.mood.name == \"content\":\n if isinstance(being, dorflib.Person):\n\tif being.profession == \"Lumberjack\" and dorfactive.locdict['Woods'].product_count >= 20:\n\t being.present_project = Project(\"build windmill\")\n\t being.present_project.destination = (dorfactive.windmill_x, dorfactive.windmill_y)\n\telif being.profession == \"soldier\":\n\t being.present_project = Project(\"wandering\")\n\telse:\n\t being.present_project = Project(\"working\")\n else:\n\tbeing.present_project = Project(\"wandering\")\n elif being.mood.name == \"in fear\":\n being.present_project = Project(\"hide\")\n elif being.mood.name == \"hungry\":\n being.present_project = Project(\"get food\")\n elif being.mood.name == \"aggressive\":\n being.present_project = Project(\"attack threat\")\n\n\nclass HumanStateWorking(State):\n def __init__(self, human):\n State.__init__(self, \"working\")\n self.human = human\n def do_actions(self):\n self.human.state = \"working\"" },
{ "alpha_fraction": 0.5546296238899231, "alphanum_fraction": 0.5805555582046509, "avg_line_length": 21.978723526000977, "blob_id": "2c86aaf5aa5f1805ce4c74c25a840f61f9d8884f", "content_id": "fede231d633083732f7da51bd16f2622ad5c6af0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 62, "num_lines": 47, "path": "/Vector2.py", "repo_name": "A5308Y/Dorfsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport math\n\nclass Vector2(object):\n\n def __init__(self, x=0.0, y=0.0):\n self.x = x\n self.y = y\n\n def __str__(self):\n return \"(%s, %s)\"%(self.x, self.y)\n\n @staticmethod\n def from_points(P1, P2):\n return Vector2( P2.x - P1.x, P2.y - P1.y )\n\n def get_magnitude(self):\n return math.sqrt( self.x**2 + self.y**2 )\n\n def normalize(self):\n magnitude = self.get_magnitude()\n if magnitude != 0:\n self.x /= magnitude\n self.y /= magnitude\n return Vector2(self.x, self.y)\n else:\n self.x = 0\n self.y = 0\n return Vector2(0,0)\n\n def __add__(self, rhs):\n return Vector2(self.x + rhs.x, self.y + rhs.y)\n\n def __sub__(self, rhs):\n return Vector2(self.x - rhs.x, self.y - rhs.y)\n\n def __neg__(self):\n return Vector2(-self.x, -self.y)\n\n def __mul__(self, scalar):\n return Vector2(self.x * scalar, self.y * scalar)\n\n def __div__(self, scalar):\n return Vector2(self.x / scalar, self.y / scalar)\n\n def get_distance_to(self, position):\n return Vector2.from_points(self, position).get_magnitude()\n" } ]
10
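
The HumanState* classes in this dump all inherit from a State base class that is not among the ten files shown. Below is a minimal sketch of what that interface presumably looks like, reconstructed from how the subclasses use it (entry_actions / do_actions / check_conditions returning the next state's name). Every name in it, including StateMachine and think, is an assumption for illustration, not code from the A5308Y/Dorfsim repo.

# -*- coding: utf-8 -*-
# Hypothetical reconstruction of the missing base classes (Python 2 style,
# matching the dump). Not part of the repo above.

class State(object):
    def __init__(self, name):
        self.name = name

    def entry_actions(self):
        # run once when this state becomes active
        pass

    def do_actions(self):
        # run every simulation tick while this state is active
        pass

    def check_conditions(self):
        # return the name of the next state, or None to stay put
        return None

    def exit_actions(self):
        pass

class StateMachine(object):
    def __init__(self):
        self.states = {}
        self.active_state = None

    def add_state(self, state):
        self.states[state.name] = state

    def set_state(self, new_state_name):
        # leave the old state cleanly, then enter the new one
        if self.active_state is not None:
            self.active_state.exit_actions()
        self.active_state = self.states[new_state_name]
        self.active_state.entry_actions()

    def think(self):
        # one tick: act, then see whether a transition fires
        if self.active_state is None:
            return
        self.active_state.do_actions()
        new_state_name = self.active_state.check_conditions()
        if new_state_name is not None:
            self.set_state(new_state_name)

Under this reading, a human entity would register each HumanState* instance with add_state() and call think() once per frame; a check_conditions returning, say, "hunting" is what drives the transitions seen throughout the dump, and ProjectMachine in Projects.py mirrors the same set_state pattern for projects.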