repo_name (string, len 5-114) | repo_url (string, len 24-133) | snapshot_id (string, len 40) | revision_id (string, len 40) | directory_id (string, len 40) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k-683M, nullable) | star_events_count (int64, 0-22.6k) | fork_events_count (int64, 0-4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, len 1-13.2k) | num_files (int64, 1-13.2k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jelmerbergman/PythonDataScience | https://github.com/jelmerbergman/PythonDataScience | 40f49a18860249cf73c31f4eb0d8aae16c067e5d | 5300438e3e331bb191f5a6dd3834ccccd1e1a976 | 242dbcf560893133adf664f1a476a09082449e14 | refs/heads/master | 2020-12-13T12:17:14.111049 | 2020-08-18T07:53:13 | 2020-08-18T07:53:13 | 234,413,014 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6618191599845886,
"alphanum_fraction": 0.6792702078819275,
"avg_line_length": 26.413043975830078,
"blob_id": "e77c95f6e1c102d4d660444f5b8ecd642bec405b",
"content_id": "9dede89b34524816c99bb6ab59ea1ab876630688",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3782,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 138,
"path": "/Fitbit data.py",
"repo_name": "jelmerbergman/PythonDataScience",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 23 22:31:27 2020\n\n@author: jelmerbergman\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nos.chdir(\"/Users/jelmerbergman/downloads/data/Fitbit Data\")\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n\n#combine all files in the list\n#combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])\n#export to csv\n#combined_csv.to_csv( \"combined_csv.csv\", index=False, encoding='utf-8-sig')\n\n\n\"\"\"\n\"\"\"\nclass display(object):\n \"\"\"Display HTML representation of multiple objects\"\"\"\n template = \"\"\"<div style=\"float: left; padding: 10px;\">\n <p style='font-family:\"Courier New\", Courier, monospace'>{0}</p>{1}\n </div>\"\"\"\n def __init__(self, *args):\n self.args = args\n \n def _repr_html_(self):\n return '\\n'.join(self.template.format(a, eval(a)._repr_html_())\n for a in self.args)\n \n def __repr__(self):\n return '\\n\\n'.join(a + '\\n' + repr(eval(a))\n for a in self.args)\n\n\"\"\"\n\"\"\"\n\n\n\n\nFitbit1Data = pd.read_csv('/Users/jelmerbergman/downloads/data/Fitbit Data/combined_csv.csv')\nFitbit1Data['daydate'] = pd.to_datetime(Fitbit1Data['date']).dt.date\nFitbitData = Fitbit1Data[Fitbit1Data.steps != 0]\n\nFitbit1Data = Fitbit1Data.set_index((pd.DatetimeIndex(Fitbit1Data['date'])))\n#times = FitbitData.to_datetime(FitbitData.timestamp_col)\n#FitbitData.groupby([times.hour, times.minute]).steps.sum()\n#(pd.DatetimeIndex(FitbitData['date']))\n#FitbitData.groupby(FitbitData.date.hour).mean()\n\n\n\"\"\"\nOnderstaande levert een draaitabel op. Is dit handig? \n\"\"\"\ngrouper = Fitbit1Data.groupby([pd.Grouper(freq='1H'), 'fitbit_id'])\nresult = grouper['steps'].sum().unstack('fitbit_id').fillna(0)\n\n\"\"\"\nResample levert een hergroepering op op fitbitid, maar de fitbitid in de column wordt ook geaggregeerd, hoe kan die voorkomen worden? 
\n\"\"\"\nFitbit1DataHourly = Fitbit1Data.groupby('fitbit_id').resample('1H', label='right').sum()\n\n\n\nFitbit1DataDaily = Fitbit1Data.groupby('fitbit_id').resample('1D', label='right').sum()\n\ndef f(row):\n if row['steps'] >= 10000:\n val = 1\n \n else:\n val = 0\n return val\n\ndef f2(row):\n if row['steps'] > 0:\n val = 1\n \n else:\n val = 0\n return val\n\nFitbit1DataDaily['Goal'] = Fitbit1DataDaily.apply(f, axis=1)\n\n\n\nFitbit1DataDaily['Usable'] = Fitbit1DataDaily.apply(f2, axis=1)\n\n\n\nFitbit1DataHourly.boxplot()\n\nFitbit1DataHourly = Fitbit1DataHourly.drop(columns=[ 'fitbit_id'])\nFitbit1DataHourly.to_csv('/Users/jelmerbergman/downloads/data/Fitbit Data/combined_hourly_csv.csv')\n\nFitbit1DataHourly.boxplot(by=\"treatment_id\", column = \"steps\")\n\n\nFitbitAnalysisData = pd.read_csv('/Users/jelmerbergman/downloads/data/Fitbit Data/combined_hourly_csv.csv')\nFitbitAnalysisData = FitbitAnalysisData.drop(columns=['treatment_id', 'fitbit_id.1'])\n\n\n#Extraheer de datum uit de datetime\nFitbitAnalysisData['day_date'] = pd.to_datetime(\n FitbitAnalysisData['date'], errors='coerce'\n).dt.floor('D')\n\n\n#Kijk naar dagtotalen\nFitbitAnalysisData['zero'] = FitbitAnalysisData.groupby(['fitbit_id','day_date'])['steps'].agg('sum')\n\n\nprint(\"Start de statistiek\")\ngrp = Fitbit1DataHourly.groupby(['treatment_id']) \n\nprint (grp.max()) \nprint (grp.mean()) \nprint (grp.count()) \n\ngrph = Fitbit1DataDaily.groupby(['treatment_id']) \nprint (grph.max()) \nprint (grph.mean()) \nprint (grph.count()) \n\nprint(\"Eindig de statistiek\")\npivot = pd.pivot_table(Fitbit1DataHourly, values=[\"steps\",\"calories\",\"mets\"], index=['treatment_id'], aggfunc=np.mean)\npivot.plot\n\n#grouper = df.groupby([pd.Grouper(freq='1H'), 'Location'])\n#result = grouper['Event'].count().unstack('Location').fillna(0)"
}
] | 1 |
ravenscroftj/SmartQMPD | https://github.com/ravenscroftj/SmartQMPD | fe5638c65b46caa503b634ba9d096e60ce5798a9 | b375617cfea98acfce8b3472157b30c69c3f6bad | 45b58b74b166d5a134fc66aadb422b6d62561361 | refs/heads/master | 2021-01-01T19:11:21.621544 | 2012-10-03T11:07:02 | 2012-10-03T11:07:02 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.533502995967865,
"alphanum_fraction": 0.533502995967865,
"avg_line_length": 27.42168617248535,
"blob_id": "9956cad7c0e18949a6f6630b8f936069fd7299fd",
"content_id": "82aa8d362f10b3657bb20a99c7dfe2e3f5f53ba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2358,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 83,
"path": "/orm.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "'''\n\nDatabase stuff for the clustering application\n\n'''\nimport logging\n\nfrom sqlalchemy import create_engine, Column, Integer, String, Table, ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\ndef init(engine):\n ''' Given a db engine, create the relevent tables\n '''\n Base.metadata.create_all(engine)\n\n#---------------------------------------------------------------\n\nartist_tags = Table('artist_tags', Base.metadata,\n Column('artist_id', Integer, ForeignKey('artists.id')),\n Column('tag_id', Integer, ForeignKey('tags.id'))\n)\n\n#---------------------------------------------------------------\n\nalbum_tags = Table('album_tags', Base.metadata,\n Column('album_id', Integer, ForeignKey('albums.id')),\n Column('tag_id', Integer, ForeignKey('tags.id'))\n)\n\n#---------------------------------------------------------------\n\nsong_tags = Table('song_tags', Base.metadata,\n Column('song_id', Integer, ForeignKey('songs.id')),\n Column('tag_id', Integer, ForeignKey('tags.id'))\n)\n \n#---------------------------------------------------------------\n\nclass Tag(Base):\n \n __tablename__ = 'tags'\n \n id = Column(Integer, primary_key=True)\n tag = Column(String)\n\n#---------------------------------------------------------------\n\nclass Artist(Base):\n \n __tablename__ = 'artists'\n \n id = Column(Integer, primary_key=True)\n name = Column(String)\n songs = relationship(\"Song\", backref=\"artist\")\n albums = relationship(\"Album\", backref=\"artist\")\n tags = relationship(\"Tag\", secondary=artist_tags, backref=\"artists\")\n \n#---------------------------------------------------------------\n\nclass Album(Base):\n __tablename__ = 'albums'\n \n id = Column(Integer, primary_key=True)\n title = Column(String)\n artist_id = Column(Integer, ForeignKey('artists.id'))\n tags = relationship(\"Tag\", secondary=album_tags, backref=\"albums\")\n \n#---------------------------------------------------------------\n\nclass Song(Base):\n \n __tablename__ = 'songs'\n \n id = Column(Integer, primary_key=True)\n title = Column(String)\n artist_id = Column(Integer, ForeignKey('artists.id'))\n album_id = Column(Integer, ForeignKey('albums.id'))\n tags = relationship(\"Tag\", secondary=song_tags, backref=\"songs\")\n\n#---------------------------------------------------------------"
},
{
"alpha_fraction": 0.6159420013427734,
"alphanum_fraction": 0.6252588033676147,
"avg_line_length": 21.372093200683594,
"blob_id": "b929cf8486530387498ea51e29809f964f4405f3",
"content_id": "9af2213e3eb01c4878a752820738c7787944bbf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 43,
"path": "/main.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "import mutagen\nimport os\nimport sys\nimport logging\nimport pylast\nimport threading\nimport traceback\nfrom decision import DecisionEngine\nfrom mpdevt import PollingMPDClient, mpd_listener\n\n@mpd_listener('OnSongChange')\ndef test_listener(type, evt):\n logging.info(\"Now playing: %s - %s\", evt['title'], evt['artist'])\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n logging.info(\"Starting QMPD...\")\n \n if(len(sys.argv) > 1):\n \n if(len(sys.argv) < 3):\n port = 6600\n else:\n port = sys.argv[2]\n \n try:\n #connect mpd client\n client = PollingMPDClient()\n client.connect(host=sys.argv[1], port=port)\n \n #populate the decision element\n d = DecisionEngine(client.client)\n d.populate()\n \n l = raw_input()\n except Exception as e:\n traceback.print_exc()\n print \"exiting...\"\n \n client.running = False\n \n else:\n print \"Usage: %s <host> [port]\" % sys.argv[0]\n "
},
{
"alpha_fraction": 0.5009846687316895,
"alphanum_fraction": 0.5179204344749451,
"avg_line_length": 23.190475463867188,
"blob_id": "e9f6829975eee8e568eaeab9743a6c484bacfc06",
"content_id": "e9142a68b5ae61bcff61699a2c871bcd2488a719",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2539,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 105,
"path": "/clustertest.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "import cluster\nimport pylast\nimport os\nimport json\nimport mpd\nfrom mutagen.easyid3 import EasyID3\nimport cluster\n\nAPI_KEY = \"21e7e21f732bac4749c6deb03b902cc5\"\nAPI_SECRET = \"5d529e71dacfef10ae597a086a435afe\"\n\nnetwork = pylast.LastFMNetwork(api_key=API_KEY, api_secret=API_SECRET)\n\n#----------------------------------------------------------------------------\n\ndef gather_mpd_data( ):\n \n music_files = []\n client = mpd.MPDClient()\n client.connect(host=\"localhost\", port=6600)\n \n songs = client.playlistinfo()\n music_count = len(songs)\n print \"There are %d tracks detected\" % music_count\n \n i = 1\n for song in songs:\n print \"[%d%%] %d / %d \" % ( i*100/music_count , i, music_count )\n song['tags'] = get_tags(song['artist'], song['title'])\n i += 1\n return songs\n#----------------------------------------------------------------------------\n\ndef gather_data( path ):\n \n metadata = []\n music_files = []\n \n #walk the directory looking for music\n for root, dirs, files in os.walk(path):\n for file in files:\n if(file.endswith(\".mp3\")):\n music_files.append(os.path.join(root,file))\n\n #now do the processing\n music_count = len(music_files)\n print \"There are %d MP3 files detected\" % music_count\n\n i = 1\n for f in music_files:\n print \"[%d%%] %d / %d \" % ( i*100/music_count , i, music_count )\n metadata.append( process_file(f) )\n i += 1\n return metadata\n\n#----------------------------------------------------------------------------\n\ndef process_file( file_path ):\n '''Process a file and get metadata'''\n metadata = {}\n e = EasyID3( file_path )\n \n for key in 'title','artist','album':\n metadata[key] = e[key][0]\n \n #get the last fm data\n metadata['tags'] = get_tags(e['artist'][0], e['title'][0])\n \n return metadata\n\n#----------------------------------------------------------------------------\n\ndef get_tags(artist , title):\n \n \n l_track = network.get_track(artist, title)\n \n tags = {}\n for tag in l_track.get_top_tags():\n if(tag.weight > 20):\n tags[tag.item.name] = float(tag.weight)\n \n return tags\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n \n if not(os.path.exists(\"/tmp/testdata\")):\n data = gather_mpd_data()\n #data = gather_data(\"/home/james/tmp/\")\n \n with open(\"/tmp/testdata\",'w') as f:\n json.dump(data, f)\n \n else:\n \n with open(\"/tmp/testdata\",'r') as f:\n data = json.load(f)\n \n c = cluster.MusicClusterer()\n \n cl = c.cluster(data)\n \n c.display_songs(cl.data[0].data[0])"
},
{
"alpha_fraction": 0.45985692739486694,
"alphanum_fraction": 0.46621620655059814,
"avg_line_length": 20.69827651977539,
"blob_id": "e209b02435a3030cb9deb21f3d5be2bdd7b2ce90",
"content_id": "2cb837b9c4724dd5cdb47ae95fa7aef626fbad2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2516,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 116,
"path": "/cluster.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "import os\nimport math\nimport copy\nimport logging\n \nclass SongDict(dict): \n __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n \n def __repr__(self):\n return unicode(\"%s by %s\" % (self.title, self.artist))\n \n#---------------------------------------------------------------\nclass MusicClusterer(object):\n \n def __init__(self):\n self.logger = logging.getLogger(\"Clusterer\")\n \n #-------------------------------------------------------------\n\n def cluster(self, objects):\n \n first_clusters = []\n \n for x in objects:\n first_clusters.append(Cluster([SongDict(**x)]))\n \n return self.__cluster_impl(first_clusters)\n \n #-------------------------------------------------------------\n \n def __cluster_impl(self, clusters):\n \n #make sure there is some work to do\n if(len(clusters) < 2):\n return clusters[0]\n \n todo = copy.copy(clusters)\n new_sets = []\n \n for x in clusters:\n #skip clusters we have already seen\n if x not in todo:\n continue\n \n nearest_dist = float(\"inf\")\n n = None\n \n for y in todo:\n \n #no self comparisons\n if(y == x): continue\n \n dist = x.distance(y)\n \n if( dist < nearest_dist ):\n n = y\n nearest_dist = dist\n \n #pop the nearest element from todo\n todo.remove(x)\n \n if n == None:\n new_sets.append( Cluster([x]) )\n else:\n todo.remove(n)\n new_sets.append( Cluster([x,n]) )\n \n #cluster new sets\n return self.__cluster_impl(new_sets)\n \n \n def display_songs(self, c):\n \n if( isinstance(c, Cluster) ):\n for d in c.data:\n self.display_songs(d)\n else:\n print \"%s - %s \" % (c.title, c.artist)\n \n#---------------------------------------------------------------\nclass Cluster:\n \n tags = None\n \n def __init__(self, data):\n self.data = data\n \n def distance(self, cluster):\n tags_1 = self.get_tags()\n tags_2 = cluster.get_tags()\n i_1 = float(len(tags_1))\n i_2 = float(len(tags_2))\n \n x = 0.0\n \n \n for t in tags_1.keys():\n if t in tags_2.keys():\n x += tags_1[t] * tags_2[t]\n \n return x / max( i_1, i_2 )\n \n def get_tags(self):\n \n if(self.tags == None):\n self.tags = {}\n \n for d in self.data:\n self.tags = dict(self.tags.items() + d.tags.items())\n \n return self.tags\n \n \n def __repr__(self):\n return unicode(self.data)"
},
{
"alpha_fraction": 0.6239354610443115,
"alphanum_fraction": 0.6270730495452881,
"avg_line_length": 27.151899337768555,
"blob_id": "cf33ae60f5a1fa14a3862e3df9f848eb13e954f4",
"content_id": "2ece6251537a6f440485d08c04d954c893da9403",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2231,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 79,
"path": "/mpdevt.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "import mpd\nimport threading\nimport logging\nimport time\n\n'''\nTo enable the decision system and learning system to listen for MPD events, I wrote an MPD event dispatcher.\n\n'''\n\n#number of seconds between server checks\nPOLLING_INTERVAL = 0.5\n\n_event_listeners = {}\n\ndef mpd_listener( evttype ):\n ''' This is a decorator function that turns objects into MPD event listeners\n \n This decorator is used to add the given callable object to the MPD event listeners\n under a given condition\n '''\n if not(_event_listeners.has_key(evttype)):\n _event_listeners[evttype] = []\n \n return lambda x: _event_listeners[evttype].append(x)\n\n#------------------------------------------- PollingMPDClient -------------------------------------------\n\nclass PollingMPDClient(object):\n \n currentSongID = 0\n running = False\n \n def __init__(self):\n self.logger = logging.getLogger(\"MPDClient\")\n self.client = mpd.MPDClient()\n \n \n def connect(self, host, port=6600, password=None):\n '''Connect to an MPD instance and poll'''\n self.logger.info(\"Now connecting to %s:%d...\", host,port)\n self.client.connect(host=host, port=port)\n \n if(password != None):\n self.logger.info(\"Authenticating...\")\n self.client.password(password)\n else:\n self.logger.info(\"No password provided, skipping authentication\")\n \n #start the main loop\n self.thread = threading.Thread(target=self.run_poller)\n self.thread.start()\n \n \n def run_poller(self):\n '''This is the main poller that gets events and feeds them back\n '''\n \n self.running = True\n \n while self.running:\n status = self.client.status()\n \n if(status.has_key('songid')):\n if(self.currentSongID != status['songid']):\n self.emit(\"OnSongChange\", evt=self.client.currentsong())\n self.currentSongID = status['songid']\n\n time.sleep(POLLING_INTERVAL)\n \n \n def emit(self, type, *args, **kwargs):\n '''Emits event of the given type to all registered listeners'''\n \n self.logger.debug(\"MPDEvent: %s - %s - %s\", type, args, kwargs)\n \n if(_event_listeners.has_key(type)):\n for listener in _event_listeners[type]:\n listener(type, *args, **kwargs)\n "
},
{
"alpha_fraction": 0.7012779712677002,
"alphanum_fraction": 0.7012779712677002,
"avg_line_length": 22.037036895751953,
"blob_id": "c7a4a773eb150eb88b26dc53f6006e096c0aa098",
"content_id": "5d481ebba194d803d9b3770f839d500a2ce50aab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 626,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 27,
"path": "/learning.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "'''\nMusic database learning engine\n\nby James Ravenscroft\n'''\nimport logging\n\nfrom sqlalchemy.orm import sessionmaker\nfrom mpdevt import mpd_listener\n\n#database setup stuff\n_Session = sessionmaker()\n\nclass LearningEngine(object):\n '''Class that does some clever stuff to help figure out what song to play next\n '''\n songs = {}\n \n def __init__(self, dbengine):\n self.logger = logging.getLogger(\"LearningEngine\")\n _Session.configure(bind=dbengine)\n self.session = _Session()\n \n @mpd_listener('OnSongChange')\n def OnSongChange(event_type, evt):\n '''Called when the MPD client finds that a new song is playing\n '''\n "
},
{
"alpha_fraction": 0.6022409200668335,
"alphanum_fraction": 0.6181139349937439,
"avg_line_length": 22.19565200805664,
"blob_id": "a1c72f6675adef19985808f01414cc4174adbcd1",
"content_id": "04379c4d0a05fcd46689f37b71049d252b2abb7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1071,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 46,
"path": "/decision.py",
"repo_name": "ravenscroftj/SmartQMPD",
"src_encoding": "UTF-8",
"text": "'''\nThe track chooser logic for the queue manager\n\n'''\nimport orm\nimport logging\nimport pylast\nfrom mpdevt import mpd_listener\n\nAPI_KEY = \"21e7e21f732bac4749c6deb03b902cc5\"\nAPI_SECRET = \"5d529e71dacfef10ae597a086a435afe\"\n\nclass DecisionEngine(object):\n '''Class that decides what to add to the play queue\n '''\n \n songs = {}\n \n def __init__(self, mpdclient):\n '''Create a new mpd decision engine\n '''\n self.client = mpdclient\n self.logger = logging.getLogger(\"DecisionEngine\")\n \n def populate(self):\n '''Get MPD metadata from the server\n '''\n for song in self.client.listallinfo():\n \n if(song.has_key('title') & song.has_key('artist')):\n \n #add song to list\n self.songs[song['file']] = song\n \n #add last fm data to song\n \n else:\n self.logger.info(\"Could not add song %s\", song)\n \n \n \n @mpd_listener('OnSongChange')\n def OnSongChange(event_type, evt):\n '''Called when the MPD client finds that a new song is playing\n '''\n #add a new track to the queue using the decision engine\n "
}
] | 7 |
LEGEND18325/PROJECT107 | https://github.com/LEGEND18325/PROJECT107 | e61d43389d853faadc2342e4a41cb46b1444d09b | 4037493051337773c498a6555cbd45919b8d22db | ddff272cc1c179e37e3696f477920a81436ceb91 | refs/heads/main | 2023-08-27T00:42:56.719715 | 2021-10-22T10:49:16 | 2021-10-22T10:49:16 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6349614262580872,
"alphanum_fraction": 0.6349614262580872,
"avg_line_length": 14.208333015441895,
"blob_id": "f6627841a0bfa1865ab03f9aa838868617d1bf49",
"content_id": "be1261e4f08e1e2fffdcc8583e5b4bb6df8bff5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 24,
"path": "/Data.py",
"repo_name": "LEGEND18325/PROJECT107",
"src_encoding": "UTF-8",
"text": "import pandas as p\r\nimport csv\r\nfrom pandas.core import groupby\r\n\r\nimport plotly_express as px\r\n\r\nfileName = p.read_csv('StudentData.csv')\r\n\r\n\r\n\r\ngrouping=fileName.groupby([\"student_id\",'level'],as_index=False)['attempt'].mean()\r\n\r\ngraph=px.scatter(\r\n grouping,\r\n x='student_id',\r\n y='level',\r\n size='attempt',\r\n color='attempt'\r\n \r\n)\r\n\r\nprint(grouping)\r\n\r\ngraph.show()\r\n"
}
] | 1 |
joao-afonso-pereira/Apples_vs_Tomatoes | https://github.com/joao-afonso-pereira/Apples_vs_Tomatoes | 5feb46acbf68b7fda4ea08e6fd51aa8b2f568c2e | 247150d7e9a1e1dc58d6931ff0f2c4142908a625 | 473dd090501c721d776135cf80a8094c47c7362c | refs/heads/master | 2022-11-28T16:01:44.709031 | 2020-07-31T13:22:08 | 2020-07-31T13:22:08 | 283,522,864 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5346112847328186,
"alphanum_fraction": 0.5638977885246277,
"avg_line_length": 21.638553619384766,
"blob_id": "912639adc8bcadf0664bca18c8d402aaad3a3c67",
"content_id": "d3c21408c6b1c45db371c6f17dbccaa5cff2d3fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1878,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 83,
"path": "/cnn.py",
"repo_name": "joao-afonso-pereira/Apples_vs_Tomatoes",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom statistics import mean\nimport sys\nfrom torchsummary import summary\n\nclass Flat(nn.Module):\n \n def __init__(self):\n super(Flat, self).__init__()\n \n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass CNN(nn.Module):\n \n def __init__(self):\n \n super(CNN, self).__init__()\n \n self.main = nn.Sequential(\n\n nn.Conv2d(3, 64, 5),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n \n nn.Conv2d(64, 64, 5),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n\n nn.Conv2d(64, 64, 5),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n \n Flat(),\n\n nn.Linear(861184, 128),\n nn.ReLU(inplace=True),\n \n nn.Linear(128, 64),\n nn.ReLU(inplace=True),\n \n nn.Linear(64, 2),\n \n )\n\n def forward(self, input):\n\n return self.main(input)\n\n \nif __name__ == '__main__':\n\n print()\n if torch.cuda.is_available():\n DEVICE = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc. \n print(\"Running on the GPU...\")\n else:\n DEVICE = torch.device(\"cpu\")\n print(\"Running on the CPU...\")\n print()\n\n \n model = CNN().to(DEVICE)\n print(model)\n\n summary(model, (3, 128, 128))"
},
{
"alpha_fraction": 0.5450901985168457,
"alphanum_fraction": 0.551872968673706,
"avg_line_length": 27.831111907958984,
"blob_id": "a95b71c371843071639115e3edfe22dfeb307327",
"content_id": "80757ba92493116f9fbe874cade57b424e7e3ac1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6487,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 225,
"path": "/train.py",
"repo_name": "joao-afonso-pereira/Apples_vs_Tomatoes",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom statistics import mean\nimport sys\n\nfrom data import get_data_loaders\nfrom cnn import CNN\n\nEPOCHS = 50\n\nloss_fn = F.cross_entropy\n\nLEARNING_RATE = 1e-04\nREG = 1e-04\n\ndef fit(model, data, device):\n \n # train and validation loaders\n train_loader, valid_loader = data\n \n print(\"Train/Val batches: {}/{}\".format(len(train_loader),\n len(valid_loader)))\n\n # Set the optimizer\n optimizer = torch.optim.Adam(model.parameters(),\n lr=LEARNING_RATE,\n weight_decay=REG)\n\n # Start training\n train_history = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}\n\n # Best validation params\n best_val = -float('inf')\n best_epoch = 0\n\n for epoch in range(EPOCHS):\n \n print('\\nEPOCH {}/{}\\n'.format(epoch + 1, EPOCHS))\n\n # TRAINING\n # set model to train\n model.train()\n for i, (x, y) in enumerate(train_loader): # iterations loop\n # send mini-batch to gpu\n x = x.to(device)\n \n y = y.type(torch.LongTensor)\n y = y.to(device)\n \n # forward pass\n y_pred = model(x)\n \n ypred_ = torch.argmax(y_pred, dim=1)\n\n # Compute vae loss\n loss = loss_fn(y_pred, y)\n\n # Backprop and optimize\n optimizer.zero_grad() # clear previous gradients\n loss.backward() # compute new gradients\n optimizer.step() # optimize the parameters\n\n # display the mini-batch loss\n sys.stdout.write(\"\\r\" + '........{}-th mini-batch loss: {:.3f}'.format(i, loss.item()))\n sys.stdout.flush()\n \n # Validation\n tr_loss, tr_acc = eval_model(model, train_loader, device)\n train_history['train_loss'].append(tr_loss.item())\n train_history['train_acc'].append(tr_acc)\n \n val_loss, val_acc = eval_model(model, valid_loader, device)\n train_history['val_loss'].append(val_loss.item())\n train_history['val_acc'].append(val_acc)\n\n\n # save best validation model\n if best_val < val_acc:\n torch.save(model.state_dict(), 'outputs/cnn.pth')\n best_val = val_acc\n best_epoch = epoch\n\n # display the training loss\n print()\n print('\\n>> Train loss: {:.3f} |'.format(tr_loss.item()) + ' Train Acc: {:.3f}'.format(tr_acc))\n\n print('\\n>> Valid loss: {:.3f} |'.format(val_loss.item()) + ' Valid Acc: {:.3f}'.format(val_acc))\n\n print('\\n>> Best model: {} / Acc={:.3f}'.format(best_epoch+1, best_val))\n print()\n\n # save train/valid history\n plot_fn = 'outputs/train_history.png'\n plot_train_history(train_history, plot_fn=plot_fn)\n\n # return best validation model\n model.load_state_dict(torch.load('outputs/cnn.pth'))\n\n return model\n\n\ndef plot_train_history(train_history, plot_fn=None):\n plt.switch_backend('agg')\n\n best_val_epoch = np.argmin(train_history['val_loss'])\n best_val_acc = train_history['val_acc'][best_val_epoch]\n best_val_loss = train_history['val_loss'][best_val_epoch]\n plt.figure(figsize=(7, 5))\n epochs = len(train_history['train_loss'])\n x = range(epochs)\n plt.subplot(211)\n plt.plot(x, train_history['train_loss'], 'r-')\n plt.plot(x, train_history['val_loss'], 'g-')\n plt.plot(best_val_epoch, best_val_loss, 'bx')\n plt.xlabel('Epoch')\n 
plt.ylabel('Train/Val loss')\n plt.legend(['train_loss', 'val_loss'])\n plt.axis([0, epochs, 0, max(train_history['train_loss'])])\n plt.subplot(212)\n plt.plot(x, train_history['train_acc'], 'r-')\n plt.plot(x, train_history['val_acc'], 'g-')\n plt.plot(best_val_epoch, best_val_acc, 'bx')\n plt.xlabel('Epoch')\n plt.ylabel('Train/Val acc')\n plt.legend(['train_acc', 'val_acc'])\n plt.axis([0, epochs, 0, 1])\n if plot_fn:\n plt.show()\n plt.savefig(plot_fn)\n plt.close()\n else:\n plt.show()\n\n\ndef eval_model(model, data_loader, device, debug=False):\n with torch.no_grad():\n\n model.eval()\n \n loss_eval = 0\n N = 0\n n_correct = 0\n \n for i, (x, y) in enumerate(data_loader):\n # send mini-batch to gpu\n x = x.to(device)\n \n y = y.type(torch.LongTensor)\n y = y.to(device)\n\n # forward pass\n y_pred = model(x) \n\n # Compute cnn loss\n loss = loss_fn(y_pred, y)\n loss_eval += loss * x.shape[0]\n\n # Compute Acc\n N += x.shape[0]\n ypred_ = torch.argmax(y_pred, dim=1)\n n_correct += torch.sum(1.*(ypred_ == y)).item()\n \n y = y.cpu().numpy()\n ypred_ = ypred_.cpu().numpy()\n \n\n loss_eval = loss_eval / N\n acc = n_correct / N\n \n \n return loss_eval, acc \n\n\ndef main():\n\n print()\n if torch.cuda.is_available():\n DEVICE = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc. \n print(\"Running on the GPU\")\n else:\n DEVICE = torch.device(\"cpu\")\n print(\"Running on the CPU\")\n \n \n model = CNN().to(DEVICE)\n \n \n (train_loader, valid_loader, test_loader) = get_data_loaders()\n \n \n # Fit model\n model, train_history, _, best_epoch = fit(model=model, data=(train_loader, valid_loader), device=DEVICE)\n \n \n # Test results\n test_loss, test_acc = eval_model(model, test_loader, DEVICE)\n \n print('\\nTest loss: {:.3f} |'.format(test_loss.item()) + ' Test Acc: {:.3f}'.format(test_acc))\n \n results_test = [test_loss.item(), test_acc]\n \n np.savetxt('results.txt', results_test, fmt='%.3f', delimiter=',')\n \n \n print(\"\\n\\nDONE!\")\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6275748610496521,
"alphanum_fraction": 0.634990394115448,
"avg_line_length": 23.44295310974121,
"blob_id": "8b3b9d752aee6974a4beb3a1c3d592a2d659fa1f",
"content_id": "9238c61473915ac38ed1bd93e381f02a3ad647cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3641,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 149,
"path": "/data.py",
"repo_name": "joao-afonso-pereira/Apples_vs_Tomatoes",
"src_encoding": "UTF-8",
"text": "import torch\nfrom torch.utils.data import Dataset\nimport scipy.io as sio\nimport os\nimport numpy as np\nimport copy\nfrom sklearn.model_selection import train_test_split\nfrom torchvision import transforms\nfrom PIL import Image\nfrom PIL import ImageOps\nimport matplotlib.pyplot as plt\nimport random \nfrom sklearn import metrics\nfrom statistics import mean\nfrom statistics import mode \nfrom torch.utils import data \nfrom torch import nn\nfrom torch.nn import functional as F\nimport math\nfrom torch import optim\nimport sys\nimport pickle\nimport cv2\nfrom sklearn.feature_extraction import image\n\nclass numpyToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n return torch.from_numpy(sample).float()\n \nPATH = \"L:/Projects/Apples_vs_Tomatoes/Dataset/\"\n \nclass DATASET(Dataset):\n \n def __init__(self):\n \n apples_path = PATH + \"apples.txt\"\n tomatoes_path = PATH + \"tomatoes.txt\"\n\n # Initialize X (data_names), y (apple=0, tomato=1)\n X = []\n y = []\n\n # read apples names\n with open(apples_path, 'r') as f:\n apples_names = f.readlines()\n\n self.n_apples = len(apples_names)\n\n # append real_data to X and y\n X.extend(apples_names)\n y.extend([0]*self.n_apples)\n \n # read tomatoes names\n with open(tomatoes_path, 'r') as f:\n tomatoes_names = f.readlines()\n\n self.n_tomatoes = len(tomatoes_names)\n\n # append real_data to X and y\n X.extend(tomatoes_names)\n y.extend([1]*self.n_tomatoes)\n\n self.n_samples = len(y)\n\n self.X = np.array(X)\n self.y = np.array(y)\n\n def __getitem__(self, idx):\n\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_name = self.X[idx]\n\n \n sample = Image.open(img_name.rstrip()) \n sample = sample.resize((300, 300))\n \n sample = np.array(sample)\n \n transformation = self.transformations()\n \n return [transformation(sample).view((3, sample.shape[0], sample.shape[1])), self.y[idx]]\n\n def show(self, idx):\n\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_name = self.X[idx]\n label = self.y[idx]\n\n \n sample = Image.open(img_name.rstrip()) \n sample = sample.resize((64, 64))\n \n sample = np.array(sample)\n\n plt.imshow(sample)\n plt.axis('off')\n plt.title(\"Image number {} (label={})\".format(idx, label))\n plt.show()\n\n def transformations(self):\n data_transform = transforms.Compose([transforms.ToTensor()])\n \n return data_transform\n\n def __len__(self):\n return self.n_samples\n\n def count(self):\n print(\"Number of real samples: {}\\nNumber of fake samples: {}\\nTOTAL: {}\\n\".format(self.n_real, self.n_fake, self.n_samples))\n\n \n#%%DATA LOADER\n \ndef get_data_loaders():\n \n \n dataset = DATASET()\n index = random.randint(0, len(dataset)-1)\n dataset.show(index)\n \n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n _dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])\n \n train_size = int(0.8 * len(_dataset))\n val_size = len(_dataset) - train_size\n train_dataset, val_dataset = torch.utils.data.random_split(_dataset, [train_size, val_size])\n \n # Parameters\n params = {'batch_size': 16,\n 'shuffle': True,\n 'num_workers': 0}\n \n \n train_loader = data.DataLoader(train_dataset, **params)\n valid_loader = data.DataLoader(val_dataset, **params)\n test_loader = data.DataLoader(test_dataset, **params)\n \n return train_loader, valid_loader, test_loader\n\nif __name__ == '__main__':\n \n train, val, test = get_data_loaders()"
},
{
"alpha_fraction": 0.8301886916160583,
"alphanum_fraction": 0.8301886916160583,
"avg_line_length": 51.5,
"blob_id": "0a9bf61c83c01fa1bca5cdc8f003883281be74a7",
"content_id": "771979a26f4b104cac52dbb7c6194d9087578ef1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 2,
"path": "/README.md",
"repo_name": "joao-afonso-pereira/Apples_vs_Tomatoes",
"src_encoding": "UTF-8",
"text": "# Apples_vs_Tomatoes\nSimple Convolutional Neural Network to distinguish between red apples and tomatoes. \n"
},
{
"alpha_fraction": 0.6765249371528625,
"alphanum_fraction": 0.6765249371528625,
"avg_line_length": 30.882352828979492,
"blob_id": "eed7bb7ece036e8651fcd38afc0db9857204f1d4",
"content_id": "51a6b501fcf4c4001f3cd8f4cfb725f82b3d9cc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 17,
"path": "/create_txt.py",
"repo_name": "joao-afonso-pereira/Apples_vs_Tomatoes",
"src_encoding": "UTF-8",
"text": "import glob\n\napples = []\nfor filename in glob.glob(\"L:/Projects/Apples_vs_Tomatoes/Dataset/Apples/*.png\"):\n apples.append(filename)\n\nwith open('L:/Projects/Apples_vs_Tomatoes/Dataset/apples.txt', 'w') as f:\n for item in apples:\n f.write(\"{}\\n\".format(item))\n \ntomatoes = []\nfor filename in glob.glob(\"L:/Projects/Apples_vs_Tomatoes/Dataset/Tomatoes/*.png\"):\n tomatoes.append(filename)\n\nwith open('L:/Projects/Apples_vs_Tomatoes/Dataset/tomatoes.txt', 'w') as f:\n for item in tomatoes:\n f.write(\"{}\\n\".format(item))"
}
] | 5 |
thiagoalmeidadon/django-rest-api | https://github.com/thiagoalmeidadon/django-rest-api | bae83c4d118bda4b56e9964f768697fbbb1aa694 | c72df9a5d75875ebf9ccd69fd86e41558a88bd79 | 9252893df7b8f161d510eebbc2301fe62cba6438 | refs/heads/master | 2023-05-31T01:05:49.531419 | 2021-06-25T03:22:23 | 2021-06-25T03:22:23 | 380,101,887 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5264977216720581,
"alphanum_fraction": 0.557603657245636,
"avg_line_length": 30,
"blob_id": "a49f4ef26fff09ddd96d8b22d4d4a4f876966eb4",
"content_id": "668f9982452071fe71c63c71234f03cbba63b505",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 868,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 28,
"path": "/livros/migrations/0001_initial.py",
"repo_name": "thiagoalmeidadon/django-rest-api",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.4 on 2021-06-25 03:12\n\nfrom django.db import migrations, models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Livros',\n fields=[\n ('id_livro', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('titulo', models.CharField(max_length=255)),\n ('autor', models.CharField(max_length=255)),\n ('ano_lancamento', models.IntegerField()),\n ('estado', models.CharField(max_length=50)),\n ('paginas', models.IntegerField()),\n ('empresa', models.CharField(max_length=255)),\n ('create_at', models.DateField(auto_now_add=True)),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.7877551317214966,
"alphanum_fraction": 0.7877551317214966,
"avg_line_length": 32.85714340209961,
"blob_id": "fe13b83c544cf4ac25427c39bf70efb64fb84676",
"content_id": "5ad6d995703fb6ee848a744fd64143fc49b8faa5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 245,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 7,
"path": "/livros/api/viewsets.py",
"repo_name": "thiagoalmeidadon/django-rest-api",
"src_encoding": "UTF-8",
"text": "from rest_framework import viewsets\r\nfrom livros.api import serializers\r\nfrom livros import models\r\n\r\nclass LivrosViewSet(viewsets.ModelViewSet):\r\n serializer_class = serializers.LivrosSerializer\r\n queryset = models.Livros.objects.all() \r\n"
},
{
"alpha_fraction": 0.721818208694458,
"alphanum_fraction": 0.7454545497894287,
"avg_line_length": 35.733333587646484,
"blob_id": "c6bbe8a6b68da05537810719729bc548cba15ab1",
"content_id": "bed4f8147bac237873dc6f603e6aa07d17263923",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 15,
"path": "/livros/models.py",
"repo_name": "thiagoalmeidadon/django-rest-api",
"src_encoding": "UTF-8",
"text": "from django.contrib.admin import autodiscover\nfrom django.db import models\nfrom uuid import uuid4\n\n# Create your models here.\n\nclass Livros(models.Model):\n id_livro = models.UUIDField(primary_key=True, default=uuid4, editable=False)\n titulo = models.CharField(max_length=255)\n autor = models.CharField(max_length=255)\n ano_lancamento = models.IntegerField()\n estado = models.CharField(max_length=50)\n paginas = models.IntegerField()\n empresa = models.CharField(max_length=255)\n create_at = models.DateField(auto_now_add=True)"
}
] | 3 |
doluk/Event_Discord_Bot | https://github.com/doluk/Event_Discord_Bot | 6bfcb4186c3263e5c4a6adccb4f514758a0e7b00 | eeef3d2c47197c31a4cb0cebbf9bcaf37e40103d | ba0c4c3c4d35cdc1a9657a65f59725389d78dc44 | refs/heads/master | 2023-04-01T08:24:17.868847 | 2021-04-10T11:20:08 | 2021-04-10T11:20:08 | 356,555,518 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7485380172729492,
"alphanum_fraction": 0.752436637878418,
"avg_line_length": 72.28571319580078,
"blob_id": "2bd83eaae4b06d7c1491faba3bce952ad024f041",
"content_id": "c2ea0491ed99c443d68ed494c5dc078d1a14b7f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 513,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 7,
"path": "/README.md",
"repo_name": "doluk/Event_Discord_Bot",
"src_encoding": "UTF-8",
"text": "# Event_Discord_Bot\nA little discord bot for event embeds and a feedback system via reactions\n\nCreate a file called ```config.json``` similiar to the ```demo_config.json``` file and install all the requirements listed in the ```requirements.txt``` file.\n\nTo create an event via ```,create <title> <description> <datetime>```. The format for the datetime attribute is ```DD.MM.YYYYTHH:MM```.\nVia reaction user can communicate their status. The list in the embed is updated every 20 seconds until the event starts.\n"
},
{
"alpha_fraction": 0.678260862827301,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 13.5,
"blob_id": "197a5c557379202d7e35de8f15761ebafd493648",
"content_id": "08b0881f964697afc4fed33b0933920323f174cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "doluk/Event_Discord_Bot",
"src_encoding": "UTF-8",
"text": "asyncio~=3.4.3\nAPScheduler~=3.7.0\nDateTime~=4.3\npython-dateutil~=2.8.1\nnest_asyncio\ndiscord.py\nSQLAlchemy\npysqlite3"
},
{
"alpha_fraction": 0.5751189589500427,
"alphanum_fraction": 0.5893949866294861,
"avg_line_length": 38.40178680419922,
"blob_id": "09aac4db95fabc0113c09ec912b141a2694884cd",
"content_id": "6627cbd1bcb4dd6ecd8594173a2e1e5595b88987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4421,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 112,
"path": "/core.py",
"repo_name": "doluk/Event_Discord_Bot",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport datetime\n\nimport discord\nimport nest_asyncio\nfrom dateutil import tz\nfrom discord.ext import commands\n\nfrom utils.automation import scheduler\nfrom utils.core import bot\n\nnest_asyncio.apply()\n\n\[email protected]\nasync def on_connect() -> None:\n '''event handler triggered when the bot is connected to Discord. Logs this information\n '''\n pass\n\n\[email protected]()\nasync def ping(ctx: commands.Context):\n msg = f'Pong! {int(bot.client.latency * 1000)} ms'\n await ctx.send(msg)\n\n\[email protected]_invoke\nasync def typing(ctx: commands.Context):\n await ctx.trigger_typing()\n\n\nasync def event_update(channel_id: int, msg_id: int):\n try:\n channel = await bot.client.fetch_channel(channel_id)\n msg: discord.Message = await channel.fetch_message(msg_id)\n except discord.errors.Forbidden:\n scheduler.remove_job(f\"event_update-{msg.id}\")\n return\n embed: discord.Embed = msg.embeds[0]\n options = {\"\\u2705\": 'Yes ' + \"\\u2705\",\n \"\\u2754\": 'Maybe ' + \"\\u2754\",\n \"\\u274C\": 'No ' + \"\\u274C\"}\n mapping = {s: [] for s in options}\n end = False\n for r in msg.reactions:\n if r.emoji in options:\n async for user in r.users():\n if not user.bot:\n mapping[str(r.emoji)].append(user.display_name)\n updated_embed = discord.Embed(title=embed.title,\n description=embed.description,\n color=discord.Color.dark_gold())\n updated_embed.add_field(name=embed.fields[0].name, value=embed.fields[0].value, inline=embed.fields[0].inline)\n for m in mapping:\n users = sorted(mapping[m], key=lambda x: x)\n if users == \"\" or users is None or users == []:\n users = ['']\n updated_embed.add_field(name=f\"{options[m]}\", value=\"\\n\".join(users), inline=True)\n await msg.edit(embed=updated_embed)\n start: datetime.datetime = datetime.datetime.strptime(embed.fields[0].value, \"%d.%m.%Y %I:%M %p %Z\")\n from_zone = tz.tzutc()\n start = start.replace(tzinfo=from_zone)\n if start - datetime.timedelta(seconds=20) < datetime.datetime.now(from_zone):\n scheduler.remove_job(f\"event_update-{msg.id}\")\n await msg.clear_reactions()\n\n\[email protected](name='create')\nasync def create(ctx: commands.Context, title: str, description: str, date: str):\n \"\"\"Create a new embed for an event and add the reactions to vote for\n Format of the date should be DD.MM.YYYYTHH:MM\"\"\"\n try:\n start: datetime.datetime = datetime.datetime.strptime(date, \"%d.%m.%YT%H:%M\")\n except Exception as e:\n embed = discord.Embed(\n title=f\"Invalid date. 
Please use the format ``DD.MM.YYYYTHH:MM``.\", description=str(e),\n color=discord.Color.red())\n await ctx.send(embed=embed)\n return\n from_zone = tz.tzutc()\n to_zone = tz.tzlocal()\n date = start.replace(tzinfo=from_zone)\n start = start.replace(tzinfo=from_zone)\n start = start.astimezone(to_zone)\n embed = discord.Embed(title=title,\n description=description,\n color=discord.Color.dark_gold())\n embed.add_field(name=\"Date\",\n value=date.strftime(\"%d.%m.%Y\") + \" `\" + date.strftime(\"%I:%M\") + \"` \" + date.strftime(\"%p %Z\"),\n inline=False)\n embed.add_field(name='Yes ' + \"\\u2705\", value='', inline=True)\n embed.add_field(name='Maybe ' + \"\\u2754\", value='', inline=True)\n embed.add_field(name='No ' + \"\\u274C\", value='', inline=True)\n embed.set_footer(text=\"This bot is coded by Doluk#9534\")\n msg = await ctx.send(embed=embed)\n EVENT_EMOJI = [\"\\u2705\", \"\\u2754\", \"\\u274C\"]\n for emoji in EVENT_EMOJI:\n # Add all the applicable emoji to the message\n await msg.add_reaction(emoji)\n scheduler.add_job(event_update, 'interval', seconds=20,\n end_date=str(start),\n args=[msg.channel.id, msg.id],\n coalesce=True, id=f\"event_update-{msg.id}\", misfire_grace_time=15,\n replace_existing=True)\n\n\n###############################################################################\n# Start the bot\n###############################################################################\n\nasyncio.get_event_loop().run_until_complete(bot.client.run(bot.config.discord.token))\n"
},
{
"alpha_fraction": 0.46043166518211365,
"alphanum_fraction": 0.4808153510093689,
"avg_line_length": 38.66666793823242,
"blob_id": "18005aeddf47f2ca5e66c4812529d75e97a6a913",
"content_id": "ef721376fdf5548e5eeabdfc3e2b4df4be2325a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 834,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 21,
"path": "/utils/automation.py",
"repo_name": "doluk/Event_Discord_Bot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 25 17:54:00 2020\n\n@author: Lukas\n\"\"\"\n###############################################################################\n# Imports\n###############################################################################\nimport asyncio\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\n\n###############################################################################\n# Start the scheduler\n###############################################################################\nscheduler = AsyncIOScheduler(event_loop=asyncio.get_event_loop())\n#jobstore = SQLAlchemyJobStore(url='postgresql+asyncpg://localhost:5432/team_utils')\njobstore = SQLAlchemyJobStore(url='sqlite:///jobs.db')\nscheduler.add_jobstore(jobstore)\nscheduler.start()\n\n"
},
{
"alpha_fraction": 0.49422401189804077,
"alphanum_fraction": 0.49422401189804077,
"avg_line_length": 27.428571701049805,
"blob_id": "bedbaa6451505f5e8cc3760cfa7eff79d3cd465e",
"content_id": "604e4e018d4b62695a994ebc51879ca4bdd38c34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1991,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 70,
"path": "/utils/core.py",
"repo_name": "doluk/Event_Discord_Bot",
"src_encoding": "UTF-8",
"text": "import discord\nfrom discord.ext import commands\nimport asyncio\n\n\n\n\n###############################################################################\n# Classes and Constants\n###############################################################################\n\n\n\nclass DiscordClient:\n '''a class to represent a bot instance\n Attributes\n ----------\n client: discord.Client\n the bot client\n config: Config\n the configuration\n Methods\n -------\n _buffer_emoji\n load string representations of an emoji to be used in discord messages\n get_emoji\n get an emoji by name\n '''\n\n def __init__(self):\n intents = discord.Intents(messages=True, members=True, guilds=True,\n emojis=True, reactions=True)\n from utils.config import Config, CONFIG_PATH\n self.config = Config()\n # load config from json-file\n self.config.from_json(CONFIG_PATH)\n self.client = commands.Bot(command_prefix=\",\", intents=intents,\n loop=asyncio.get_event_loop(), case_insensitive=True)\n\n\n def _buffer_emoji(self):\n '''load string representations of an emoji to be used in discord messages\n '''\n from utils.config import CONFIG_PATH\n self.config.from_json(CONFIG_PATH)\n cfg = self.config.emoji.__dict__\n for emoji in cfg:\n cfg[emoji] = str(self.client.get_emoji(cfg[emoji]))\n self.config.emoji.__dict__ = cfg\n\n\n def get_emoji(self, name: str) -> str:\n '''get an emoji by name\n '''\n\n emoji = self.config.emoji.get(name)\n if not emoji:\n self._buffer_emoji()\n emoji = self.config.emoji.get(name)\n return str(emoji)\n\n\n\n\n\n###############################################################################\n# Provide ready instances for importing\n###############################################################################\n\nbot = DiscordClient()\n\n"
},
{
"alpha_fraction": 0.5604256391525269,
"alphanum_fraction": 0.560679018497467,
"avg_line_length": 27.81751823425293,
"blob_id": "b8c85e2996a382e0dc9056c9680b246f6eaafe26",
"content_id": "91c36e44e14557617e256c6e086ac422f9255a96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3947,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 137,
"path": "/utils/config.py",
"repo_name": "doluk/Event_Discord_Bot",
"src_encoding": "UTF-8",
"text": "import json\nimport os\n\n\nclass ClassIterator:\n '''makes arbitrary classes iterable by iterating over their __dict__\n Parameters\n ----------\n class_instance\n an instance of any class\n '''\n\n def __init__(self, class_instance):\n self.iter = class_instance.__dict__.__iter__()\n\n def __next__(self):\n return self.iter.__next__()\n\n\nCONFIG_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"config.json\")\n\nclass ConfigObj:\n '''class to hold one layer of the bot configuration. It's elements may be other ConfigObj's\n Methods\n -------\n get\n alias function to mask ConfigObj.element as ConfigObj.get('element')\n to_dict\n recursively transform this class and all sub-classes to a dictionary\n '''\n\n def __init__(self, **kwargs):\n self.__dict__.update({k: ConfigObj(**v) if isinstance(v, dict) else v\n for k, v in kwargs.items()})\n\n def __iter__(self):\n ''' Returns the Iterator object '''\n return ClassIterator(self)\n\n\n def __getitem__(self, key: str):\n '''allows accessing attributes via ConfigObj[attribute]\n '''\n\n return self.__dict__[key]\n\n\n def get(self, key: str, default=None):\n '''alias function to mask ConfigObj.element as ConfigObj.get('element')\n '''\n\n return self.__dict__.get(key, default)\n\n\n def to_dict(self) -> dict:\n '''recursively transform this class and all sub-classes to a dictionary\n Returns\n -------\n dict\n '''\n\n return {k: v.to_dict() if isinstance(v, ConfigObj) else v\n for k, v in self.__dict__.items()}\n\n\nclass Config:\n '''class to represent the bot configuration\n Methods\n -------\n from_json\n read a json file and recursively represent it's entries as class attributes\n to_json\n write the current config state to a json file, overwriting the path\n '''\n\n def __init__(self):\n pass\n\n def load(self, path: str = CONFIG_PATH) -> None:\n \"\"\"read a json file and recursively represent it's entries as class attributes\n Parameters\n ----------\n path: string\n the path to the json file\n \"\"\"\n\n # read the json file\n with open(path) as fp:\n d = json.load(fp)\n\n # convert json to class attributes\n d = {k: ConfigObj(**v) if isinstance(v, dict) else v for k, v in d.items()}\n\n # clear all existing config and update with the new data. This allows for mid-run updates\n # by re-running the .load-method\n self.__dict__.clear()\n self.__dict__.update(d)\n\n def from_json(self, path: str) -> None:\n '''read a json file and recursively represent it's entries as class attributes\n Parameters\n ----------\n path: string\n the path to the json file\n '''\n\n # read the json file\n with open(path) as fp:\n d = json.load(fp)\n\n # convert json to class attributes\n d = {k: ConfigObj(**v) if isinstance(v, dict) else v for k, v in d.items()}\n\n # clear all existing config and update with the new data. This allows for mid-run updates\n # by re-running the .from_json-method\n self.__dict__.clear()\n self.__dict__.update(d)\n\n\n def to_json(self, path: str) -> None:\n '''write the current config state to a json file, overwriting the path\n Parameters\n ----------\n path: string\n the path to the json file\n '''\n\n # convert the class to a dictionary\n d = {k: v.to_dict() if isinstance(v, ConfigObj) else v\n for k, v in self.__dict__.items()}\n\n # write to a json file\n with open(path, 'w+') as outfile:\n json.dump(d, outfile, indent=2)\n\nconfig = Config()\nconfig.load(CONFIG_PATH)"
}
] | 6 |
GaneshPandey-GP/angular-blog | https://github.com/GaneshPandey-GP/angular-blog | b6888621a39e5a918276d5b96bf428bc280e15c6 | f0cb47bf901b0d19fc50bb06d726a85f95b7db99 | 9dd316070c674147e74639061cfa76a61aeeaede | refs/heads/master | 2023-06-27T16:40:41.356505 | 2021-07-22T12:39:14 | 2021-07-22T12:39:14 | 341,808,074 | 1 | 0 | null | 2021-02-24T06:56:22 | 2021-03-15T04:40:51 | 2021-03-16T13:41:29 | CSS | [
{
"alpha_fraction": 0.6303763389587402,
"alphanum_fraction": 0.6344085931777954,
"avg_line_length": 28.117647171020508,
"blob_id": "2d41fc8e8e1f3ed8a0f901e880b5a373c343fd52",
"content_id": "46d7a19c851c28c21b2ffd2b154a277af0004fb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1488,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 51,
"path": "/src/app/login/login.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { FormControl, FormGroup } from '@angular/forms';\nimport { MatSnackBar } from '@angular/material/snack-bar';\n\nimport {Router} from '@angular/router';\nimport { AdminService } from '../admin-service/admin.service';\n\n@Component({\n selector: 'app-login',\n templateUrl: './login.component.html',\n styleUrls: ['./login.component.scss']\n})\nexport class LoginComponent implements OnInit {\n userData: any = {};\n loginForm = new FormGroup({\n username: new FormControl(''),\n password: new FormControl('')\n })\n \n constructor(private router: Router, private adminService: AdminService, private _snackBar: MatSnackBar) { }\n\n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n\n \n ngOnInit() {\n this.userData = JSON.parse(localStorage.getItem(\"user\"));\n if(this.userData.length > 0) this.router.navigate([\"\"]);\n }\n\n submit(){\n this.adminService.login(this.loginForm.value.username, this.loginForm.value.password)\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error logging User... ');\n this.openSnackBar(\"Error logging User...\", \"Close\");\n } else {\n localStorage.setItem(\"user\", JSON.stringify(data));\n this.router.navigate([\"\"]);\n this.openSnackBar(\"User Created Successfully\", \"Close\");\n }\n },\n err => console.log(err)\n )\n }\n\n }\n \n"
},
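The login component above, like the header component later in this list, reads the stored user with JSON.parse(localStorage.getItem("user")), which yields null on a first visit and crashes the subsequent .length check. A minimal sketch of a guarded read; `readStoredUser` is a hypothetical helper name, not part of this repo:

```typescript
// Sketch: guarded read of the "user" entry these components share.
export function readStoredUser(): any[] {
  const raw = localStorage.getItem('user');
  if (!raw) {
    return []; // nothing stored yet: behave like "not logged in"
  }
  try {
    const parsed = JSON.parse(raw);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    localStorage.removeItem('user'); // corrupted entry: clear it
    return [];
  }
}

// Usage inside ngOnInit(), mirroring the component above:
// this.userData = readStoredUser();
// if (this.userData.length > 0) this.router.navigate(['']);
```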
{
"alpha_fraction": 0.6699029207229614,
"alphanum_fraction": 0.6962552070617676,
"avg_line_length": 29.04166603088379,
"blob_id": "6932c9b3a86c7d4494a41d33fe140e1f56f294f7",
"content_id": "43f42e3d3e25478bc166b6cf6f2651ba5369177f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 721,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 24,
"path": "/src/app/dashboard/dashboard.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { MatCarouselSlide, MatCarouselSlideComponent } from '@ngmodule/material-carousel';\nimport { NgbCarouselConfig } from '@ng-bootstrap/ng-bootstrap';\nimport { Router } from '@angular/router';\n\n@Component({\n selector: 'app-dashboard',\n templateUrl: './dashboard.component.html',\n styleUrls: ['./dashboard.component.scss'],\n providers: [NgbCarouselConfig]\n})\nexport class DashboardComponent implements OnInit {\n images = [700, 800, 807].map((n) => `https://picsum.photos/id/${n}/900/500`);\n constructor(config: NgbCarouselConfig) {\n // \n config.interval = 2000;\n config.keyboard = true;\n config.pauseOnHover = true;\n }\n ngOnInit(): void {\n\n }\n\n}\n"
},
{
"alpha_fraction": 0.5401069521903992,
"alphanum_fraction": 0.5401069521903992,
"avg_line_length": 39.14285659790039,
"blob_id": "9afd597ccde10663a5579a6488b03f06025244e2",
"content_id": "f2d9128330c25cf054574dae9960bb38095937bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 14,
"path": "/src/app/view-subcategory/view-subcategory.component.html",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "<mat-accordion *ngFor=\"let item of categories\" >\n <mat-expansion-panel (opened)=\"panelOpenState = true\"\n (closed)=\"panelOpenState = false\" mat-raised-button>\n <mat-expansion-panel-header >\n <mat-panel-title color=\"primary\">\n {{item.name}}\n </mat-panel-title>\n </mat-expansion-panel-header>\n <p *ngFor=\"let subItem of subCategories[item.name]\">\n {{subItem.name}}\n {{item.categoryid}}\n </p>\n </mat-expansion-panel>\n</mat-accordion>"
},
{
"alpha_fraction": 0.6029132604598999,
"alphanum_fraction": 0.604179859161377,
"avg_line_length": 27.709091186523438,
"blob_id": "c5260d787b8d4691a0ed64dd942c5e654d0c27df",
"content_id": "eb1e9fa81d3449cd4a0ba966d8e68c6748be9b6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1579,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 55,
"path": "/src/app/view-subcategory/view-subcategory.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { MatBottomSheetRef } from '@angular/material/bottom-sheet';\nimport { AdminService } from '../admin-service/admin.service';\n\n@Component({\n selector: 'app-view-subcategory',\n templateUrl: './view-subcategory.component.html',\n styleUrls: ['./view-subcategory.component.scss'],\n})\nexport class ViewSubcategoryComponent implements OnInit {\n categories: any = [];\n subCategories: any = [];\n innerCats: any = [];\n i: any = 0;\n constructor(\n private adminService: AdminService,\n private _bottomSheetRef: MatBottomSheetRef<ViewSubcategoryComponent>\n ) {}\n\n openLink(event: MouseEvent): void {\n this._bottomSheetRef.dismiss();\n event.preventDefault();\n }\n panelOpenState = false;\n\n ngOnInit(): void {\n this.adminService.viewCategories().subscribe(\n (data) => {\n if (data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.categories = data;\n for (var item of this.categories) {\n console.log(item);\n\n this.adminService.viewSubCategories(item.categoryid).subscribe(\n (data) => {\n this.subCategories[item.name] = data;\n console.log(this.subCategories[item.name]);\n },\n (err) => console.log(err)\n );\n }\n }\n },\n (err) => console.log(err)\n );\n }\n\n getSubCategoryInfo(categoryid: any) {\n this.innerCats = this.subCategories.filter(\n (item) => item.categoryid === categoryid\n );\n }\n}\n"
},
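ViewSubcategoryComponent above fires one viewSubCategories request per category from inside the viewCategories subscription. A sketch of the same fan-out with RxJS forkJoin, assuming only the AdminService observables shown in this record:

```typescript
import { forkJoin } from 'rxjs';
import { map, switchMap } from 'rxjs/operators';

// Sketch: fetch all categories, then all subcategory lists in parallel,
// and build the { [categoryName]: subCategories } map in one pass.
// (forkJoin emits nothing for an empty input array; guard if needed.)
export function loadCategoryTree(adminService: any) {
  return adminService.viewCategories().pipe(
    switchMap((categories: any[]) =>
      forkJoin(
        categories.map((c) => adminService.viewSubCategories(c.categoryid))
      ).pipe(
        map((subLists: any[]) => {
          const tree: Record<string, any[]> = {};
          categories.forEach((c, i) => (tree[c.name] = subLists[i]));
          return { categories, tree };
        })
      )
    )
  );
}
```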
{
"alpha_fraction": 0.7648318409919739,
"alphanum_fraction": 0.7648318409919739,
"avg_line_length": 39.25,
"blob_id": "38bd45a741263f67abebfd479bf9f2d1bae54c16",
"content_id": "b56086129b5b811fc162a00451c4ae582a54c193",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 4669,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 116,
"path": "/src/app/app.module.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { BrowserModule } from '@angular/platform-browser';\nimport { NgModule } from '@angular/core';\nimport { HttpClientModule } from '@angular/common/http';\nimport { BrowserAnimationsModule } from '@angular/platform-browser/animations';\nimport { MatCarouselModule } from '@ngmodule/material-carousel';\nimport {FormsModule, ReactiveFormsModule} from '@angular/forms';\nimport { AppRoutingModule } from './app-routing.module';\nimport { AppComponent } from './app.component';\n\nimport {MatToolbarModule} from '@angular/material/toolbar';\nimport {MatButtonModule} from '@angular/material/button';\nimport {MatIconModule} from '@angular/material/icon';\nimport {MatGridListModule} from '@angular/material/grid-list';\nimport {MatSlideToggleModule} from '@angular/material/slide-toggle';\nimport {FlexLayoutModule} from \"@angular/flex-layout\";\nimport { MatSelectModule } from '@angular/material/select';\nimport { MatFormFieldModule } from '@angular/material/form-field';\nimport {MatMenuModule} from '@angular/material/menu';\nimport {MatCardModule} from '@angular/material/card';\nimport {MatChipsModule} from '@angular/material/chips';\nimport {MatExpansionModule} from '@angular/material/expansion';\nimport {MatDividerModule} from '@angular/material/divider';\nimport {MatDialogModule} from '@angular/material/dialog';\nimport {MatRadioModule} from '@angular/material/radio';\nimport {MatInputModule} from '@angular/material/input';\nimport {MatSnackBarModule} from '@angular/material/snack-bar';\nimport {MatBottomSheetModule} from '@angular/material/bottom-sheet';\nimport {MatListModule} from '@angular/material/list';\nimport { FroalaEditorModule, FroalaViewModule } from 'angular-froala-wysiwyg';\nimport {MatTreeModule} from '@angular/material/tree';\nimport {MatProgressSpinnerModule} from '@angular/material/progress-spinner';\n\nimport { DashboardComponent } from './dashboard/dashboard.component';\nimport { HeaderComponent } from './header/header.component';\nimport { TrendingComponent } from './trending/trending.component';\nimport { AdminComponent } from './admin/admin.component';\nimport { AdminHeadComponent } from './admin-head/admin-head.component';\nimport { CreateCategoryComponent } from './create-category/create-category.component';\nimport { ViewCategoriesComponent } from './view-categories/view-categories.component';\nimport { CreateSubcategoryComponent } from './create-subcategory/create-subcategory.component';\nimport { ViewSubcategoryComponent } from './view-subcategory/view-subcategory.component';\nimport { UpdateCategoryComponent } from './update-category/update-category.component';\nimport { CreateBlogComponent } from './create-blog/create-blog.component';\nimport { ViewBlogsComponent } from './view-blogs/view-blogs.component';\nimport { EditorComponent } from './editor/editor.component';\nimport { UpdateBlogComponent } from './update-blog/update-blog.component';\nimport { BlogsComponent } from './blogs/blogs.component';\nimport { NgbModule } from '@ng-bootstrap/ng-bootstrap';\nimport { SidebarComponent } from './sidebar/sidebar.component';\nimport { FooterComponent } from './footer/footer.component';\nimport { BlogdetailComponent } from './blogdetail/blogdetail.component';\nimport { LoginComponent } from './login/login.component';\nimport { RegisterComponent } from './register/register.component';\n\n@NgModule({\n declarations: [\n AppComponent,\n DashboardComponent,\n HeaderComponent,\n TrendingComponent,\n AdminComponent,\n AdminHeadComponent,\n CreateCategoryComponent,\n 
ViewCategoriesComponent,\n    CreateSubcategoryComponent,\n    ViewSubcategoryComponent,\n    UpdateCategoryComponent,\n    CreateBlogComponent,\n    ViewBlogsComponent,\n    EditorComponent,\n    UpdateBlogComponent,\n    BlogsComponent,\n    SidebarComponent,\n    FooterComponent,\n    BlogdetailComponent,\n    LoginComponent,\n    RegisterComponent\n  ],\n  imports: [\n    BrowserModule,\n    AppRoutingModule,\n    HttpClientModule,\n    BrowserAnimationsModule,\n    MatToolbarModule,\n    FormsModule,\n    ReactiveFormsModule,\n    MatButtonModule,\n    MatIconModule,\n    FroalaEditorModule.forRoot(),\n    FroalaViewModule.forRoot(),\n    MatGridListModule,\n    MatSlideToggleModule,\n    FlexLayoutModule,\n    MatFormFieldModule,\n    MatSelectModule,\n    MatMenuModule,\n    MatCardModule,\n    MatCarouselModule,\n    MatChipsModule,\n    MatExpansionModule,\n    MatDividerModule,\n    MatDialogModule,\n    MatRadioModule,\n    MatInputModule,\n    MatSnackBarModule,\n    MatBottomSheetModule,\n    MatListModule,\n    MatTreeModule,\n    NgbModule,\n    MatProgressSpinnerModule\n  ],\n  providers: [],\n  bootstrap: [AppComponent]\n})\nexport class AppModule { }\n"
},
{
"alpha_fraction": 0.7418919205665588,
"alphanum_fraction": 0.7418919205665588,
"avg_line_length": 32.6363639831543,
"blob_id": "8aa0721f3346b6a86c1555b5560421749defd585",
"content_id": "89a34f483dc57fccee4a61afcd8a064276c892ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 740,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/src/app/app-routing.module.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { NgModule } from '@angular/core';\nimport { Routes, RouterModule } from '@angular/router';\n\nimport { DashboardComponent } from \"./dashboard/dashboard.component\";\n\nimport { AdminComponent } from \"./admin/admin.component\";\nimport { BlogdetailComponent } from './blogdetail/blogdetail.component';\nimport { LoginComponent } from './login/login.component';\nimport { RegisterComponent } from './register/register.component';\n\nconst routes: Routes = [\n {path:'', component:DashboardComponent},\n {path:'category/:categoryid', component:DashboardComponent},\n\n {path:'blog/:createUrl', component:BlogdetailComponent},\n];\n\n@NgModule({\n imports: [RouterModule.forRoot(routes)],\n exports: [RouterModule]\n})\nexport class AppRoutingModule { }\n"
},
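Several components in this repo repeat the stored-user check before navigating. One way to centralize it is a route guard; the `AuthGuard` below is a hypothetical sketch, not part of this codebase:

```typescript
import { Injectable } from '@angular/core';
import { CanActivate, Router } from '@angular/router';

// Sketch: one guard replacing the localStorage checks the components repeat.
@Injectable({ providedIn: 'root' })
export class AuthGuard implements CanActivate {
  constructor(private router: Router) {}

  canActivate(): boolean {
    const raw = localStorage.getItem('user');
    if (raw && JSON.parse(raw).length > 0) {
      return true; // a user record is stored: allow navigation
    }
    this.router.navigate(['/login']); // otherwise send to the login page
    return false;
  }
}

// Usage in the routes array:
// { path: 'admin', component: AdminComponent, canActivate: [AuthGuard] }
```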
{
"alpha_fraction": 0.5489335060119629,
"alphanum_fraction": 0.5533249974250793,
"avg_line_length": 25.566667556762695,
"blob_id": "42f9b6dbc2ba24b92a98341bd0d586894bbc195a",
"content_id": "a52a035832f3d9efb55a47fce2f43abcb4c3764a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1594,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 60,
"path": "/src/app/sidebar/sidebar.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { AdminService } from '../admin-service/admin.service';\n\n@Component({\n selector: 'app-sidebar',\n templateUrl: './sidebar.component.html',\n styleUrls: ['./sidebar.component.scss']\n})\nexport class SidebarComponent implements OnInit {\n categories: any = [];\n allBlogs: any = [];\n recentBlogs: any = [];\n blogs: any = [];\n\n constructor(private adminService:AdminService ) { }\n setLocal(blogid: any) {\n localStorage.setItem('blogid', blogid);\n location.reload();\n }\n ngOnInit(): void {\n this.adminService.getBlogsList(0).subscribe(\n (data) => {\n if (data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.allBlogs = data;\n this.recentBlogs = [this.allBlogs[this.allBlogs.length-1], this.allBlogs[this.allBlogs.length-2], this.allBlogs[this.allBlogs.length-3]];\n }\n },\n (err) => console.log(err)\n );\n this.adminService\n .getCategories()\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.categories=data;\n }\n },\n err => console.log(err)\n );\n }\n handleCategory(value: any) {\n this.adminService\n .filterByCategory(value)\n .subscribe(\n data => {\n if (data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.blogs = data;\n }\n },\n err => console.log(err)\n );\n }\n\n}\n"
},
{
"alpha_fraction": 0.5958656072616577,
"alphanum_fraction": 0.5989664196968079,
"avg_line_length": 33.55356979370117,
"blob_id": "d26e479da942a598baa1e801693331d03c6fa5ad",
"content_id": "d2f11441ca4fe3fe3ba1125d2923900cebdddc51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1935,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 56,
"path": "/src/app/blogdetail/blogdetail.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { AdminService } from '../admin-service/admin.service';\nimport { Router, ActivatedRoute } from '@angular/router';\nimport { Title, Meta } from '@angular/platform-browser';\n\n@Component({\n selector: 'app-blogdetail',\n templateUrl: './blogdetail.component.html',\n styleUrls: ['./blogdetail.component.scss']\n})\nexport class BlogdetailComponent implements OnInit {\n blog: any;\n constructor(private adminService:AdminService, private route: ActivatedRoute, private titleService: Title, private metaService: Meta ) { }\n public setTitle( newTitle: string) {\n this.titleService.setTitle( newTitle );\n }\n loadScript() {\n let node = document.createElement('script'); // creates the script tag\n node.src = 'assets/js/gtm.js'; // sets the source (insert url in between quotes)\n node.type = 'text/javascript'; // set the script type\n node.async = true; // makes script run asynchronously\n node.charset = 'utf-8';\n // append to head of document\n document.getElementsByTagName('head')[0].appendChild(node);\n }\n ngOnInit() {\n let createUrl = this.route.snapshot.params.createUrl;\n this.adminService.getBlog(createUrl)\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.blog=data;\n console.log(this.blog)\n this.setTitle(this.blog[0].createPageTitle);\n this.metaService.updateTag(\n {\n name: 'description',\n content: this.blog[0].createMetaDescription\n }\n );\n this.metaService.updateTag(\n {\n name: 'keyword',\n content: this.blog[0].createMetaKeywords\n }\n );\n this.loadScript();\n }\n },\n err => console.log(err)\n );\n }\n\n}\n"
},
{
"alpha_fraction": 0.716843843460083,
"alphanum_fraction": 0.716843843460083,
"avg_line_length": 26.921875,
"blob_id": "f3ba26e82d8c41dc15846f38b9cd72584843e176",
"content_id": "d2be5cbbc8aa2a4014e33d42eebc9bb798b28e7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1787,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 64,
"path": "/src/app/admin-head/admin-head.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import {Component, OnInit} from '@angular/core';\nimport {MatDialog} from '@angular/material/dialog';\nimport { CreateCategoryComponent } from \"../create-category/create-category.component\";\nimport { ViewCategoriesComponent } from \"../view-categories/view-categories.component\";\nimport { CreateSubcategoryComponent } from \"../create-subcategory/create-subcategory.component\";\nimport { ViewSubcategoryComponent } from \"../view-subcategory/view-subcategory.component\";\nimport {MatBottomSheet} from '@angular/material/bottom-sheet';\nimport {ViewBlogsComponent} from \"../view-blogs/view-blogs.component\";\nimport { CreateBlogComponent } from '../create-blog/create-blog.component';\n@Component({\n selector: 'app-admin-head',\n templateUrl: './admin-head.component.html',\n styleUrls: ['./admin-head.component.scss']\n})\nexport class AdminHeadComponent implements OnInit {\n\n constructor(public dialog: MatDialog, private _bottomSheet: MatBottomSheet) { }\n\n ngOnInit(): void {\n }\n viewCategory(){\n this._bottomSheet.open(ViewCategoriesComponent);\n }\n createCategory(){\n const dialogRef = this.dialog.open(CreateCategoryComponent);\n\n // dialogRef.afterClosed().subscribe(result => {\n // console.log('Dialog result:');\n // });\n\n }\n viewCategoryStatistics(){\n\n }\n viewSubCategory(){\n this._bottomSheet.open(ViewSubcategoryComponent);\n\n }\n \n createSubCategory(){\n const dialogRef = this.dialog.open(CreateSubcategoryComponent);\n }\n\n viewSubCategoryStatistics(){\n\n }\n\n viewBlogs(){\n this._bottomSheet.open(ViewBlogsComponent);\n }\n \n createBlog() {\n const dialogRef = this.dialog.open(CreateBlogComponent);\n\n dialogRef.afterClosed().subscribe(result => {\n console.log(`Dialog result: ${result}`);\n });\n }\n\n viewBlogStatistics(){\n \n }\n\n}\n"
},
{
"alpha_fraction": 0.688524603843689,
"alphanum_fraction": 0.6891100406646729,
"avg_line_length": 36.9555549621582,
"blob_id": "afc84b1e75cf199b19be11ef3c4abba8dc86cde1",
"content_id": "45423f50e78a23e1078de3cd66ca1dc5da974152",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1708,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 45,
"path": "/src/app/header/header.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { OverlayContainer } from '@angular/cdk/overlay';\nimport { Router } from '@angular/router';\n\n@Component({\n selector: 'app-header',\n templateUrl: './header.component.html',\n styleUrls: ['./header.component.scss']\n})\nexport class HeaderComponent implements OnInit {\n userData: any;\n isLoggedIn: any = false;\n constructor(private overlay: OverlayContainer, private router: Router) { }\n\n ngOnInit(): void {\n this.userData = JSON.parse(localStorage.getItem(\"user\"));\n if(this.userData!=undefined){\n if(this.userData.length > 0) this.isLoggedIn = true; else this.isLoggedIn = false;}\n }\n toggleTheme(): void {\n if (this.overlay.getContainerElement().classList.contains(\"dark-theme\")) {\n this.overlay.getContainerElement().classList.remove(\"dark-theme\");\n this.overlay.getContainerElement().classList.add(\"light-theme\");\n } else if (this.overlay.getContainerElement().classList.contains(\"light-theme\")) {\n this.overlay.getContainerElement().classList.remove(\"light-theme\");\n this.overlay.getContainerElement().classList.add(\"dark-theme\");\n } else {\n this.overlay.getContainerElement().classList.add(\"light-theme\");\n }\n if (document.body.classList.contains(\"dark-theme\")) {\n document.body.classList.remove(\"dark-theme\");\n document.body.classList.add(\"light-theme\");\n } else if (document.body.classList.contains(\"light-theme\")) {\n document.body.classList.remove(\"light-theme\");\n document.body.classList.add(\"dark-theme\");\n } else {\n document.body.classList.add(\"light-theme\");\n }\n }\n\n logout() {\n localStorage.clear()\n this.router.navigate([\"/login\"]);\n }\n}\n"
},
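toggleTheme in the header component above applies the same three-way class switch to both the overlay container and document.body. A sketch of the switch factored into a helper, so both targets share one code path:

```typescript
// Sketch: the same dark/light/initial toggle, written once.
export function toggleThemeClass(el: Element): void {
  if (el.classList.contains('dark-theme')) {
    el.classList.replace('dark-theme', 'light-theme');
  } else if (el.classList.contains('light-theme')) {
    el.classList.replace('light-theme', 'dark-theme');
  } else {
    el.classList.add('light-theme'); // first toggle: start with the light theme
  }
}

// toggleTheme() then reduces to:
// toggleThemeClass(this.overlay.getContainerElement());
// toggleThemeClass(document.body);
```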
{
"alpha_fraction": 0.4887382388114929,
"alphanum_fraction": 0.5242043733596802,
"avg_line_length": 39.938072204589844,
"blob_id": "8574fd71dcbff8f811b045847eab47e98a488138",
"content_id": "7c5721b4c2bd3677b37f7b534b6065f58852e369",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 17877,
"license_type": "no_license",
"max_line_length": 454,
"num_lines": 436,
"path": "/src/app/dashboard/dummy.js",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport {\n SafeAreaView,\n StyleSheet,\n View,\n ImageBackground,\n ScrollView,\n Text,\n Image,\n TouchableOpacity\n} from 'react-native';\nimport { Calendar, CalendarList, Agenda } from 'react-native-calendars';\n\nimport MapView from 'react-native-maps';\n\n// Third Party Library\nimport { FlatListSlider } from 'react-native-flatlist-slider';\nimport { FlatGrid } from 'react-native-super-grid';\n\n// Components\nimport LineSeperator from '../Components/LineSeperator';\nimport ItemDescriptionNavigationBar from '../Components/ItemDescriptionNavigationBar';\nimport ItemGridView from '../Components/ItemGridView';\n// Dummy Data\nimport DummyData from '../DummyData/DummyData';\n\n\n\n/**\n *\n * To show the the slider image, it will only work if the image are hosted on the server, but not on the local storage\n */\nfunction SliderView({ imageData }) {\n return (\n <View style={styles.sliderView}>\n <FlatListSlider\n data={imageData}\n timer={5000}\n onPress={item => alert(JSON.stringify(item))}\n indicatorContainerStyle={{ position: 'absolute', bottom: 15 }}\n indicatorActiveColor={'rgb(14,127,201)'}\n indicatorInActiveColor={'#ffffff'}\n indicatorActiveWidth={7}\n animation\n >\n </FlatListSlider>\n </View>\n );\n}\n\n\n/**\n *\n * To show the attribute of the product, these data is passed from its paraent class\n */\nfunction TitleView({ title, rating, reviewCount, dayPrice, weekPrice, monthPrice }) {\n return (\n <View style={styles.titleView}>\n <View style={{ padding: 25 }}>\n <Text style={{ fontSize: 22, fontWeight: '700', color: 'rgba(184,34,70,1.0)' }}>{title}</Text>\n <View style={{ flexDirection: 'row' }}>\n <Text style={{ color: 'black' }}>{rating}</Text>\n <Text style={{ color: 'rgba(184,34,70,1.0)' }}> ★ </Text>\n <Text style={{ color: 'black' }}>{'(' + reviewCount + ')'}</Text>\n </View>\n <View style={{ height: 90, flexDirection: 'row', justifyContent: 'space-around', alignItems: 'center' }}>\n <View style={{ alignItems: 'center' }}>\n <Text style={{ fontSize: 20, fontWeight: '900', color: 'rgba(90,90,90,1.0)' }}>{dayPrice + '€'}</Text>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(190,190,190,1.0)' }}>Day</Text>\n </View>\n\n <View style={{ alignItems: 'center' }}>\n <Text style={{ fontSize: 20, fontWeight: '900', color: 'rgba(90,90,90,1.0)' }}>{weekPrice + '€'}</Text>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(190,190,190,1.0)' }}>Week</Text>\n </View>\n\n <View style={{ alignItems: 'center' }}>\n <Text style={{ fontSize: 20, fontWeight: '900', color: 'rgba(90,90,90,1.0)' }}>{monthPrice + '€'}</Text>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(190,190,190,1.0)' }}>Month</Text>\n </View>\n </View>\n <LineSeperator />\n </View>\n </View>\n );\n}\n\n/**\n *\n * To render the user details including rating, item it host and response time\n */\nfunction UserDetailView({ imageURL, name, rating, reviewCount, responseTime, totolItemHolding }) {\n return (\n <View style={[styles.userDetailView]}>\n <View style={{ padding: 25, width: '100%', height: '100%', flexDirection: 'row', justifyContent: 'flex-start', alignItems: 'center' }}>\n <View style={{ width: '100%', flexDirection: 'row', justifyContent: 'space-between', alignItems: 'center' }}>\n <View style={{ flexDirection: 'row' }}>\n <View style={{ flexDirection: 'row', justifyContent: 'center', alignItems: 'flex-end' }}>\n <Image style={{ width: 55, height: 55 }} source={require('../Images/camera.png')} />\n <Image style={{ width: 20, 
height: 20, marginLeft: -15 }} source={require('../Images/userBottom.png')} />\n </View>\n <View style={{ paddingLeft: 10 }} >\n <Text style={{ fontSize: 18, fontWeight: '700', color: 'rgba(90,90,90,1.0)' }}>{name}</Text>\n <Text style={{ fontSize: 13, fontWeight: '400', color: 'rgba(90,90,90,1.0)' }}>{'Normally respond in ' + responseTime + ' hour'}</Text>\n <Text style={{ fontSize: 13, fontWeight: '400', color: 'rgba(90,90,90,1.0)' }}>{'+ ' + totolItemHolding + ' more items'}</Text>\n </View>\n </View>\n <View >\n <Text style={{ fontSize: 13, fontWeight: '400', color: 'rgba(90,90,90,1.0)' }}>{rating + ' ★ ' + '(' + reviewCount + ')'}</Text>\n </View>\n </View>\n </View>\n <View style={{ paddingHorizontal: 25 }}>\n <LineSeperator />\n </View>\n\n </View>\n );\n}\n\n\n/**\n *\n * Retrurn the heading of the container view\n */\n\nfunction Heading({ title }) {\n return (\n <Text style={{ fontSize: 18, fontWeight: '700', color: 'rgba(32,32,32,1.0)' }}>{title.toUpperCase()}</Text>\n );\n}\n\n/**\n * Shows the product description\n */\nfunction ItemDescriptionView({ description }) {\n return (\n <View style={[styles.itemDescriptionView]}>\n <View style={{ padding: 25, width: '100%' }}>\n <Heading title='Description' />\n <Text style={{ fontSize: 15, fontWeight: '400', color: 'rgba(90,90,90,1.0)', marginTop: 10 }}>{description}</Text>\n </View>\n <View style={{ paddingHorizontal: 25 }}>\n <LineSeperator />\n </View>\n </View>\n );\n}\n\n/**\n *\n * Shows the current location on map and distance from the logged in user\n */\nfunction LocationView({ city, distance }) {\n return (\n <View style={[styles.locationView]}>\n <View style={{ padding: 25, width: '100%' }}>\n <Heading title='Location' />\n <View style={{ flexDirection: 'row', justifyContent: 'space-between', marginTop: 10 }}>\n <Text style={{ fontSize: 15, fontWeight: '400', color: 'rgba(90,90,90,1.0)' }}>{city}</Text>\n <Text style={{ fontSize: 15, fontWeight: '400', color: 'rgba(90,90,90,1.0)' }}>{distance} Km from you </Text>\n </View>\n <View style={{ height: 250, backgroundColor: 'lightgray', marginTop: 10, alignItems: 'center', justifyContent: 'center' }}>\n <Text style={{ fontSize: 15, fontWeight: '900', color: '#FFF' }}>Map will render here.</Text>\n\n </View>\n </View>\n <View style={{ paddingHorizontal: 25 }}>\n <LineSeperator />\n </View>\n </View>\n );\n}\n\n\n/**\n * Shows the calender to change the availablity time/days\n */\nfunction CalenderView({ returnDay }) {\n return (\n <View style={[styles.locationView]}>\n <View style={{ padding: 25, width: '100%' }}>\n <Heading title='Availability' />\n </View>\n </View>\n );\n}\n\n/**\n * Shows the other item from the same owner, 4 item will be display here, if owner has more item then user will see all the items by tapping on 'Show More' button\n */\n\nfunction OtherItemView({ }) {\n return (\n <View style={[styles.locationView]}>\n <View style={{ padding: 25, width: '100%', }}>\n <Heading title='Other items of the owner' />\n <ItemGridView />\n <TouchableOpacity style={{ justifyContent: 'center', alignItems: 'center' }}>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(90,90,90,1.0)', marginTop: 0 }}>Show more </Text>\n </TouchableOpacity>\n </View>\n </View>\n );\n}\n\n\n/**\n * Shows the comment of the item, there is a button on the bottom that will navigate to the new screen to show all the comment of the item\n */\nfunction CommentView({ navigation, userName, date, rating, comment, imageURL }) {\n\n const navigateToTheScreen = () => {\n 
navigation.push('SubmitRatingScreen')\n }\n\n return (\n <View style={[styles.locationView]}>\n <View style={{ padding: 25, width: '100%' }}>\n <Heading title='Comment' />\n <View style={{ flexDirection: 'row', paddingTop: 20 }}>\n <Image style={{ marginTop: 5, width: 60, height: 60 }} source={require('../Images/camera.png')} />\n <View style={{ paddingRight: 60, marginLeft: 10 }}>\n <View style={{ flexDirection: 'row', justifyContent: 'space-between', alignItems: 'center', paddingTop: 6, }}>\n <Text style={{ fontSize: 22, fontWeight: '700', color: 'rgba(90,90,90,1.0)' }}>{userName}</Text>\n <Text style={{ fontSize: 15, fontWeight: '400', color: 'rgba(90,90,90,1.0)' }}>{date}</Text>\n </View>\n <Text style={{ fontSize: 20, fontWeight: '400', color: 'rgba(184,34,70,1.0)', paddingVertical:6}}>★★★★★</Text>\n <Text style={{ fontSize: 15, fontWeight: '400', color: 'rgba(90,90,90,1.0)', }}>{comment}</Text>\n </View>\n </View>\n <TouchableOpacity style={{ justifyContent: 'center', alignItems: 'center' }} onPress={navigateToTheScreen}>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(90,90,90,1.0)', marginTop: 10 }}>Show other comments </Text>\n </TouchableOpacity>\n </View>\n <View style={{ paddingHorizontal: 25 }}>\n <LineSeperator />\n </View>\n </View>\n );\n}\n\n/**\n * Render edit item and availability button\n */\nfunction ButtonView({ navigation }) {\n\n navigateToEditITemScreen = () => {\n navigation.push('EditItemScreen');\n }\n\n navigateAvilablityScreen = () => {\n navigation.push('AvailablityScreen');\n }\n\n return (\n <View style={{ width: '100%', flexDirection: 'row', justifyContent: 'space-evenly', alignItems: 'center' }}>\n <TouchableOpacity style={styles.secondaryButton} onPress={navigateToEditITemScreen}>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(90,90,90,1.0)' }}>Edit Item</Text>\n </TouchableOpacity>\n\n <TouchableOpacity style={styles.secondaryButton} onPress={navigateAvilablityScreen}>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(90,90,90,1.0)' }}>Edit Availability</Text>\n </TouchableOpacity>\n </View>\n );\n}\n\n// Screen 15\nclass ItemDisplayScreen extends React.Component {\n\n // Button Action\n\n /**\n * To navigate to the 'Login' screen\n */\n constructor(props){\n super(props)\n this.state = ({\n data:{\n itemTitle:\"test\"\n },\n loading:false\n });\n }\n handleBackButtonAction() {\n this.props.navigation.pop()\n }\n componentDidMount() {\n\n this.getItemDetails().then((resolve) => {\n this.setState({ loading: false})\n }, (err) => {\n console.log(err)\n })\n }\n getItemDetails(){\n let params={};\n params.userID=\"1\";\n params.itemID=\"38\";\n params.latitude=\"\";\n params.longitude=\"\";\n // console.log(\"testsetsetsetsetestes\");\n // console.log(params);\n return new Promise( (resolve, reject) => {\n fetch('https://contestcottage.com:5000/getItemDetails', {\n method: 'POST', // or 'PUT'\n headers: {\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(params),\n })\n .then(response => response.json())\n .then(collection => {\n this.setState({data:collection.data});\n // resolve(collection.data);\n }).catch((error)=>{\n alert('error call ', error);\n reject(error);\n });\n })\n }\n render() {\n return (\n <>\n <SafeAreaView style={styles.background}>\n <ItemDescriptionNavigationBar title=\"\" onClickCallback={this.handleBackButtonAction.bind(this)} />\n <View style={styles.container}>\n <ScrollView style={styles.scrollView} showsVerticalScrollIndicator={false} contentContainerStyle={{ flexGrow: 
1, }} contentInsetAdjustmentBehavior='automatic'>\n\n <SliderView imageData={imageSldierData} >\n\n </SliderView>\n <TitleView title={this.state.data.itemTitle} rating={this.state.data.rating} reviewCount={this.state.data.reviewscount} dayPrice={this.state.data.perdayPrice} weekPrice={this.state.data.weekPrice} monthPrice={this.state.data.monthPrice} />\n <UserDetailView imageURL={this.state.data.user_profile_photo} name={this.state.data.firstName+' '+this.state.data.lastName} rating={this.state.data.userratings} reviewCount={this.state.data.userreviewscount} responseTime='2' totolItemHolding='27' />\n <ItemDescriptionView description={this.state.data.itemDescription} />\n <LocationView city={this.state.data.location} distance={this.state.data.distance} />\n <CalenderView returnDay='31' />\n <CalendarList\n\n // Callback which gets executed when visible months change in scroll view. Default = undefined\n onVisibleMonthsChange={(months) => { console.log('now these months are visible', months); }}\n // Max amount of months allowed to scroll to the past. Default = 50\n pastScrollRange={0}\n // Max amount of months allowed to scroll to the future. Default = 50\n futureScrollRange={0}\n // Enable or disable scrolling of calendar list\n scrollEnabled={true}\n // Enable or disable vertical scroll indicator. Default = false\n showScrollIndicator={true}\n // Collection of dates that have to be colored in a special way. Default = {}\n markedDates={{\n '2021-01-21': { selected: true, startingDay: true, color: 'rgb(210,37,80)' },\n '2021-01-22': { selected: true, endingDay: true, color: 'rgb(210,37,80)', textColor: 'white' },\n }}\n // Date marking style [simple/period/multi-dot/custom]. Default = 'simple'\n markingType={'period'}\n />\n\n <View style={{ paddingHorizontal: 25 }}>\n <Text style={{ fontSize: 15, fontWeight: '600', color: 'rgba(90,90,90,1.0)', marginTop: 10 }}>🔔 22 is the return day</Text>\n <View style={{ width: 20, height: 20 }}></View>\n <LineSeperator />\n </View>\n <OtherItemView />\n\n <CommentView navigation={this.props.navigation} userName='Lucia San' date='13/06/2013' comment='I got my CMD-1500 and I have to say it’s a great convertor!! I bought it for my PAL Phillips Videopac G7400 (Odyssey3) Videogame system to play on my NTSC T.V. and it works awesome!!Peter and his helpful staff stands by their word, buy with confidence from World-Import if they don’t have it, you don’t need it!!! 
Christopher D., Westford Ma.\n'/>\n </ScrollView>\n </View>\n <View style={styles.buttonContainer}>\n <ButtonView navigation={this.props.navigation} />\n </View>\n </SafeAreaView>\n </>\n );\n }\n}\n\nexport default ItemDisplayScreen;\n\nconst styles = StyleSheet.create({\n container: {\n flex: 1,\n flexDirection: 'column',\n backgroundColor: 'rgba(255,255,255,1.0)',\n alignItems: 'center',\n },\n background: {\n flex: 1,\n backgroundColor: 'rgba(255,255,255,1.0)',\n },\n scrollView: {\n width: '100%',\n height: '100%'\n },\n buttonContainer: {\n backgroundColor: 'rgba(255,255,255,1.0)',\n justifyContent: 'center',\n alignItems: 'center',\n paddingBottom: 0,\n height: 90\n },\n sliderView: {\n width: '100%',\n height: 230,\n },\n titleView: {\n width: '100%',\n height: 160,\n backgroundColor: '#FFF'\n },\n userDetailView: {\n width: '100%',\n height: 100,\n },\n itemDescriptionView: {\n width: '100%',\n },\n locationView: {\n width: '100%',\n },\n secondaryButton: {\n width: 140,\n height: 44,\n backgroundColor: 'rgba(255,255,255,1.0)',\n borderRadius: 22,\n borderWidth: 0.5,\n borderColor: 'rgba(220,220,220,1.0)',\n justifyContent: 'center',\n alignItems: 'center',\n shadowOffset: { width: 8, height: 7, },\n shadowColor: 'rgba(127,127,127,1.0)',\n shadowOpacity: 0.3,\n },\n});"
},
{
"alpha_fraction": 0.6407136917114258,
"alphanum_fraction": 0.6428764462471008,
"avg_line_length": 31.734512329101562,
"blob_id": "5a20347e0b5e4f06bb599718af83f97c832b57ff",
"content_id": "c45c483f309f05eb20ccccd19883e96c9ca36356",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 3699,
"license_type": "no_license",
"max_line_length": 380,
"num_lines": 113,
"path": "/src/app/update-blog/update-blog.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { FormBuilder, FormControl, FormGroup, Validators } from '@angular/forms';\nimport { MatDialog, MatDialogRef } from '@angular/material/dialog';\nimport { MatSnackBar } from '@angular/material/snack-bar';\nimport { AdminService } from '../admin-service/admin.service';\nimport {MAT_DIALOG_DATA} from '@angular/material/dialog';\nimport {Inject} from '@angular/core';\n\n@Component({\n selector: 'app-update-blog',\n templateUrl: './update-blog.component.html',\n styleUrls: ['./update-blog.component.scss']\n})\nexport class UpdateBlogComponent implements OnInit {\n categories: any = []\n subCategories: any = []\n\n updateBlogForm: FormGroup;\n fetchedThumbnail: any;\n constructor(@Inject(MAT_DIALOG_DATA) public blogInfo: any, private formBuilder: FormBuilder, private adminService:AdminService, private _snackBar: MatSnackBar, public dialog: MatDialog, public dialogRef: MatDialogRef<UpdateBlogComponent>){}\n\n data:any={};\n success(res:any){\n this.data=res;\n }\n view(item:any){\n return(item)\n }\n ngOnInit() {\n\n this.adminService\n .viewCategories()\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.categories=data;\n }\n },\n err => console.log(err)\n );\n this.fetchedThumbnail=this.blogInfo.blogInfo.thumbnail;\n this.updateBlogForm = this.formBuilder.group({\n category: this.blogInfo.blogInfo.category,\n subCategory: this.blogInfo.blogInfo.subCategory,\n categoryid: this.blogInfo.blogInfo.categoryid,\n subCategoryid: this.blogInfo.blogInfo.subCategoryid,\n title: this.blogInfo.blogInfo.title,\n desc: this.blogInfo.blogInfo.desc,\n content: this.blogInfo.blogInfo.content,\n thumbnail: this.blogInfo.blogInfo.thumbnail\n });\n }\n\n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n\n selectFormControl = new FormControl('', Validators.required);\n\n getSubCategory(event: any) {\n this.adminService\n .viewSubCategories(event.value.categoryid)\n .subscribe(\n data => {\n console.log(data)\n if(data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.subCategories=data;\n }\n },\n err => console.log(err)\n );\n }\n\n handleImgChange = (event: any) => {\n let file = event.files[0];\n let reader = new FileReader();\n reader.readAsDataURL(file);\n reader.onload = () =>\n this.updateBlogForm.patchValue({\n thumbnail: reader.result\n })\n\n,\n reader.onerror = function (error) {\n console.log('Error: ', error);\n };\n };\n\n submit() {\n console.log(this.updateBlogForm.value)\n this.adminService\n .updateBlog(this.blogInfo.blogInfo.blogid, this.updateBlogForm.value.subCategory.category, this.updateBlogForm.value.subCategory.categoryid, this.updateBlogForm.value.subCategory.name, this.updateBlogForm.value.subCategory.subCategoryid, this.updateBlogForm.value.title, this.updateBlogForm.value.desc, this.updateBlogForm.value.content, this.updateBlogForm.value.thumbnail)\n .subscribe(\n data => {\n console.log('data ', data);\n if(data.status == 1) {\n this.success(data);\n this.openSnackBar(\"Blog Updated Successfully\", \"Close\");\n } else {\n throw new Error('Error Creating Blog... ');\n this.openSnackBar(\"Error Updating Blog\", \"Close\");\n }\n },\n err => console.log(err)\n );\n }\n}\n"
},
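Both blog dialogs (update-blog above and create-blog at the end of this list) convert the selected thumbnail with the same FileReader callback pattern. A sketch that wraps the pattern in a Promise so the data URL can be awaited; `readFileAsDataUrl` is a hypothetical helper name:

```typescript
// Sketch: FileReader wrapped in a Promise for a base64 data URL.
export function readFileAsDataUrl(file: File): Promise<string> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => resolve(reader.result as string);
    reader.onerror = () => reject(reader.error);
    reader.readAsDataURL(file);
  });
}

// Usage inside handleImgChange:
// const dataUrl = await readFileAsDataUrl(event.files[0]);
// this.updateBlogForm.patchValue({ thumbnail: dataUrl });
```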
{
"alpha_fraction": 0.5302006602287292,
"alphanum_fraction": 0.5317845940589905,
"avg_line_length": 19.676855087280273,
"blob_id": "af34a1df677347845a4a2e7e73bc6df8249bbfc1",
"content_id": "290d580a78986e565515cb1df20b8494635d0294",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 9470,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 458,
"path": "/src/app/admin-service/admin.service.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Injectable } from '@angular/core';\nimport { HttpClient } from '@angular/common/http';\nimport { Observable } from 'rxjs';\nimport { map,catchError } from 'rxjs/operators';\n\n@Injectable({\n providedIn: 'root'\n})\nexport class AdminService {\n\n apiPath = 'https://sce-cms.shivalikcollege.edu.in';\n\n\n getCategoriesApiPath = this.apiPath + '/getCategories';\n getSubCategoriesApiPath = this.apiPath + '/getSubCategories';\n getBlogsListApiPath = this.apiPath + '/getBlogsList2';\n getBlogApiPath = this.apiPath + '/getBlogDetailsUrl';\n\n loginApiPath = this.apiPath+'/login';\n createApiPath = this.apiPath+'/create';\n readApiPath = this.apiPath+'/fetch';\n updateApiPath = this.apiPath+'/update';\n paginationFetch = this.apiPath+'/fetchWithLimit';\n\n blogs: any = [];\n\n constructor(private http: HttpClient) { }\n\n register(username:any, email: any, password: any){\n return this.http.post<any>(this.createApiPath,{\n database:\"ShivalikCollege\",\n collection:\"user\",\n sequenceType:\"userSequence\",\n idType:\"userid\",\n document:{\n username:username,\n email:email,\n password:password,\n isActive:1,\n isVisible:true\n }\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n }\n\n login(username: any, password: any){\n return this.http.post<any>(this.readApiPath,{\n database:\"ShivalikCollege\",\n collection:\"user\",\n Filter:{\n username:username,\n password:password,\n }\n })\n .pipe(\n map((res: any) => {\n return res;\n console.log(res)\n }),\n catchError((err) => {\n return err;\n })\n );\n }\n\n createCategory(name:any){\n return this.http.post<any>(this.createApiPath,{\n database:\"ShivalikCollege\",\n collection:\"category\",\n sequenceType:\"categorySequence\",\n idType:\"categoryid\",\n document:{\n name:name,\n isActive:1,\n isVisible:true\n }\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n }\n\n createSubCategory(name:any, category: any, categoryid: any){\n return this.http.post<any>(this.createApiPath,{\n database:\"ShivalikCollege\",\n collection:\"subCategory\",\n sequenceType:\"subCategorySequence\",\n idType:\"subCategoryid\",\n document:{\n name:name,\n category: category,\n categoryid: categoryid,\n isActive:1,\n isVisible:true\n }\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n }\n\n createBlog(category:any, categoryid: any, subCategory: any, subCategoryid: any, date: any, pageTitle: any, metaKeywords: any, metaDescription: any, title: any, desc: any, content: any, thumbnail: any){\n return this.http.post<any>(this.createApiPath,{\n database:\"ShivalikCollege\",\n collection:\"blog\",\n sequenceType:\"blogSequence\",\n idType:\"blogid\",\n document:{\n category: category,\n categoryid: categoryid,\n subCategory: subCategory,\n subCategoryid: subCategoryid,\n date: date,\n pageTitle: pageTitle,\n metaKeywords: metaKeywords,\n metaDescription: metaDescription,\n title: title,\n desc: desc,\n content: content,\n thumbnail: thumbnail,\n isActive:1,\n isVisible:true\n }\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n }\n\n viewCategories(){\n return this.http.post<any>(this.readApiPath,{\n database:\"ShivalikCollege\",\n collection:\"category\",\n Filter:{\n isActive:1\n }\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n }\n viewSubCategories(categoryid: any){\n return 
this.http.post<any>(this.readApiPath,{\n      database:\"ShivalikCollege\",\n      collection:\"subCategory\",\n      Filter:{\n        categoryid: categoryid,\n        isActive:1\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n\n  viewBlogs(){\n    return this.http.post<any>(this.readApiPath,{\n      database:\"ShivalikCollege\",\n      collection:\"blog\",\n      Filter:{\n        isActive:1\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n\n  updateBlog(blogid: any, category:any, categoryid: any, subCategory: any, subCategoryid: any, title: any, desc: any, content: any, thumbnail: any){\n    return this.http.post<any>(this.updateApiPath,{\n      database:\"ShivalikCollege\",\n      collection:\"blog\",\n      Filter:{\n        blogid: blogid\n      },\n      DataToBeUpdated: {\n        category: category,\n        categoryid: categoryid,\n        subCategory: subCategory,\n        subCategoryid: subCategoryid,\n        title: title,\n        desc: desc,\n        content: content,\n        thumbnail: thumbnail\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n  getBlog2(createUrl: any){\n    return this.http.post<any>(this.readApiPath,{\n      database:\"ShivalikCollege\",\n      collection:\"blog\",\n      Filter:{\n        createUrl: createUrl,\n        isActive:1\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n  // getBlog(blogid: any){\n  //   return this.http.post<any>(this.readApiPath,{\n  //     database:\"ShivalikCollege\",\n  //     collection:\"blog\",\n  //     Filter:{\n  //       blogid: blogid,\n  //       isActive:1\n  //     }\n  //   })\n  //   .pipe(\n  //     map((res: any) => {\n  //       return res;\n  //     }),\n  //     catchError((err) => {\n  //       return err;\n  //     })\n  //   );\n  // }\n\n  updateCategory(categoryid: any, name: any){\n    return this.http.post<any>(this.updateApiPath,{\n      database:\"ShivalikCollege\",\n      collection:\"category\",\n      Filter:{\n        categoryid: categoryid\n      },\n      DataToBeUpdated: {\n        name: name,\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n\n  updateSubCategory(subCategoryid: any, name: any){\n    return this.http.post<any>(this.updateApiPath,{\n      database:\"ShivalikCollege\",\n      collection:\"subCategory\",\n      Filter:{\n        subCategoryid: subCategoryid\n      },\n      DataToBeUpdated: {\n        name: name,\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n\n\npagination(number:any,id:any){\n  if(id==undefined){\n    return this.http.post<any>(this.paginationFetch,{\n      database:\"ShivalikCollege\",\n      collection:\"blog\",\n      Limit:4,\n      Skip:number,\n      Filter:{\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n  else{\n    console.log({\n      database:\"ShivalikCollege\",\n      collection:\"blog\",\n      Limit:4,\n      Skip:number,\n      Filter:{\n        createCategory:id\n      }\n    });\n\n    return this.http.post<any>(this.paginationFetch,{\n      database:\"ShivalikCollege\",\n      collection:\"blog\",\n      Limit:4,\n      Skip:number,\n      Filter:{\n        createCategory:id\n      }\n    })\n    .pipe(\n      map((res: any) => {\n        return res;\n      }),\n      catchError((err) => {\n        return err;\n      })\n    );\n  }\n}\n\nfilterByCategory(categoryid: any){\n  return this.http.post<any>(this.readApiPath,{\n    database:\"ShivalikCollege\",\n    collection:\"blog\",\n    Filter:{\n      categoryid: categoryid\n    }\n  })\n  .pipe(\n    map((res: any) => {\n      return res;\n    }),\n    catchError((err) => {\n      return err;\n    })\n    
);\n}\ngetCategories(): any{\n return this.http.post<any>(this.getCategoriesApiPath, {})\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n}\n\ngetSubCategories(id: number): any{\n return this.http.post<any>(this.getSubCategoriesApiPath, {\n category: id\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n}\n\ngetBlogsList(id: number): any{\n return this.http.post<any>(this.getBlogsListApiPath, {\n category: id\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n}\ngetBlog(url: string): any{\n return this.http.post<any>(this.getBlogApiPath, {\n createUrl: url\n })\n .pipe(\n map((res: any) => {\n return res;\n }),\n catchError((err) => {\n return err;\n })\n );\n}\n\n\n\n// getRecentBlog(){\n// return this.http.post<any>(this.readApiPath,{\n// database:\"ShivalikCollege\",\n// collection:\"sequences\",\n// Filter:{\n// blogid: blogid,\n// isActive:1\n// }\n// })\n// .pipe(\n// map((res: any) => {\n// return res;\n// }),\n// catchError((err) => {\n// return err;\n// })\n// );\n// }\n\n\n}\n"
},
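Every read in the AdminService above posts the same database/collection/Filter envelope to the generic /fetch endpoint. A standalone sketch of that contract; the endpoint and field names come from the service itself, and the example filter mirrors viewCategories:

```typescript
// Sketch of the request envelope AdminService wraps around /fetch.
interface FetchRequest {
  database: string;
  collection: string;
  Filter: Record<string, unknown>;
}

async function fetchDocuments(apiPath: string, req: FetchRequest): Promise<any[]> {
  const res = await fetch(`${apiPath}/fetch`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(req),
  });
  return res.json(); // the server responds with a JSON array of documents
}

// Example: the same query AdminService.viewCategories() issues.
// fetchDocuments('https://sce-cms.shivalikcollege.edu.in', {
//   database: 'ShivalikCollege',
//   collection: 'category',
//   Filter: { isActive: 1 },
// });
```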
{
"alpha_fraction": 0.5884119272232056,
"alphanum_fraction": 0.6043102145195007,
"avg_line_length": 42.213741302490234,
"blob_id": "81687bbdb141ce9d809f749259e402031ead1198",
"content_id": "81f6c2679158e158fbe6c29fea88e0cd25e71e82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5661,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 131,
"path": "/blogserver.py",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, json, Response, jsonify\nfrom pymongo import MongoClient\nfrom flask_cors import CORS, cross_origin\nclass MongoAPI:\n def __init__(self, data):\n self.client = MongoClient(\"mongodb://root:[email protected]:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&ssl=false\")\n database = data['database']\n collection = data['collection']\n cursor = self.client.get_database(database)\n self.collection = cursor[collection]\n self.data = data\n def write(self, data):\n new_document = data['document']\n response = self.collection.insert_one(new_document)\n output = {'Status': '1',\n 'Document_ID': str(response.inserted_id)}\n return output\n def read(self):\n documents = self.collection.find()\n output = [{item: data[item] for item in data if item != '_id'} for data in documents]\n return output\n def readWithFilter(self):\n filt = self.data['Filter']\n documents = self.collection.find(filt)\n output = [{item: data[item] for item in data if item != '_id'} for data in documents]\n return output\n def readWithFilter2(self):\n filt = self.data['Filter']\n lim = self.data['Limit']\n sk = self.data['Skip']\n documents = self.collection.find(filt).limit(lim).skip(sk)\n output = [{item: data[item] for item in data if item != '_id'} for data in documents]\n return output\n def login(self):\n documents = self.collection.find({\"email\": self.data['email'],\"pwd\":self.data['password'],\"isActive\":1})\n output = [{item: data[item] for item in data if item != '_id'} for data in documents]\n return output\n def update(self):\n filt = self.data['Filter']\n updated_data = {\"$set\": self.data['DataToBeUpdated']}\n response = self.collection.update_one(filt, updated_data)\n output = {'Status': '1' if response.modified_count > 0 else \"Nothing was updated.\"}\n return output\n def getSequences(self):\n documents = self.collection.find()\n output = [{item: data[item] for item in data if item != '_id'} for data in documents]\n return output\napp = Flask(__name__)\ncors = CORS(app)\[email protected]('/',methods=['GET'])\ndef index():\n return(\"API Server Running\")\[email protected]('/create', methods=['POST'])\ndef create():\n data = request.json\n data2=json.loads('{\"database\":\"ShivalikCollege\",\"collection\":\"sequences\"}')\n obj2 = MongoAPI(data2)\n sequenceType=data['sequenceType']\n idType=data['idType']\n print(obj2.getSequences()[0].get(sequenceType))\n cid=(obj2.getSequences()[0].get(sequenceType))\n print(cid)\n data['document'][idType]=cid\n if data is None or data == {}:\n return Response(response=json.dumps({\"Error\": \"Please provide connection information\"}),\n status=400,\n mimetype='application/json')\n obj1 = MongoAPI(data)\n response = obj1.write(data)\n cid2=cid+1\n data3=json.loads('{\"database\":\"ShivalikCollege\",\"collection\":\"sequences\",\"Filter\":{\"'+sequenceType+'\":'+str(cid)+'},\"DataToBeUpdated\":{\"'+sequenceType+'\":'+str(cid2)+'}}')\n obj3 = MongoAPI(data3)\n obj3.update()\n return Response(response=json.dumps(response),\n status=200,\n mimetype='application/json')\[email protected]('/login', methods=['POST'])\ndef login_class():\n data = request.json\n if data is None or data == {}:\n return Response(response=json.dumps({\"Error\": \"Please provide connection information\"}),\n status=400,\n mimetype='application/json')\n obj1 = MongoAPI(data)\n response = obj1.login()\n return Response(response=json.dumps(response),\n status=200,\n mimetype='application/json')\[email protected]('/fetch', 
methods=['POST'])\ndef fetch():\n data = request.json\n if data is None or data == {}:\n return Response(response=json.dumps({\"Error\": \"Please provide connection information\"}),\n status=400,\n mimetype='application/json')\n obj1 = MongoAPI(data)\n response = obj1.readWithFilter()\n return Response(response=json.dumps(response),\n status=200,\n mimetype='application/json')\[email protected]('/update', methods=['POST'])\ndef update():\n data = request.json\n if data is None or data == {} or 'DataToBeUpdated' not in data:\n return Response(response=json.dumps({\"Error\": \"Please provide connection information\"}),\n status=400,\n mimetype='application/json')\n obj1 = MongoAPI(data)\n response = obj1.update()\n return Response(response=json.dumps(response),\n status=200,\n mimetype='application/json')\[email protected]('/fetchWithLimit', methods=['POST'])\ndef fetchWithLimit():\n data = request.json\n if data is None or data == {}:\n return Response(response=json.dumps({\"Error\": \"Please provide connection information\"}),\n status=400,\n mimetype='application/json')\n obj1 = MongoAPI(data)\n response = obj1.readWithFilter2()\n return Response(response=json.dumps(response),\n status=200,\n mimetype='application/json')\nif __name__ == '__main__':\n data={}\n # app.run(use_reloader=False, debug=True, port=5001, host='127.0.0.1')\n from gevent.pywsgi import WSGIServer\n app.debug = True\n http_server = WSGIServer(('', 5002), app)\n http_server.serve_forever()\n"
},
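The Flask /create handler above reads sequenceType and idType from the payload, stamps the current sequence value into document[idType], then increments the counter in the sequences collection. A sketch of a matching client call; the payload shape mirrors AdminService.createCategory from the earlier record:

```typescript
// Sketch: one /create call against the Flask handler above. The server
// assigns document.categoryid from sequences.categorySequence and then
// bumps that counter, so the client never sends an id itself.
async function createDocument(
  apiPath: string
): Promise<{ Status: string; Document_ID: string }> {
  const res = await fetch(`${apiPath}/create`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      database: 'ShivalikCollege',
      collection: 'category',
      sequenceType: 'categorySequence',
      idType: 'categoryid',
      document: { name: 'Example category', isActive: 1, isVisible: true },
    }),
  });
  return res.json();
}
```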
{
"alpha_fraction": 0.6510892510414124,
"alphanum_fraction": 0.6531974673271179,
"avg_line_length": 33.28915786743164,
"blob_id": "7e23bab9fc06c27e26366b0db28091a16765a475",
"content_id": "463347d0974893de31097588b43631651125f0e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 2846,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 83,
"path": "/src/app/create-subcategory/create-subcategory.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import {Component} from '@angular/core';\nimport {FormControl, FormGroupDirective, NgForm, Validators, FormGroup} from '@angular/forms';\nimport {ErrorStateMatcher} from '@angular/material/core';\nimport { AdminService } from \"src/app/admin-service/admin.service\";\nimport { Router } from \"@angular/router\";\nimport {MatSnackBar} from '@angular/material/snack-bar';\nimport {MatDialog, MatDialogRef} from '@angular/material/dialog';\n\nexport class MyErrorStateMatcher implements ErrorStateMatcher {\n isErrorState(control: FormControl | null, form: FormGroupDirective | NgForm | null): boolean {\n const isSubmitted = form && form.submitted;\n return !!(control && control.invalid && (control.dirty || control.touched || isSubmitted));\n }\n}\n@Component({\n selector: 'app-create-subcategory',\n templateUrl: './create-subcategory.component.html',\n styleUrls: ['./create-subcategory.component.scss']\n})\n\nexport class CreateSubcategoryComponent {\n subCategoryForm = new FormGroup({\n category: new FormControl(''),\n subCategory: new FormControl('')\n })\n categoryNameControl = new FormControl('', [\n Validators.required,\n ]);\n\n selectFormControl = new FormControl('', Validators.required);\n\n matcher = new MyErrorStateMatcher();\n\n constructor(private adminService:AdminService, private router: Router, private _snackBar: MatSnackBar, public dialogRef: MatDialogRef<CreateSubcategoryComponent>){}\n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n categories: any = [];\n ngOnInit() {\n this.adminService.viewCategories().subscribe(\n (data) => {\n if (data.length == 0) {\n throw new Error('Error Fetching States... ');\n } else {\n this.categories = data;\n }\n },\n (err) => console.log(err)\n );\n }\n data:any={};\n success(res:any){\n this.data=res;\n }\n submit() {\n console.log(this.subCategoryForm.value);\n\n if(this.subCategoryForm.value.subCategory!='' && this.subCategoryForm.value.category!=''){\n this.adminService\n .createSubCategory(this.subCategoryForm.value.subCategory, this.subCategoryForm.value.category.name, this.subCategoryForm.value.category.categoryid)\n .subscribe(\n data => {\n console.log('data ', data);\n if(data.length == 0) {\n throw new Error('Error Creating Sub-Category... ');\n this.openSnackBar(\"Error Registering User\", \"Close\");\n } else {\n this.success(data);\n this.openSnackBar(\"Sub-Category Created Successfully\", \"Close\");\n }\n },\n err => console.log(err)\n );\n this.openSnackBar(\"Sub-Category Created Successfully\", \"Close\");\n this.close();\n }\n }\n close(): void {\n this.dialogRef.close();\n }\n}\n"
},
{
"alpha_fraction": 0.6716015934944153,
"alphanum_fraction": 0.6738447546958923,
"avg_line_length": 36.79661178588867,
"blob_id": "29e474b14a9910d9d1ac156a53036877542d8080",
"content_id": "2ab610e18d2872d5ab6cdaaadfd7bc5b490508b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 2229,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 59,
"path": "/src/app/update-category/update-category.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import {Component, Inject} from '@angular/core';\nimport {FormControl, FormGroupDirective, NgForm, Validators, FormGroup} from '@angular/forms';\nimport {ErrorStateMatcher} from '@angular/material/core';\nimport { AdminService } from \"src/app/admin-service/admin.service\";\nimport { Router } from \"@angular/router\";\nimport {MatSnackBar} from '@angular/material/snack-bar';\nimport {MatDialog, MatDialogRef, MAT_DIALOG_DATA} from '@angular/material/dialog';\n\nexport class MyErrorStateMatcher implements ErrorStateMatcher {\n isErrorState(control: FormControl | null, form: FormGroupDirective | NgForm | null): boolean {\n const isSubmitted = form && form.submitted;\n return !!(control && control.invalid && (control.dirty || control.touched || isSubmitted));\n }\n}\n@Component({\n selector: 'app-update-category',\n templateUrl: './update-category.component.html',\n styleUrls: ['./update-category.component.scss']\n})\nexport class UpdateCategoryComponent {\n categoryNameControl = new FormControl('', [\n Validators.required,\n ]);\n\n matcher = new MyErrorStateMatcher();\n updateCategoryForm = new FormGroup({\n name: new FormControl(this.categoryInfo.categoryInfo.name)\n });\n\n constructor(@Inject(MAT_DIALOG_DATA) public categoryInfo: any, private adminService:AdminService, private router: Router, private _snackBar: MatSnackBar,public dialogRef: MatDialogRef<UpdateCategoryComponent>){}\n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n updateCategory() {\n if(this.updateCategoryForm.value.name!=''){\n this.adminService\n .updateCategory(this.categoryInfo.categoryInfo.categoryid, this.updateCategoryForm.value.name)\n .subscribe(\n data => {\n console.log('data ', data);\n if(data.length == 0) {\n throw new Error('Error Updating Category... ');\n this.openSnackBar(\"Error Updating Category...\", \"Close\");\n } else {\n this.close();\n this.openSnackBar(\"Category Updated Successfully\", \"Close\");\n }\n },\n err => console.log(err)\n );\n\n }\n }\n close(): void {\n this.dialogRef.close();\n }\n}"
},
{
"alpha_fraction": 0.6341666579246521,
"alphanum_fraction": 0.6358333230018616,
"avg_line_length": 26.9069766998291,
"blob_id": "2d4f64b96032b7c65b40c0055046096a617df41d",
"content_id": "942e508b55b4627980fecb4d622e7ffb85ce500e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1200,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 43,
"path": "/src/app/view-blogs/view-blogs.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { ThrowStmt } from '@angular/compiler';\nimport { Component, OnInit } from '@angular/core';\nimport { MatDialog } from '@angular/material/dialog';\nimport { AdminService } from '../admin-service/admin.service';\nimport { CreateBlogComponent } from '../create-blog/create-blog.component';\nimport { UpdateBlogComponent } from '../update-blog/update-blog.component';\n\n@Component({\n selector: 'app-view-blogs',\n templateUrl: './view-blogs.component.html',\n styleUrls: ['./view-blogs.component.scss']\n})\nexport class ViewBlogsComponent implements OnInit {\n\n constructor(private adminService:AdminService, public dialog: MatDialog) { }\n blogs: any = [];\n\n ngOnInit() {\n this.adminService.getBlogsList(0)\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.blogs=data;\n }\n console.log(data)\n },\n err => console.log(err)\n );\n }\n\n editBlog(blog: any) {\n const dialogRef = this.dialog.open(UpdateBlogComponent, {\n data: {blogInfo: blog},\n });\n \n dialogRef.afterClosed().subscribe(result => {\n console.log(`Dialog result: ${result}`);\n });\n }\n\n}\n"
},
{
"alpha_fraction": 0.6252231001853943,
"alphanum_fraction": 0.6276026368141174,
"avg_line_length": 30.129629135131836,
"blob_id": "4f880e7e84d3baa84e2d97033fc7fb6f0ef0f0cf",
"content_id": "450cd8e4523f0349333c44b2a8f5aa445d4bf17e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 3362,
"license_type": "no_license",
"max_line_length": 501,
"num_lines": 108,
"path": "/src/app/create-blog/create-blog.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { FormControl, FormGroup, Validators } from '@angular/forms';\nimport { MatDialog, MatDialogRef } from '@angular/material/dialog';\nimport { MatSnackBar } from '@angular/material/snack-bar';\nimport { AdminService } from '../admin-service/admin.service';\n\n@Component({\n selector: 'app-create-blog',\n templateUrl: './create-blog.component.html',\n styleUrls: ['./create-blog.component.scss']\n})\nexport class CreateBlogComponent implements OnInit {\n categories: any = []\n subCategories: any = []\n\n createBlogForm = new FormGroup({\n category: new FormControl(''),\n subCategory: new FormControl(''),\n title: new FormControl(''),\n thumbnail: new FormControl(''),\n content: new FormControl(''),\n desc: new FormControl(''),\n date: new FormControl(''),\n metaKeywords: new FormControl(''),\n metaDescription: new FormControl(''),\n pageTitle: new FormControl('')\n })\n\n constructor(private adminService:AdminService, private _snackBar: MatSnackBar, public dialog: MatDialog, public dialogRef: MatDialogRef<CreateBlogComponent>){}\n\n data:any={};\n success(res:any){\n this.data=res;\n }\n\n ngOnInit() {\n this.adminService\n .viewCategories()\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.categories=data;\n }\n },\n err => console.log(err)\n );\n }\n\n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n\n\n selectFormControl = new FormControl('', Validators.required);\n\n getSubCategory(event: any) {\n this.adminService\n .viewSubCategories(event.value.categoryid)\n .subscribe(\n data => {\n console.log(data)\n if(data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.subCategories=data;\n }\n },\n err => console.log(err)\n );\n }\n\n\n handleImgChange = (event: any) => {\n let file = event.files[0];\n let reader = new FileReader();\n reader.readAsDataURL(file);\n reader.onload = () => \n this.createBlogForm.patchValue({\n thumbnail: reader.result\n })\n,\n reader.onerror = function (error) {\n console.log('Error: ', error);\n };\n };\n\n submit() {\n this.adminService\n .createBlog(this.createBlogForm.value.subCategory.category, this.createBlogForm.value.subCategory.categoryid, this.createBlogForm.value.subCategory.name, this.createBlogForm.value.subCategory.subCategoryid, this.createBlogForm.value.date, this.createBlogForm.value.pageTitle, this.createBlogForm.value.metaKeywords, this.createBlogForm.value.metaDescription, this.createBlogForm.value.title, this.createBlogForm.value.desc, this.createBlogForm.value.content, this.createBlogForm.value.thumbnail)\n .subscribe(\n data => {\n console.log('data ', data);\n if(data.status == 1) {\n this.success(data);\n this.openSnackBar(\"Blog Created Successfully\", \"Close\");\n } else {\n throw new Error('Error Creating Blog... ');\n this.openSnackBar(\"Error Creating Blog\", \"Close\");\n }\n },\n err => console.log(err)\n );\n }\n}\n"
},
{
"alpha_fraction": 0.5297011137008667,
"alphanum_fraction": 0.5357547998428345,
"avg_line_length": 26.82105255126953,
"blob_id": "e671b6dd7930010f84a71c88f786007d36d460c8",
"content_id": "7d29f5c5bb2b49c9bfeb165c4eb626bb229688cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 2643,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 95,
"path": "/src/app/blogs/blogs.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { AdminService } from '../admin-service/admin.service';\nimport { Router, ActivatedRoute } from '@angular/router';\n\nimport { parse } from 'angular-html-parser';\n\n@Component({\n selector: 'app-blogs',\n templateUrl: './blogs.component.html',\n styleUrls: ['./blogs.component.scss'],\n})\nexport class BlogsComponent implements OnInit {\n constructor(private adminService: AdminService, private route: ActivatedRoute, private router:Router) {}\n allBlogs: any = [];\n blogs: any = [];\n pageNo: any = 1;\n noOfBlogs: any;\n currentValue: any = 0;\n\n ngOnInit() {\n let id = this.route.snapshot.params.categoryid;\n console.log(id);\n this.adminService.pagination(0,id).subscribe(\n (data) => {\n console.log(data);\n if (data.length == 0) {\n this.router.navigate(['']);\n } else {\n this.blogs = data;\n }\n },\n (err) => console.log(err)\n );\n this.adminService.getBlogsList(0).subscribe(\n (data) => {\n console.log(data)\n if (data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.allBlogs = data;\n // console.log(this.allBlogs)\n this.noOfBlogs = this.allBlogs.filter(\n (blog) => blog.isActive === 1\n ).length;\n }\n },\n (err) => console.log(err)\n );\n }\n\n setLocal(blogid: any) {\n localStorage.setItem('blogid', blogid);\n }\n\n handlePagination(value: any) {\n let id = this.route.snapshot.params.categoryid;\n console.log(id);\n if (value === 'prev') {\n if (this.currentValue > 0) {\n this.adminService.pagination(this.currentValue - 2,id).subscribe(\n (data) => {\n if (data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.blogs = data;\n this.pageNo = this.pageNo - 1;\n console.log(this.blogs)\n }\n },\n (err) => console.log(err)\n );\n\n this.currentValue = this.currentValue - 2;\n }\n } else {\n if (this.currentValue < this.noOfBlogs) {\n this.adminService.pagination(this.currentValue + 2,id).subscribe(\n (data) => {\n if (data.length == 0) {\n throw new Error('Error Fetching Blogs... ');\n } else {\n this.blogs = data;\n this.pageNo = this.pageNo + 1;\n console.log(this.blogs)\n\n }\n },\n (err) => console.log(err)\n );\n\n this.currentValue = this.currentValue + 2;\n }\n }\n }\n}\n"
},
{
"alpha_fraction": 0.6618209481239319,
"alphanum_fraction": 0.6625860929489136,
"avg_line_length": 31.674999237060547,
"blob_id": "3446740464450f07458a6a2d7a90b7776b0b21f5",
"content_id": "4a980d54bfd0224f882fd7ea91332e1faa7f35df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1307,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 40,
"path": "/src/app/view-categories/view-categories.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, } from '@angular/core';\nimport {MatBottomSheetRef} from '@angular/material/bottom-sheet';\nimport { MatDialog } from '@angular/material/dialog';\nimport { AdminService } from \"src/app/admin-service/admin.service\";\nimport { UpdateCategoryComponent } from '../update-category/update-category.component';\n\n@Component({\n selector: 'app-view-categories',\n templateUrl: './view-categories.component.html',\n styleUrls: ['./view-categories.component.scss']\n})\nexport class ViewCategoriesComponent {\n categories:any = [];\n constructor(private adminService:AdminService, public dialog: MatDialog, private _bottomSheetRef: MatBottomSheetRef<ViewCategoriesComponent>) { }\n\n openLink(event: MouseEvent): void {\n this._bottomSheetRef.dismiss();\n event.preventDefault();\n }\n ngOnInit(): void {\n this.adminService\n .getCategories()\n .subscribe(\n data => {\n if(data.length == 0) {\n throw new Error('Error Fetching Categories... ');\n } else {\n this.categories=data;\n console.log(this.categories)\n }\n },\n err => console.log(err)\n );\n }\n updateCategoryDialog(category: any){\n const dialogRef = this.dialog.open(UpdateCategoryComponent, {\n data: {categoryInfo: category},\n });\n }\n}\n"
},
{
"alpha_fraction": 0.6479524374008179,
"alphanum_fraction": 0.6512549519538879,
"avg_line_length": 30.54166603088379,
"blob_id": "d272653547002b765da7ddefa832ff620a0b7613",
"content_id": "73529969e4ecd688d6e398fa5a6e219b9ddf5001",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1514,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 48,
"path": "/src/app/register/register.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { FormControl, FormGroup } from '@angular/forms';\nimport { MatSnackBar } from '@angular/material/snack-bar';\nimport { Router } from '@angular/router';\nimport { AdminService } from '../admin-service/admin.service';\n\n@Component({\n selector: 'app-register',\n templateUrl: './register.component.html',\n styleUrls: ['./register.component.scss']\n})\nexport class RegisterComponent implements OnInit {\n\n registerForm = new FormGroup({\n username: new FormControl(''),\n email: new FormControl(''),\n password: new FormControl(''),\n confirm_password: new FormControl(''),\n })\n constructor(private adminService: AdminService, private _snackBar: MatSnackBar, private router: Router) { }\n\n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n \n ngOnInit(): void {\n }\n\n submit(){\n if(this.registerForm.value.password === this.registerForm.value.confirm_password){\n this.adminService.register(this.registerForm.value.username, this.registerForm.value.email, this.registerForm.value.password)\n .subscribe(\n data => {\n if(data.Status === \"1\") {\n this.router.navigate([\"/login\"]);\n this.openSnackBar(\"User Created Successfully\", \"Close\");\n } else {\n throw new Error('Error Creating User... ');\n this.openSnackBar(\"Error Creating User...\", \"Close\");\n }\n },\n err => console.log(err)\n )\n }\n }\n}\n"
},
{
"alpha_fraction": 0.6558868288993835,
"alphanum_fraction": 0.6584133505821228,
"avg_line_length": 33.120689392089844,
"blob_id": "99b93cde79ae67f87c4a7fc31f27e01b92ea638b",
"content_id": "d0e29522118cacd5ae1bbbbcfa24379900212b4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1979,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 58,
"path": "/src/app/create-category/create-category.component.ts",
"repo_name": "GaneshPandey-GP/angular-blog",
"src_encoding": "UTF-8",
"text": "import {Component} from '@angular/core';\nimport {FormControl, FormGroupDirective, NgForm, Validators, FormGroup} from '@angular/forms';\nimport {ErrorStateMatcher} from '@angular/material/core';\nimport { AdminService } from \"src/app/admin-service/admin.service\";\nimport { Router } from \"@angular/router\";\nimport {MatSnackBar} from '@angular/material/snack-bar';\nimport {MatDialog, MatDialogRef} from '@angular/material/dialog';\n\nexport class MyErrorStateMatcher implements ErrorStateMatcher {\n isErrorState(control: FormControl | null, form: FormGroupDirective | NgForm | null): boolean {\n const isSubmitted = form && form.submitted;\n return !!(control && control.invalid && (control.dirty || control.touched || isSubmitted));\n }\n}\n@Component({\n selector: 'app-create-category',\n templateUrl: './create-category.component.html',\n styleUrls: ['./create-category.component.scss']\n})\nexport class CreateCategoryComponent {\n matcher = new MyErrorStateMatcher();\n createCategoryForm = new FormGroup({\n name: new FormControl('')\n });\n\n constructor(private adminService:AdminService, private _snackBar: MatSnackBar,public dialogRef: MatDialogRef<CreateCategoryComponent>){}\n \n openSnackBar(message: string, action: string) {\n this._snackBar.open(message, action, {\n duration: 5000,\n });\n }\n \n close(): void {\n this.dialogRef.close();\n }\n createCategory() {\n if(this.createCategoryForm.value.name!=''){\n this.adminService\n .createCategory(this.createCategoryForm.value.name)\n .subscribe(\n data => {\n console.log('data ', data);\n if(data.length == 0) {\n throw new Error('Error Creating Category... ');\n this.openSnackBar(\"Error Creating Category...\", \"Close\");\n } else {\n this.close();\n this.openSnackBar(\"Category Created Successfully\", \"Close\");\n }\n },\n err => console.log(err)\n );\n\n }\n }\n\n}\n"
}
] | 22 |
leopepe/ExecutableHandler | https://github.com/leopepe/ExecutableHandler | ce4949ec209b56a48978ad6cb05f4bafffe122f6 | 11a58b691160999637a908712823fb85229d4574 | bdc35c2328a0d90e60dee7c59e768a85ca2d6239 | refs/heads/master | 2021-01-23T20:17:46.574745 | 2014-02-26T22:52:00 | 2014-02-26T22:52:00 | 17,132,318 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49044057726860046,
"alphanum_fraction": 0.4991687536239624,
"avg_line_length": 22.368932723999023,
"blob_id": "c6e1e9c6139cc794cbbe19eaf762ea332009b002",
"content_id": "af7219a95cfd1cd024e2e5c9650426c3193b738f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2406,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 103,
"path": "/handlers/ExecutableHandler.py",
"repo_name": "leopepe/ExecutableHandler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule Docstring\nDocstrings: http://www.python.org/dev/peps/pep-0257/\n\"\"\"\n\n__author__ = \"\"\"\n (_ )\n \\\\\\\", ) ^\n \\/, \\(\n CXC_/_)\n leo.pepe\n\"\"\"\n__license__ = \"Simplified BSD License\"\n__copyright__ = \"Copyright (c) 2013, Leonardo Pepe de Freitas\"\n__version__ = \"1.0\"\n\n# -*- coding: utf8 -*-\n\nfrom subprocess import Popen, PIPE\n\n__author__ = 'Leonardo Pepe de Freitas'\n__version__ = '1.0'\n\n\nclass Process(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, cmd):\n self.name = cmd.split('/')[-1]\n self.cmd = cmd\n self.pid = None\n self.type = None\n self.stdout = None\n self.stderr = None\n self.stdin = None\n self.returncode = None\n\n\nclass ProcessExecutionHandler(Process):\n \"\"\"\n\n \"\"\"\n def execute(self):\n \"\"\"\n\n :rtype : tuple\n \"\"\"\n # concatenate path + args in form of a list\n # ex.: /sbin/ifconfig -a\n cmd = self.cmd.split()\n print(cmd)\n #\n try:\n process = Popen(cmd, bufsize=0, stdout=PIPE, stderr=PIPE)\n # wait until process finish\n process.wait()\n process.poll()\n self.__update_process_data(pid=process.pid,\n returncode=process.returncode,\n stdout=process.stdout.read(),\n stderr=process.stderr.read())\n return self.returncode, self.stdout, self.stderr\n except IOError:\n raise OSError\n\n def __update_process_data(self, **kwargs):\n \"\"\"\n\n :rtype : dict\n \"\"\"\n self.__dict__.update(kwargs)\n\n def get_process_data(self):\n \"\"\"\n :rtype: dict\n \"\"\"\n #\n # self.data must exist to be returned\n # raise AttributeErro if not\n if self.__dict__:\n return self.__dict__\n else:\n raise AttributeError\n\n\ndef main():\n # instance object ExecHandler()\n ls = ProcessExecutionHandler(cmd='/bin/ls -l /tmp/')\n ls.execute()\n process_result = ls.get_process_data()\n print(' PROCESS: {0},\\n'\n ' PID: {1}\\n'\n ' RETURN CODE: {2}\\n'\n ' STDOUT: {3}'.format(process_result['cmd'], process_result['pid'], process_result['returncode'], process_result['stdout'])\n )\n print('{0}'.format(process_result))\n\nif __name__ == '__main__':\n main()"
}
] | 1 |
nodamu/TorchFusion | https://github.com/nodamu/TorchFusion | 437f5a044675d283c32c4e77c39eb976f6808e49 | f7b8f7f3b6cbe61dc7cc89404377a67444455297 | bd1f6e4baedce75b2d01bc13481d2a227c1f4fa0 | refs/heads/master | 2020-04-05T13:18:43.651093 | 2018-10-16T22:23:41 | 2018-10-16T22:23:41 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7818182110786438,
"alphanum_fraction": 0.7818182110786438,
"avg_line_length": 17.33333396911621,
"blob_id": "63f6e504cac60c9032249a4de41bc2db04185e01",
"content_id": "8c74466ff0d9ee518929ab68e295d8744bd5d599",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 3,
"path": "/torchfusion/vae/__init__.py",
"repo_name": "nodamu/TorchFusion",
"src_encoding": "UTF-8",
"text": "import torch\nimport torh.nn as nn\nfrom ..layers import "
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 22,
"blob_id": "6176998c18d81bc04008a55bd56bfb677c2cda54",
"content_id": "c78c1017fa1dce47f5988bfab394a324a3e13e6e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 2,
"path": "/torchfusion/vae/applications/applications.py",
"repo_name": "nodamu/TorchFusion",
"src_encoding": "UTF-8",
"text": "from ...layers import *\nimport torch.nn as nn"
},
{
"alpha_fraction": 0.6733668446540833,
"alphanum_fraction": 0.6733668446540833,
"avg_line_length": 24,
"blob_id": "058175d6e8edb0498fc2a15520b0b1ec39addb1f",
"content_id": "2e001a3c267dc403d024f2e74b97f4736fbb6284",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 199,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 8,
"path": "/torchfusion/utils/mp.py",
"repo_name": "nodamu/TorchFusion",
"src_encoding": "UTF-8",
"text": "import torch\n\ndef half(model):\n params_copy = [param.clone() for param in model.parameters()]\n for param in params_copy:\n param.requires_grad = True\n\n return model.half(), params_copy"
}
] | 3 |
yjleeeeee/laygo2 | https://github.com/yjleeeeee/laygo2 | 4f6b07efff423022ad7a5743ef245520272b6bcc | db5b83034cbb9cd4f32a5666ace6b2e5174cafe5 | deb5a2683771c81e490aee6de5ec882c61d52c96 | refs/heads/master | 2023-07-12T11:32:16.046599 | 2021-08-20T12:40:13 | 2021-08-20T12:40:13 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6255411505699158,
"alphanum_fraction": 0.6385281682014465,
"avg_line_length": 22.149999618530273,
"blob_id": "df8c684d973e7325023f0d3ac7b16c5a008f80a3",
"content_id": "75643b60a42a4fd5390a374c0650048a7d1e2e74",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 20,
"path": "/setup.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='laygo2',\n version='0.1.0',\n description='LAYout with Gridded Object 2 - A Python Package for Scripted Layout Generation Flow',\n long_description='readme',\n author='Jaeduk Han',\n url='',\n license=license,\n package=find_packages(exclude=('test', 'docs'))\n)"
},
{
"alpha_fraction": 0.6533536314964294,
"alphanum_fraction": 0.6567073464393616,
"avg_line_length": 44.54166793823242,
"blob_id": "86d73c6950a5a231a2a2421e166ee3df32e646c4",
"content_id": "3a86326d4b5ee3be9020f3994f8c50ff7466cc64",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3280,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 72,
"path": "/laygo2/interface/yaml.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements interfaces with yaml files.\n\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nimport yaml\nimport os.path\nimport laygo2\n\ndef export_template(template, filename, mode='append'):\n libname = template.libname\n cellname = template.cellname\n pins = template.pins()\n\n db = dict()\n if mode == 'append': # in append mode, the template is appended to 'filename' if the file exists.\n if os.path.exists(filename):\n with open(filename, 'r') as stream:\n db = yaml.load(stream, Loader=yaml.FullLoader)\n if libname not in db:\n db[libname] = dict()\n db[libname][cellname] = template.export_to_dict()\n with open(filename, 'w') as stream:\n yaml.dump(db, stream)\n\n#filename=libname+'_templates.yaml'\n\ndef import_template(filename):\n # load yaml file\n if os.path.exists(filename):\n with open(filename, 'r') as stream:\n db = yaml.load(stream, Loader=yaml.FullLoader)\n libname = list(db.keys())[0] # assuming there's only one library defined in each file.\n # create template library\n tlib = laygo2.object.database.TemplateLibrary(name=libname)\n # read out the yaml file entries and build template objects\n for tn, tdict in db[libname].items():\n pins = dict()\n if 'pins' in tdict:\n for pinname, pdict in tdict['pins'].items():\n pins[pinname] = laygo2.object.Pin(xy=pdict['xy'], layer=pdict['layer'], netname=pdict['netname'])\n t = laygo2.object.NativeInstanceTemplate(libname=libname, cellname=tn, bbox=tdict['bbox'], pins=pins)\n tlib.append(t)\n return tlib\n\n"
},
{
"alpha_fraction": 0.6538461446762085,
"alphanum_fraction": 0.6538461446762085,
"avg_line_length": 8.625,
"blob_id": "8f00b44c149cc67b97fca7cdb53ddc622b3cad21",
"content_id": "0efa4d8360438f57d3dc5f760b78a2ce28ed289c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 78,
"license_type": "permissive",
"max_line_length": 17,
"num_lines": 8,
"path": "/laygo2/object/brics.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "\n\nclass brick:\n pass\n\nclass brickboard:\n pass\n\nclass baseplate:\n pass"
},
{
"alpha_fraction": 0.5159491300582886,
"alphanum_fraction": 0.545503556728363,
"avg_line_length": 37.028690338134766,
"blob_id": "4a3ae45756023b079e2ce7d7b218d47015c99101",
"content_id": "ca93ee14fa63a06513340ee9c7f194475c5c5f18",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37118,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 976,
"path": "/laygo2/interface/gds.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements interfaces with gds files.\nImplemented by Eric Jan.\n\n\"\"\"\n\n__author__ = \"Eric Jan\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\n# TODO: Implement import functions (similar to load in python-gdsii)\n\nimport logging\nimport pprint\n\nimport numpy as np\n\nimport laygo2.object\nfrom laygo2.interface.gds_helper import *\n\n\nclass Library:\n def __init__(self, version, name, physicalUnit, logicalUnit):\n \"\"\"\n Initialize Library object\n\n Parameters\n ----------\n version : int\n GDSII file version. 
5 is used for v5.\n name : str\n library name.\n physicalUnit : float\n physical resolution of the data.\n logicalUnit : float\n logical resolution of the data.\n \"\"\"\n self.version = version\n self.name = name\n self.units = [logicalUnit, physicalUnit]\n self.structures = dict()\n assert physicalUnit > 0 and logicalUnit > 0\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n file stream to be written\n \"\"\"\n stream.write(pack_data(\"HEADER\", self.version))\n stream.write(pack_bgn(\"BGNLIB\"))\n stream.write(pack_data(\"LIBNAME\", self.name))\n stream.write(pack_data(\"UNITS\", self.units))\n for strname, struct in self.structures.items():\n struct.export(stream)\n stream.write(pack_no_data(\"ENDLIB\"))\n\n def add_structure(self, name):\n \"\"\"\n Add a structure object to library\n\n Parameters\n ----------\n name : str\n name of structure\n\n Returns\n -------\n laygo2.GDSIO.Structure\n created structure object\n \"\"\"\n s=Structure(name)\n self.structures[name]=s\n return s\n\n def add_boundary(self, strname, layer, dataType, points):\n \"\"\"\n Add a boundary object to specified structure\n\n Parameters\n ----------\n strname : str\n structure name to insert the boundary object\n layer : int\n layer name of the boundary object\n dataType : int\n layer purpose of the boundary object\n points : 2xn integer array list\n point array of the boundary object\n ex) [[x0, y0], [x1, x1], ..., [xn-1, yn-1], [x0, y0]]\n\n Returns\n -------\n laygo2.GDSIO.Boundary\n created boundary object\n \"\"\"\n return self.structures[strname].add_boundary(layer, dataType, points)\n\n def add_path(self, strname, layer, dataType, points, width, pathtype=4, bgnextn=0, endextn=0):\n \"\"\"\n Add a boundary object to specified structure\n\n Parameters\n ----------\n layer : int\n layer name\n dataType : int\n layer purpose\n points : list\n layer coordinates\n width : int\n the width of the path\n pathtype : int, optional\n the type of path. 
0 for flushing endpoints, 1 for round-ended paths, 2 for half-square ended paths,\n 4 for custom square-end extensions (set by bgnextn and endextn)\n\n Examples\n --------\n add_path('test', 50, 0, [[1000, 1000], [1000, 0], [0, 0], [0, 1000], [1000, 1000]], 10)\n\n Returns\n -------\n laygo2.GDSIO.Path\n generated path object\n \"\"\"\n return self.structures[strname].add_path(layer, dataType, points, width, pathtype, bgnextn, endextn)\n\n def add_instance(self, strname, cellname, xy, transform='R0'):\n \"\"\"\n Add an instance object to specified structure\n\n Parameters\n ----------\n strname : str\n structure name to insert the instance\n cellname : str\n instance cellname\n xy : [int, int]\n instance cooridnate\n transform : str\n transform parameter\n\n Returns\n -------\n laygo2.GDSIO.Instance\n created instance object\n \"\"\"\n return self.structures[strname].add_instance(cellname, xy, transform)\n\n def add_instance_array(self, strname, cellname, n_col, n_row, xy, transform='R0'):\n \"\"\"\n Add an instance array to specified structure\n\n Parameters\n ----------\n strname : str\n structure name to insert the instance\n cellname : str\n instance cellname\n n_col : int\n number of columns\n n_row : int\n number of rows\n xy : [int, int]\n instance coordinate\n transform : str\n transform parameter\n\n Returns\n -------\n laygo2.GDSIO.InstanceArray\n instance array object\n \"\"\"\n return self.structures[strname].add_instance_array(cellname, n_col, n_row, xy, transform)\n\n def add_text(self, strname, layer, textType, xy, string, textHeight=100):\n \"\"\"\n Add a text object to specified structure\n\n Parameters\n ----------\n strname : str\n structure name to insert the text object\n layer : int\n layer name of the text object\n textType : int\n layer purpose of the text object\n xy : [int, int]\n text coordinate\n string : str\n text string\n textHeight : int\n text height\n\n Returns\n -------\n laygo2.GDSIO.Text\n text object\n \"\"\"\n return self.structures[strname].add_text(layer, textType, xy, string, textHeight)\n\n\nclass Structure(list):\n\n def __init__(self, name):\n \"\"\"\n initialize Structure object\n\n Parameters\n ----------\n name : str\n structure name\n \"\"\"\n list.__init__(self)\n self.name = name\n self.elements = []\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n file stream to be written\n \"\"\"\n stream.write(pack_bgn(\"BGNSTR\"))\n stream.write(pack_data(\"STRNAME\", self.name))\n for element in self.elements:\n element.export(stream)\n stream.write(pack_no_data(\"ENDSTR\"))\n\n def add_boundary(self, layer, dataType, points):\n \"\"\"\n Add a boundary object to structure\n\n Parameters\n ----------\n layer : int\n layer name\n dataType : int\n layer purpose\n points : list\n layer coordinates\n\n Examples\n --------\n add_boundary('test', 50, 0, [[1000, 1000], [1000, 0], [0, 0], [0, 1000], [1000, 1000]])\n\n Returns\n -------\n laygo2.GDSIO.Boundary\n generated boundary object\n \"\"\"\n elem = Boundary(layer, dataType, points)\n self.elements.append(elem)\n return elem\n\n def add_path(self, layer, dataType, points, width, pathtype=4, bgnextn=0, endextn=0):\n \"\"\"\n Add a boundary object to structure\n\n Parameters\n ----------\n layer : int\n layer name\n dataType : int\n layer purpose\n points : list\n layer coordinates\n width : int\n the width of the path\n pathtype : int, optional\n the type of path. 
0 for flushing endpoints, 1 for round-ended paths, 2 for half-square ended paths,\n 4 for custom square-end extensions (set by bgnextn and endextn)\n\n Examples\n --------\n add_path('test', 50, 0, [[1000, 1000], [1000, 0], [0, 0], [0, 1000], [1000, 1000]], 10)\n\n Returns\n -------\n laygo2.GDSIO.Path\n generated path object\n \"\"\"\n elem = Path(layer, dataType, points, width, pathtype, bgnextn, endextn)\n self.elements.append(elem)\n return elem\n\n def add_instance(self, cellname, xy, transform='R0'):\n \"\"\"\n Add an instance object to structure\n\n Parameters\n ----------\n cellname : str\n cell name\n xy : [int, int]\n xy coordinate\n transform : str\n transform parameter\n\n Returns\n -------\n laygo2.GDSIO.Instance\n generated instance object\n \"\"\"\n elem = Instance(cellname, xy, transform)\n self.elements.append(elem)\n return elem\n\n def add_instance_array(self, cellname, n_col, n_row, xy, transform='R0'):\n \"\"\"\n Add an instance array object to structure\n\n Parameters\n ----------\n cellname : str\n cell name\n n_col : int\n number of columns\n n_row : int\n number of rows\n xy : [int, int]\n xy coordinate\n transform : str\n transform parameter\n\n Examples\n --------\n new_lib.add_instance_array('test2', 'test', 2, 3, [[3000, 3000], [3000 + 2 * 2000, 3000], [3000, 3000 + 3 * 3000]])\n\n Returns\n -------\n laygo2.GDSIO.InstanceArray\n generated instance array object\n \"\"\"\n elem = InstanceArray(cellname, n_col, n_row, xy, transform)\n self.elements.append(elem)\n return elem\n\n def add_text(self, layer, textType, xy, string, textHeight=100):\n \"\"\"\n Add a text object to structure\n\n Parameters\n ----------\n layer : int\n layer name\n textType : int\n layer purpose\n xy : list\n xy coordinate\n string : str\n text string\n textHeight : int\n text height\n\n Returns\n -------\n laygo2.GDSIO.Text\n generated text object\n \"\"\"\n elem = Text(layer, textType, xy, string, textHeight)\n self.elements.append(elem)\n return elem\n\n\nclass Element:\n \"\"\"Base class for GDSIO objects\"\"\"\n possible_transform_parameters = {'R0': (None, None),\n 'R90': (0, 90),\n 'R180': (0, 180),\n 'R270': (0, 270),\n 'MX': (32768, 0),\n 'MY': (32768, 180)\n }\n \"\"\"dict: transform parameter dictionary\"\"\"\n\n def set_transform_parameters(self, transform):\n \"\"\"\n initialize transform parameters\n\n Parameters\n ----------\n transform : str\n transform parameter,\n 'R0' : default, no transform,\n 'R90' : rotate by 90-degree,\n 'R180' : rotate by 180-degree,\n 'R270' : rotate by 270-degree,\n 'MX' : mirror across X axis,\n 'MY' : mirror across Y axis\n \"\"\"\n if transform not in self.possible_transform_parameters:\n raise Exception(\"enter a viable transform parameter\\npossible_transform_parameters = ['R0', 'R90', 'R180', 'R270', 'MX', 'MY']\")\n self.strans, self.angle = self.possible_transform_parameters[transform]\n\n\nclass Boundary (Element):\n \"\"\"Boundary object for GDSIO\"\"\"\n\n def __init__(self, layer, dataType, points):\n \"\"\"\n initialize Boundary object\n\n Parameters\n ----------\n layer : int\n Layer id\n dataType : int\n Layer purpose\n points : list\n xy coordinates for Boundary object\n \"\"\"\n if len(points) < 2:\n raise Exception(\"not enough points\")\n if len(points) >= 2 and points[0] != points[len(points) - 1]:\n raise Exception(\"start and end points different\")\n temp_xy = []\n for point in points:\n if len(point) != 2:\n raise Exception(\"error for point input: \" + str(point))\n temp_xy += point\n self.layer = layer\n self.dataType = 
dataType\n self.xy = list(temp_xy)\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n File stream to be written\n \"\"\"\n stream.write(pack_no_data(\"BOUNDARY\"))\n stream.write(pack_data(\"LAYER\", self.layer))\n stream.write(pack_data(\"DATATYPE\", self.dataType))\n stream.write(pack_data(\"XY\", self.xy))\n stream.write(pack_no_data(\"ENDEL\"))\n\n\nclass Path (Element):\n \"\"\"Path object for GDSIO\"\"\"\n\n def __init__(self, layer, dataType, points, width, pathtype, bgnextn, endextn):\n \"\"\"\n initialize Boundary object\n\n Parameters\n ----------\n layer : int\n Layer id\n dataType : int\n Layer purpose\n points : list\n xy coordinates for Path object\n width : int\n the width of the path\n pathtype : int, optional\n the type of path. 0 for flushing endpoints, 1 for round-ended paths, 2 for half-square ended paths,\n 4 for custom square-end extensions (set by bgnextn and endextn)\n \"\"\"\n if len(points) < 2:\n raise Exception(\"not enough points\")\n temp_xy = []\n for point in points:\n if len(point) != 2:\n raise Exception(\"error for point input: \" + str(point))\n temp_xy += point\n self.layer = layer\n self.dataType = dataType\n self.xy = list(temp_xy)\n self.width = width\n self.pathtype = pathtype\n self.bgnextn = bgnextn\n self.endextn = endextn\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n File stream to be written\n \"\"\"\n stream.write(pack_no_data(\"PATH\"))\n stream.write(pack_data(\"LAYER\", self.layer))\n stream.write(pack_data(\"DATATYPE\", self.dataType))\n stream.write(pack_data(\"PATHTYPE\", self.pathtype))\n stream.write(pack_data(\"WIDTH\", self.width))\n stream.write(pack_data(\"BGNEXTN\", self.bgnextn))\n stream.write(pack_data(\"ENDEXTN\", self.endextn))\n stream.write(pack_data(\"XY\", self.xy))\n stream.write(pack_no_data(\"ENDEL\"))\n\n\nclass Instance (Element):\n \"\"\"Instance object for GDSIO\"\"\"\n\n def __init__(self, sname, xy, transform='R0'):\n \"\"\"\n initialize Instance object\n\n Parameters\n ----------\n sname : str\n Instance name\n xy : array\n xy coordinate of Instance Object\n transform : str\n transform parameter,\n 'R0' : default, no transform,\n 'R90' : rotate by 90-degree,\n 'R180' : rotate by 180-degree,\n 'R270' : rotate by 270-degree,\n 'MX' : mirror across X axis,\n 'MY' : mirror across Y axis\n \"\"\"\n Element.__init__(self)\n self.sname = sname\n l = len(xy)\n if l > 1:\n raise Exception(\"too many points provided\\ninstance should only be located at one point\")\n elif l < 1:\n raise Exception(\"no point provided\\ninstance should be located at a point\")\n self.xy = list(xy[0])\n Element.set_transform_parameters(self, transform)\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n File stream to be written\n \"\"\"\n stream.write(pack_no_data(\"SREF\"))\n stream.write(pack_data(\"SNAME\", self.sname))\n pack_optional(\"STRANS\", self.strans, stream)\n pack_optional(\"ANGLE\", self.angle, stream)\n stream.write(pack_data(\"XY\", self.xy))\n stream.write(pack_no_data(\"ENDEL\"))\n\n\nclass InstanceArray (Element):\n \"\"\"InstanceArray object for GDSIO\"\"\"\n\n def __init__(self, sname, n_col, n_row, xy, transform='R0'):\n \"\"\"\n Initialize Instance Array object\n\n Parameters\n ----------\n sname : str\n InstanceArray name\n n_col: int\n Number of columns\n n_row : int\n Number of rows\n xy : array\n xy coordinates for InstanceArray Object,\n should 
be organized as: [(x0, y0), (x0+n_col*sp_col, y_0), (x_0, y0+n_row*sp_row)]\n transform : str\n Transform parameter,\n 'R0' : default, no transform,\n 'R90' : rotate by 90-degree,\n 'R180' : rotate by 180-degree,\n 'R270' : rotate by 270-degree,\n 'MX' : mirror across X axis,\n 'MY' : mirror across Y axis\n \"\"\"\n l = len(xy)\n if l != 3:\n s = \"\\nxy: [(x0, y0), (x0+n_col*sp_col, y_0), (x_0, y0+n_row*sp_row)]\"\n if l > 3:\n s = \"too many points provided\" + s\n else:\n s = \"not enough points provided\" + s\n raise Exception(s)\n self.sname = sname\n self.colrow = [n_col, n_row]\n temp_xy = []\n for point in xy:\n if len(point) != 2:\n raise Exception(\"error for point input: \" + str(point))\n temp_xy += point\n self.xy = list(temp_xy)\n # self.xy = [num for pt in xy for num in pt]\n Element.set_transform_parameters(self, transform)\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n File stream to be written\n \"\"\"\n stream.write(pack_no_data(\"AREF\"))\n stream.write(pack_data(\"SNAME\", self.sname))\n pack_optional(\"STRANS\", self.strans, stream)\n pack_optional(\"ANGLE\", self.angle, stream)\n stream.write(pack_data(\"COLROW\", self.colrow))\n stream.write(pack_data(\"XY\", self.xy))\n stream.write(pack_no_data(\"ENDEL\"))\n\n\nclass Text (Element):\n \"\"\"Text object for GDSIO\"\"\"\n\n def __init__(self, layer, textType, xy, string, textHeight=100):\n \"\"\"\n Initialize Text object\n\n Parameters\n ----------\n layer : int\n Layer id\n textType : int\n I'm not really sure what this is\n xy : array\n xy coordinates for Text Object\n string : str\n Text object display string\n \"\"\"\n l = len(xy)\n if l > 1:\n raise Exception(\"too many points provided\\ninstance should only be located at one point\")\n elif l < 1:\n raise Exception(\"no point provided\\ninstance should be located at a point\")\n self.layer = layer\n self.textType = textType\n self.xy = xy[0]\n self.string = string\n self.strans = 0\n self.mag = textHeight\n\n def export(self, stream):\n \"\"\"\n Export to stream\n\n Parameters\n ----------\n stream : stream\n File stream to be written\n \"\"\"\n stream.write(pack_no_data(\"TEXT\"))\n stream.write(pack_data(\"LAYER\", self.layer))\n stream.write(pack_data(\"TEXTTYPE\", self.textType))\n stream.write(pack_data(\"STRANS\", self.strans))\n #stream.write(pack_data(\"ANGLE\", self.angle))\n stream.write(pack_data(\"MAG\", self.mag))\n stream.write(pack_data(\"XY\", self.xy))\n stream.write(pack_data(\"STRING\", self.string))\n stream.write(pack_no_data(\"ENDEL\"))\n\n\n#export functions\ndef export_from_laygo(db, filename, cellname=None, scale = 1e-9, layermapfile=\"default.layermap\",\n physical_unit=1e-9, logical_unit=0.001, pin_label_height=0.1,\n pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n abstract_instances = False, abstract_instances_layer = ['prBoundary', 'drawing']):\n \"\"\"\n Export specified laygo2 object(s) to a GDS file\n\n Parameters\n ----------\n db : laygo.object.database.Library\n a library object that designs to be exported.\n filename : str\n the name of the output file.\n cellname : list or str or None.\n the name of cells to be exported. 
If None, all cells in the libname are exported.\n scale : float\n the scaling factor that converts integer coordinates to actual ones (mostly 1e-6, to convert 1 to 1um).\n layermapfile : str\n the name of layermap file.\n physical_unit : float, optional\n GDS physical unit.\n logical_unit : float, optional\n GDS logical unit.\n pin_label_height : float, optional\n the height of pin label.\n pin_annotate_layer : [str, str], optional\n pin annotate layer name (used when pinname is different from netname).\n text_height : float, optional\n the height of text\n \"\"\"\n scl = round(1/scale*physical_unit/logical_unit)\n # 1um in phy\n # 1um/1nm = 1000 in laygo2 if scale = 1e-9 (1nm)\n # 1000/1nm*1nm/0.001 = 1000000 in gds if physical_unit = 1e-9 (1nm) and logical_unit = 0.001\n layermap = load_layermap(layermapfile) # load layermap information\n logging.debug('ExportGDS: Library:' + db.name)\n lib_export = Library(5, str.encode(db.name), physical_unit, logical_unit)\n\n cellname = db.keys() if cellname is None else cellname # export all cells if cellname is not given.\n cellname = [cellname] if isinstance(cellname, str) else cellname # convert to a list for iteration.\n for sn in cellname:\n s = db[sn]\n logging.debug('ExportGDS: Structure:' + sn)\n s_export = lib_export.add_structure(sn)\n # export objects\n for objname, obj in s.items():\n _convert_laygo_object(objname=objname, obj=obj, scl=scl, layermap=layermap,\n lib_export=lib_export, sn=sn, pin_label_height=pin_label_height,\n pin_annotate_layer=pin_annotate_layer, text_height=text_height,\n abstract_instances=abstract_instances,\n abstract_instances_layer=abstract_instances_layer)\n with open(filename, 'wb') as stream:\n lib_export.export(stream)\n\n\ndef export(db, filename, cellname=None, scale = 1e-9, layermapfile=\"default.layermap\",\n physical_unit=1e-9, logical_unit=0.001, pin_label_height=0.1,\n pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n abstract_instances=False, abstract_instances_layer=['prBoundary', 'drawing']):\n \"\"\"See laygo2.interface.gds.export_from_laygo for details.\"\"\"\n export_from_laygo(db, filename, cellname, scale, layermapfile,\n physical_unit, logical_unit, pin_label_height,\n pin_annotate_layer, text_height,\n abstract_instances, abstract_instances_layer)\n\n\ndef _convert_laygo_object(objname, obj, scl, layermap, lib_export, sn, pin_label_height=0.1,\n pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n abstract_instances=False, abstract_instances_layer=['prBoundary', 'drawing']):\n \"\"\"Convert laygo objects to gds objects.\n\n virtual_instance_type: str\n instance\n uniquified_instance\n flattened\n\n \"\"\"\n # TODO: this code is not very readable. 
Refactor it.\n\n if obj.__class__ == laygo2.object.Rect:\n xy = obj.xy * scl\n hext = obj.hextension * scl # extensions for routing wires.\n vext = obj.vextension * scl\n bx1, bx2 = sorted(xy[:, 0].tolist()) # really need to sort the coordinates?\n by1, by2 = sorted(xy[:, 1].tolist())\n ll = np.array([bx1, by1]) # lower-left\n ur = np.array([bx2, by2]) # upper-right\n _xy = np.vstack([ll,ur])\n c = [[round(_xy[0][0]-hext), round(_xy[0][1]-vext)], [round(_xy[0][0]-hext), round(_xy[1][1]+vext)],\n [round(_xy[1][0]+hext), round(_xy[1][1]+vext)], [round(_xy[1][0]+hext), round(_xy[0][1]-vext)],\n [round(_xy[0][0]-hext), round(_xy[0][1]-vext)]] # build list\n l = layermap[obj.layer[0]][obj.layer[1]]\n lib_export.add_boundary(sn, l[0], l[1], c)\n logging.debug('ExportGDS: Rect:' + objname + ' layer:' + str(l) + ' xy:' + str(c))\n elif obj.__class__ == laygo2.object.Path:\n xy = obj.xy * scl\n width = obj.width * scl\n extn = obj.extension * scl\n l = layermap[obj.layer[0]][obj.layer[1]]\n lib_export.add_path(sn, l[0], l[1], xy.tolist(), width, pathtype=4, bgnextn=extn, endextn=extn)\n logging.debug('ExportGDS: Path:' + objname + ' layer:' + str(l) + ' xy:' + str(xy))\n elif obj.__class__ == laygo2.object.Pin:\n if obj.elements is None:\n _objelem = [obj]\n else:\n _objelem = obj.elements\n for idx, _obj in np.ndenumerate(_objelem):\n xy = _obj.xy * scl\n bx1, bx2 = sorted(xy[:,0].tolist()) # again, let's check this.\n by1, by2 = sorted(xy[:,1].tolist())\n ll = np.array([bx1, by1]) # lower-left\n ur = np.array([bx2, by2]) # upper-right\n _xy = np.vstack([ll,ur])\n c = [[round(_xy[0][0]), round(_xy[0][1])], [round(_xy[0][0]), round(_xy[1][1])],\n [round(_xy[1][0]), round(_xy[1][1])], [round(_xy[1][0]), round(_xy[0][1])],\n [round(_xy[0][0]), round(_xy[0][1])]] # build list\n l = layermap[_obj.layer[0]][_obj.layer[1]]\n lib_export.add_boundary(sn, l[0], l[1], c)\n lib_export.add_text(sn, l[0], l[1], [[(_xy[0][0]+_xy[1][0])//2, (_xy[0][1]+_xy[1][1])//2]],\n string=_obj.netname, textHeight=pin_label_height * scl)\n if not _obj.name == _obj.netname: # if netname is different from pinname, create an annotate text\n if _obj.name is not None:\n l_ann = layermap[pin_annotate_layer[0]][pin_annotate_layer[1]]\n lib_export.add_text(sn, l_ann[0], l_ann[1],\n [[(_xy[0][0]+_xy[1][0])//2, (_xy[0][1]+_xy[1][1])//2]],\n string=_obj.name, textHeight=pin_label_height * scl)\n logging.debug('ExportGDS: Pin:' + objname + ' net:' + _obj.netname + ' layer:' + str(l) + ' xy:' + str(c))\n elif obj.__class__ == laygo2.object.physical.Text:\n xy = obj.xy * scl\n l = layermap[obj.layer[0]][obj.layer[1]]\n _xy = [round(_xy0) for _xy0 in xy]\n lib_export.add_text(sn, l[0], l[1], [_xy], string=obj.text, textHeight=round(text_height * scl))\n logging.debug('ExportGDS: Text:' + objname + ' text:' + obj.text + ' layer:' + str(l) + ' xy:' + str(_xy))\n elif obj.__class__ == laygo2.object.Instance:\n _convert_laygo_object_instance(lib_export, sn, objname, obj, scl, abstract_instances, abstract_instances_layer,\n layermap)\n elif obj.__class__ == laygo2.object.VirtualInstance: # virtual instance\n virt_struc_name = sn + '_VirtualInst_' + objname\n s_virt = lib_export.add_structure(virt_struc_name)\n for en, e in obj.native_elements.items():\n _convert_laygo_object(objname=objname+'_'+en, obj=e, scl=scl, layermap=layermap, lib_export=lib_export,\n sn=virt_struc_name, pin_label_height=pin_label_height, pin_annotate_layer=pin_annotate_layer,\n text_height=text_height, abstract_instances=abstract_instances,\n 
abstract_instances_layer=abstract_instances_layer)\n xy = obj.xy * scl\n xyl = xy.tolist()\n if np.array_equal(obj.shape, np.array([1, 1])) or (obj.shape is None): # single instance\n lib_export.add_instance(sn, virt_struc_name, [xyl], obj.transform)\n logging.debug('ExportGDS: VirtualInstance:' + objname + ' cellname:' + obj.cellname + ' xy:' + str(xy))\n else: # mosaic\n xy_mosaic = [[round(xyl[0]), round(xyl[1])],\n [round(xyl[0] + obj.shape[0] * (obj.spacing[0] * scl)), round(xyl[1])],\n [round(xyl[0]), round(xyl[1] + obj.shape[1] * (obj.spacing[1] * scl))]]\n\n lib_export.add_instance_array(sn, virt_struc_name, obj.shape[0], obj.shape[1], xy_mosaic,\n obj.transform)\n logging.debug('ExportGDS: VirtualInstance:' + objname + ' cellname:' + obj.cellname + ' xy:' + str(xy_mosaic)\n + ' shape:' + str(obj.shape.tolist()) + ' spacing:' + str(obj.spacing.tolist()))\n\n\ndef _convert_laygo_object_instance(lib_export, sn, objname, obj, scl, abstract_instances, abstract_instances_layer, layermap):\n \"\"\"Internal function of the instance conversion.\"\"\"\n if abstract_instances: # export abstract\n _xy = obj.bbox * scl\n c = [[round(_xy[0][0]), round(_xy[0][1])], [round(_xy[0][0]), round(_xy[1][1])],\n [round(_xy[1][0]), round(_xy[1][1])], [round(_xy[1][0]), round(_xy[0][1])],\n [round(_xy[0][0]), round(_xy[0][1])]] # build list\n l = layermap[abstract_instances_layer[0]][abstract_instances_layer[1]]\n lib_export.add_boundary(sn, l[0], l[1], c)\n else:\n xy = obj.xy * scl\n xyl = xy.tolist()\n if np.array_equal(obj.shape, np.array([1, 1])) or (obj.shape is None): # single instance\n lib_export.add_instance(sn, obj.cellname, [xyl], obj.transform)\n logging.debug('ExportGDS: Instance:' + objname + ' cellname:' + obj.cellname + ' xy:' + str(xy))\n else: # mosaic\n xy_mosaic = [[round(xyl[0]), round(xyl[1])],\n [round(xyl[0] + obj.shape[0] * (obj.spacing[0] * scl)), round(xyl[1])],\n [round(xyl[0]), round(xyl[1] + obj.shape[1] * (obj.spacing[1] * scl))]]\n\n lib_export.add_instance_array(sn, obj.cellname, obj.shape[0], obj.shape[1], xy_mosaic,\n obj.transform)\n logging.debug('ExportGDS: Instance:' + objname + ' cellname:' + obj.cellname + ' xy:' + str(xy_mosaic)\n + ' shape:' + str(obj.shape.tolist()) + ' spacing:' + str(obj.spacing.tolist()))\n\n\n# TODO: implement export_to_laygo function.\n\n# test\nif __name__ == '__main__':\n test_raw = True\n test_laygo = True\n # Test1 - creating a GDS file using raw access functions.\n if test_raw:\n # Create a new library\n new_lib = Library(5, b'MYLIB', 1e-9, 0.001)\n # Add a new structure to the new library\n struc = new_lib.add_structure('test')\n # Add a boundary object\n new_lib.add_boundary('test', 50, 0, [[0, 0], [0, 100000], [100000, 100000], [100000, 0], [0, 0]])\n # Add a path object\n new_lib.add_path('test', 50, 0, [[0, 0], [0, 100000], [100000, 100000], [100000, 0], [0, 0]], 50000, 4, 10000, 20000)\n # Add a new structure to the new library\n struc2 = new_lib.add_structure('test2')\n # Add an instance\n new_lib.add_instance('test2', 'test', [[0, 0]])\n # Add an array instance\n new_lib.add_instance_array('test2', 'test', 2, 3,\n [[300000, 300000], [300000 + 2 * 200000, 300000], [300000, 300000 + 3 * 300000]])\n # rotations\n # original Instance\n new_lib.add_instance('test2', 'test', [[0, -200000]])\n # rotate by 90\n new_lib.add_instance('test2', 'test', [[200000, -200000]], \"R90\") # ANGLE 90, STRANS 0\n # rotate by 180\n new_lib.add_instance('test2', 'test', [[400000, -200000]], \"R180\") # 180, 0\n # rotate by 270\n 
new_lib.add_instance('test2', 'test', [[600000, -200000]], \"R270\") # 270, 0\n # mirror across x-axis\n new_lib.add_instance('test2', 'test', [[800000, -500000]], \"MX\") # 0, 32768\n # mirror across y-axis\n new_lib.add_instance('test2', 'test', [[1000000, -500000]], \"MY\") # 180, 32768\n # Add a text object\n new_lib.add_text('test2', 45, 0, [[0, 0]], 'mytext')\n\n # Export to a GDS file\n with open('GDS_raw_test1.gds', 'wb') as stream:\n new_lib.export(stream)\n\n # Import the GDS file back and display\n with open('GDS_raw_test1.gds', 'rb') as stream:\n pprint.pprint(readout(stream, scale=1))\n\n # Test2 - creating a GDS file from laygo2 object and export_from_laygo function.\n if test_laygo:\n import laygo2.object\n lib0 = laygo2.object.Library(name='MYLIB')\n dsn0 = laygo2.object.Design(name='test')\n rect0 = laygo2.object.Rect(name='R0', xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'])\n dsn0.append(rect0)\n path0 = laygo2.object.Path(name='PT0', xy=[[0, 0], [0, 100], [100, 100], [100, 0], [0, 0]], width=50,\n extension=20, layer=['M1', 'drawing'])\n dsn0.append(path0)\n lib0.append(dsn0)\n dsn1 = laygo2.object.Design(name='test2')\n inst0 = laygo2.object.Instance(name='I0', xy=[0, 0], libname='MYLIB', cellname='test', transform='R0')\n dsn1.append(inst0)\n inst1 = laygo2.object.Instance(name='I1', xy=[300, 300], libname='MYLIB', cellname='test', shape=[2, 3],\n pitch=[200, 300], transform='R0')\n dsn1.append(inst1)\n inst2 = laygo2.object.Instance(name='I2', xy=[0, -200], libname='MYLIB', cellname='test', transform='R0')\n dsn1.append(inst2)\n inst3 = laygo2.object.Instance(name='I3', xy=[200, -200], libname='MYLIB', cellname='test', transform='R90')\n dsn1.append(inst3)\n inst4 = laygo2.object.Instance(name='I4', xy=[400, -200], libname='MYLIB', cellname='test', transform='R180')\n dsn1.append(inst4)\n inst5 = laygo2.object.Instance(name='I5', xy=[600, -200], libname='MYLIB', cellname='test', transform='R270')\n dsn1.append(inst5)\n inst6 = laygo2.object.Instance(name='I6', xy=[800, -500], libname='MYLIB', cellname='test', transform='MX')\n dsn1.append(inst6)\n inst7 = laygo2.object.Instance(name='I7', xy=[1000, -500], libname='MYLIB', cellname='test', transform='MY')\n dsn1.append(inst7)\n text0 = laygo2.object.Text(name='T0', xy=[0, 0], layer=['text', 'drawing'], text='mytext')\n dsn1.append(text0)\n inst8_pins = dict()\n inst8_pins['in'] = laygo2.object.Pin(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'], netname='in')\n inst8_pins['out'] = laygo2.object.Pin(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'], netname='out')\n inst8_native_elements = dict()\n inst8_native_elements['R0'] = laygo2.object.Rect(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'])\n inst8_native_elements['R1'] = laygo2.object.Rect(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'])\n inst8_native_elements['R2'] = laygo2.object.Rect(xy=[[0, 0], [100, 100]], layer=['prBoundary', 'drawing'])\n inst8 = laygo2.object.VirtualInstance(name='I8', xy=[500, 500], native_elements=inst8_native_elements,\n shape=[3, 2], pitch=[100, 100], unit_size=[100, 100], pins=inst8_pins,\n transform='R0')\n dsn1.append(inst8)\n lib0.append(dsn1)\n\n # Export to a GDS file\n export_from_laygo(lib0, filename='GDS_raw_test2.gds', cellname=None, scale = 1e-9,\n layermapfile=\"gds_default.layermap\", physical_unit=1e-9, logical_unit=0.001, pin_label_height=0.1,\n pin_annotate_layer=['text', 'drawing'], text_height=0.1)\n\n # Import the GDS file back and display\n with open('GDS_raw_test2.gds', 'rb') as stream:\n 
pprint.pprint(readout(stream, scale=1e-9))\n\n\n"
},
{
"alpha_fraction": 0.7149389982223511,
"alphanum_fraction": 0.7169715166091919,
"avg_line_length": 32.931034088134766,
"blob_id": "37f41f9e4d065cfe4bfb5b2331ffb3a7b607c956",
"content_id": "af7748d87a12896844fce736ad87fcd8c80905b4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3388,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 58,
"path": "/docs_workspace/user_guide_kor/6_technology.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# 신규 공정에서의 Laygo2 셋업\n\n신규 공정에서 Laygo2를 설치하기 위해서는, 해당 공정에 대한 다음 파일들을 파이썬 패키지의 형태로 준비하여, \nlaygo2 제네레이터 코드에서 관련 Paython module들(template, grid)를 import할 수 있도록 한다.\n\n* ***(technology_name)*_example.layermap**: 공정 PDK에 의해 정의된 레이아웃 레이어 정보\n* ***(technology_name)*_example.lyp**: (optional) KLayout용 레이어 디스플레이 정보\n* ***(technology_name)*_templates.py**: Template 정의 파이선 코드\n* ***(technology_name)*_grids.py**: Grid 정의 파이선 코드\n* ***(technology_name)*_example.yaml**: (_template.py, _grid.py파일들에 의하여 사용될 시) 레이아웃 관련 파라미터들\n* ***__init__.py***: 패키지 정의 파이선 코드\n\nLaygo2의 공정 패키지 예제는 [다음 경로](../../examples/technology_example)에서 찾을 수 있다.\n\n공정 패키지를 구성하는 각각의 파일들에 대한 설명은 아래에 기술되어 있다.\n\n\n## *(technology_name)*_example.layermap\n\n사용되는 공정 PDK에서 사용되는 layer 맵핑 정보를 가지고 있는 파일이며, 해당 layer정보들은 \n내부 레이아웃 개체 생성 및 변환, GDS생성, Skill script출력 등에 사용된다. \n\n해당 layermap 파일은 일반적으로 공정 PDK에서 제공된다. \n사용자가 직접 layermap파일을 만들 경우, 행마다 레이어 정보를 정의하는 다음 형식의 파일을 생성하면 된다 \n(상세한 내용은 예제 layermap 파일 참조).\n\n*layername layerpurpose stream_layer_number datatype*\n\n\n## *(technology_name)*_templates.py\n\n공정 패키지에서 제공되는 템플릿들을 정의하고 있는 파이썬 코드이며, 해당 파일의 load_templates()라는 함수가\n호출되면, 공정에서 사용될 다양한 템플릿 개체들(MOS, CAP, RES등)을 생성하여 템플릿 라이브러리(TemplateLibrary)\n개체로 묶어 반환하는 작업을 수행한다.\n\n템플릿의 경우 NativeInstanceTemplate(고정 인스턴스용), ParameterizedInstanceTemplate(PCell용), \nUserDefinedTemplate(사용자 정의형-PCell in Python)의 세가지 클래스를 기본으로 제공한다.\n\n\n## *(technology_name)*_grids.py\n\n공정 패키지에서 제공되는 배치(placement)/배선(routing) 그리드들을 정의하고 있는 파이썬 코드이며, 해당 파일의 \nload_grids()라는 함수가호출되면, 공정에서 사용될 다양한 그리드 개체들을 생성하여 그리드 라이브러리(GridLibrary)\n개체로 묶어 반환하는 작업을 수행한다.\n\n\n## *(technology_name)*_example.yaml\n\n공정 패키지의 템플릿 및 그리드 정의 파일들 (_templates.py, _grids.py)에서 사용되는 다양한 파라미터들을\n모아놓은 파일. 해당 파일은 필수적으로 요구되는 것은 아니며, 실제 templates.py, grids.py파일을 작성하는 \n형태에 따라 필요하지 않을 수 있다. 예제 공정 패키지에서는 해당 파일에서 템플릿의 크기(unit_size), 핀 정보(pins)\n구조 정보들 (rects), 그리고 그리드의 종류, 크기, 좌표들, 라우팅 그리드 정보들 (레이어, 방향, via 등)을 \n저장하고 있다.\n\n\n## __init__.py\n\n패키지 로드시 load_templates / load_grids 함수들을 읽을 수 있도록 하는 코드가 들어 있다.\n"
},
{
"alpha_fraction": 0.5780023336410522,
"alphanum_fraction": 0.5893347859382629,
"avg_line_length": 42.09513854980469,
"blob_id": "c9bcedc01cb655fd4d65d5c01da26ecdef56ef0f",
"content_id": "ac47ca1b7dea793d35c16b1203c3ffcbf8921e33",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20384,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 473,
"path": "/laygo2/object/template.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements classes for various layout template objects.\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nfrom abc import *\nimport numpy as np\nimport laygo2.object\n\n\nclass Template(metaclass=ABCMeta):\n \"\"\"\n An abstract class of templates. Templates are defined by inheriting this class and implementing the following\n core functions in their specific ways.\n xy(params): returns the x and y coordinates of the template for the parameters given by params.\n pins(params): returns a dictionary that contains Pin objects of the template for the input parameters.\n generate(params): returns a generated instance for the parameters given by params.\n\n Three representative templates are implemented in this module, which cover most usage cases:\n 1. NativeInstanceTemplate: generates a single, non-parameterized instance.\n 2. ParameterizedInstanceTemplate: generates a single, parameterized instance (p-cell)\n 3. 
UserDefinedTemplate: generates a virtual instance (composed of multiple objects) from its custom generate(params) function.\n\n    Or users can just inherit this Template class and implement abstract functions to build a new template.\n\n    \"\"\"\n\n    name = None\n    \"\"\"str: The name of this template.\"\"\"\n\n    def __init__(self, name=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        name : str or None, optional.\n            The name of this template.\n        \"\"\"\n        self.name = name\n\n    def __str__(self):\n        \"\"\"Returns a string corresponding to this object's information.\"\"\"\n        return self.summarize()\n\n    def summarize(self):\n        \"\"\"Returns the summary of the template information.\"\"\"\n        return self.__repr__() + \" \" \\\n               \"name: \" + self.name + \", \" + \\\n               \"class: \" + self.__class__.__name__ + \", \" + \\\n               \"\"\n\n    def height(self, params=None):\n        \"\"\"int: Returns the height of the template.\"\"\"\n        return abs(self.bbox(params=params)[0, 1] - self.bbox(params=params)[1, 1])\n\n    def width(self, params=None):\n        \"\"\"int: Returns the width of the template.\"\"\"\n        return abs(self.bbox(params=params)[0, 0] - self.bbox(params=params)[1, 0])\n\n    def size(self, params=None):\n        \"\"\"numpy.array(dtype=int): Returns the size of the template.\"\"\"\n        return np.array([self.width(params=params), self.height(params=params)])\n\n    @abstractmethod\n    def bbox(self, params=None):\n        \"\"\"\n        Computes the xy-coordinates of the bounding box of this template, corresponding to params.\n\n        Parameters\n        ----------\n        params : dict() or None, optional.\n            The dictionary that contains the parameters of the bounding box computed.\n\n        Returns\n        -------\n        numpy.ndarray(dtype=int) : A 2x2 integer array that contains the bounding box coordinates.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def pins(self, params=None):\n        \"\"\"\n        Returns the dictionary that contains the pins of this template, corresponding to params.\n\n        Parameters\n        ----------\n        params : dict() or None, optional.\n            The dictionary that contains the parameters of the pins.\n\n        Returns\n        -------\n        Dict[laygo2.object.physical.Pin] : A dictionary that contains pins of this template, with their names as keys.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def generate(self, name=None, shape=None, pitch=None, transform='R0', params=None):\n        \"\"\"\n        Generates an instance from this template.\n\n        Parameters\n        ----------\n        name : str or None, optional.\n            The name of the instance to be generated.\n        shape : numpy.ndarray(dtype=int) or List[int] or None, optional.\n            The shape of the instance to be generated.\n        pitch : numpy.ndarray(dtype=int) or List[int], optional.\n            The pitch between sub-elements of the generated instance.\n        transform : str, optional.\n            The transform parameter of the instance to be generated.\n        params : dict() or None, optional.\n            The dictionary that contains the parameters of the instance to be generated.\n\n        Returns\n        -------\n        laygo2.object.physical.Instance or laygo2.object.physical.VirtualInstance: the generated instance.\n        \"\"\"\n        pass\n\n\nclass NativeInstanceTemplate(Template):\n    \"\"\"A basic template object that generates a vanilla instance.\"\"\"\n    libname = None\n    \"\"\"str: The library name of the template.\"\"\"\n\n    cellname = None\n    \"\"\"str: The cellname of the template.\"\"\"\n\n    _bbox = np.array([[0, 0], [0, 0]])\n    \"\"\"numpy.array(dtype=int): A 2x2 numpy array that specifies the bounding box of the template.\"\"\"\n\n    _pins = None\n    \"\"\"Dict[laygo2.object.Pin] or None: A dictionary that contains pin information.\"\"\"\n\n    
def __init__(self, libname, cellname, bbox=np.array([[0, 0], [0, 0]]), pins=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        libname : str\n            The library name of the template.\n        cellname : str\n            The cell name of the template.\n        bbox : List[int] or numpy.ndarray(dtype=int)\n            The xy-coordinates of the bounding box of the template.\n        pins : Dict[laygo2.object.Pin]\n            A dict that contains the pin information of the template.\n        \"\"\"\n        self.libname = libname\n        self.cellname = cellname\n        self._bbox = None if bbox is None else np.asarray(bbox)\n        self._pins = pins\n        Template.__init__(self, name=cellname)\n\n    def summarize(self):\n        \"\"\"Returns the summary of the template information.\"\"\"\n        return self.__repr__() + \" \" \\\n               \"name: \" + self.name + \", \" + \\\n               \"class: \" + self.__class__.__name__ + \", \" + \\\n               \"bbox: \" + str(self.bbox().tolist()) + \", \" + \\\n               \"pins: \" + str(self.pins()) + \", \" + \\\n               \"\"\n\n    # Core template functions\n    def bbox(self, params=None):\n        \"\"\"\n        Computes the xy-coordinates of the bounding box of this template, corresponding to params.\n        See laygo2.object.template.Template.bbox() for details.\n        \"\"\"\n        return self._bbox\n\n    def pins(self, params=None):\n        \"\"\"\n        Returns the dictionary that contains the pins of this template, corresponding to params.\n        See laygo2.object.template.Template.pins() for details.\n        \"\"\"\n        return self._pins\n\n    def generate(self, name=None, shape=None, pitch=None, transform='R0', params=None):\n        \"\"\"\n        Creates an instance from this template. See laygo2.object.template.Template.generate() for details.\n\n        Parameters\n        ----------\n        name : str or None, optional.\n            The name of the instance to be generated.\n        shape : numpy.ndarray(dtype=int) or List[int] or None, optional.\n            The shape of the instance to be generated.\n        pitch : numpy.ndarray(dtype=int) or List[int], optional.\n            The pitch between sub-elements of the generated instance.\n        transform : str, optional.\n            The transform parameter of the instance to be generated.\n        params : dict() or None, optional.\n            The dictionary that contains the parameters of the instance to be generated.\n\n        Returns\n        -------\n        laygo2.object.physical.Instance or laygo2.object.physical.VirtualInstance: the generated instance.\n        \"\"\"\n        return laygo2.object.physical.Instance(libname=self.libname, cellname=self.cellname, xy=np.array([0, 0]),\n                                               shape=shape, pitch=pitch, unit_size=self.size(params), pins=self.pins(params),\n                                               transform=transform, name=name, params=params)\n\n    # I/O functions\n    def export_to_dict(self):\n        db = dict()\n        db['libname'] = self.libname\n        db['cellname'] = self.cellname\n        db['bbox'] = self.bbox().tolist()\n        db['pins'] = dict()\n        for pn, p in self.pins().items():\n            db['pins'][pn] = p.export_to_dict()\n        return db\n\n\nclass ParameterizedInstanceTemplate(Template):\n    \"\"\"A parameterized-instance-based template that helps users to define the templates without implementing\n    the instantiation functions.\"\"\"\n\n    libname = None\n    \"\"\"str: The library name of the template.\"\"\"\n\n    cellname = None\n    \"\"\"str: The cellname of the template.\"\"\"\n\n    _bbox = None\n    \"\"\"callable(params=dict()): Returns the x and y coordinates of the template. Should be replaced with a user-defined \n    function.\"\"\"\n\n    _pins = None\n    \"\"\"callable(params=dict()): Returns a dictionary that contains the pin information. 
Should be replaced with a \n    user-defined function.\"\"\"\n\n    def __init__(self, libname, cellname, bbox_func=None, pins_func=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        libname : str\n            The library name of the template.\n        cellname : str\n            The cell name of the template.\n        bbox_func : callable(params=dict())\n            The function that returns the xy-coordinates for the template boundary.\n        pins_func : callable(params=dict)\n            The function that returns a dict that contains the pin information of the template.\n        \"\"\"\n        self.libname = libname\n        self.cellname = cellname\n        self._bbox = bbox_func\n        self._pins = pins_func\n        Template.__init__(self, name=cellname)\n\n    # Core template functions\n    def bbox(self, params=None):\n        \"\"\"\n        Computes the xy-coordinates of the bounding box of this template, corresponding to params.\n        See laygo2.object.template.Template.bbox() for details.\n        \"\"\"\n        return self._bbox(params=params)\n\n    def pins(self, params=None):\n        \"\"\"\n        Returns the dictionary that contains the pins of this template, corresponding to params.\n        See laygo2.object.template.Template.pins() for details.\n        \"\"\"\n        return self._pins(params=params)\n\n    def generate(self, name=None, shape=None, pitch=None, transform='R0', params=None):\n        \"\"\"\n        Creates an instance from this template. See laygo2.object.template.Template.generate() for details.\n        \"\"\"\n        return laygo2.object.physical.Instance(libname=self.libname, cellname=self.cellname, xy=np.array([0, 0]),\n                                               shape=shape, pitch=pitch, unit_size=self.size(params),\n                                               pins=self.pins(params), transform=transform, name=name, params=params)\n\n\nclass UserDefinedTemplate(Template):\n    \"\"\"A virtual-instance-based template that produces subelements by calling user-defined functions.\n    \"\"\"\n\n    _bbox = None\n    \"\"\"callable(params=dict()): Returns the bounding box of the template. Should be replaced with a user-defined \n    function.\"\"\"\n\n    _pins = None\n    \"\"\"callable(params=dict()): Returns a dictionary that contains the pin information. 
Should be replaced with a \n    user-defined function.\"\"\"\n\n    _generate = None\n    \"\"\"callable(name, shape, pitch, transform, params): \n    Returns a generated instance based on the input arguments.\n    \n    Should be mapped to a user-defined function that follows the definition format below:\n    \n    def generate_function_name(name=None, shape=None, pitch=np.array([0, 0]), transform='R0', params=None):\n        body_of_generate_function\n        return generated_instance\n    \"\"\"\n\n    def __init__(self, bbox_func, pins_func, generate_func, name=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        bbox_func: callable\n            A function that computes the bounding box coordinates of the template.\n        pins_func: callable\n            A function that produces a dictionary that contains pin information of the template.\n        generate_func: callable\n            A function that generates a (virtual) instance from the template.\n        name : str\n            The name of the template.\n        \"\"\"\n        self._bbox = bbox_func\n        self._pins = pins_func\n        self._generate = generate_func\n        Template.__init__(self, name=name)\n\n    # Core template functions\n    def bbox(self, params=None):\n        \"\"\"\n        Computes the xy-coordinates of the bounding box of this template, corresponding to params.\n        See laygo2.object.template.Template.bbox() for details.\n        \"\"\"\n        return self._bbox(params=params)\n\n    def pins(self, params=None):\n        \"\"\"\n        Returns the dictionary that contains the pins of this template, corresponding to params.\n        See laygo2.object.template.Template.pins() for details.\n        \"\"\"\n        return self._pins(params=params)\n\n    def generate(self, name=None, shape=None, pitch=None, transform='R0', params=None):\n        \"\"\"\n        Creates an instance from this template. See laygo2.object.template.Template.generate() for details.\n        \"\"\"\n        return self._generate(name=name, shape=shape, pitch=pitch, transform=transform, params=params)\n\n\n# Test\nif __name__ == '__main__':\n    test_native_template = True\n    test_pcell_template = True\n    test_user_template = True\n\n    import laygo2.object\n\n    if test_native_template:\n        print(\"NativeInstanceTemplate test\")\n        # define pins\n        nat_temp_pins = dict()\n        nat_temp_pins['in'] = laygo2.object.Pin(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'], netname='in')\n        nat_temp_pins['out'] = laygo2.object.Pin(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'], netname='out')\n        # create a template\n        nat_temp = NativeInstanceTemplate(libname='mylib', cellname='mynattemplate', bbox=[[0, 0], [100, 100]],\n                                          pins=nat_temp_pins)\n        # generate\n        nat_inst = nat_temp.generate(name='mynatinst', shape=[2, 2], pitch=[100, 100], transform='R0')\n        # display\n        print(nat_temp)\n        print(nat_inst)\n\n    if test_pcell_template:\n        print(\"ParameterizedInstanceTemplate test\")\n\n        # define the bbox computation function.\n        def pcell_bbox_func(params):\n            return np.array([[0, 0], [100 * params['mult'], 100]])\n\n        # define the pin generation function.\n        def pcell_pins_func(params):\n            template_pins = dict()\n            for i in range(params['mult']):\n                template_pins['in' + str(i)] = laygo2.object.Pin(xy=[[i * 100 + 0, 0], [i * 100 + 10, 10]],\n                                                                 layer=['M1', 'drawing'], netname='in' + str(i))\n                template_pins['out' + str(i)] = laygo2.object.Pin(xy=[[i * 100 + 90, 90], [i * 100 + 100, 100]],\n                                                                  layer=['M1', 'drawing'], netname='out' + str(i))\n            return template_pins\n\n        # create a template.\n        pcell_temp = ParameterizedInstanceTemplate(libname='mylib', cellname='mypcelltemplate',\n                                                   bbox_func=pcell_bbox_func, pins_func=pcell_pins_func)\n        # generate based on the parameter assigned.\n        pcell_inst_params = {'mult': 4}\n        
pcell_inst_size = pcell_temp.size(params=pcell_inst_params)\n        pcell_inst = pcell_temp.generate(name='mypcellinst', shape=[2, 2], pitch=pcell_inst_size, transform='R0',\n                                         params=pcell_inst_params)\n        # display\n        print(pcell_temp)\n        print(pcell_inst)\n\n    if test_user_template:\n        print(\"UserDefinedTemplate test\")\n\n        # define the bbox computation function.\n        def user_bbox_func(params):\n            return np.array([[0, 0], [100 * params['mult'], 100]])\n\n        # define the pin generation function.\n        def user_pins_func(params):\n            template_pins = dict()\n            for i in range(params['mult']):\n                template_pins['in' + str(i)] = laygo2.object.Pin(xy=[[i * 100 + 0, 0], [i * 100 + 10, 10]],\n                                                                 layer=['M1', 'drawing'], netname='in' + str(i))\n                template_pins['out' + str(i)] = laygo2.object.Pin(xy=[[i * 100 + 90, 90], [i * 100 + 100, 100]],\n                                                                  layer=['M1', 'drawing'], netname='out' + str(i))\n            return template_pins\n\n        # define the instance generation function.\n        def user_generate_func(name=None, shape=None, pitch=np.array([0, 0]), transform='R0', params=None):\n            m = params['mult']\n            shape = np.array([1, 1]) if shape is None else np.asarray(shape)\n\n            inst_pins = user_pins_func(params)\n            inst_native_elements = dict()\n            for i in range(m):\n                ofst = i * 100\n                inst_native_elements['R0_' + str(i)] = laygo2.object.Rect(xy=[[ofst, 0], [ofst + 10, 10]],\n                                                                          layer=['M1', 'drawing'])\n                inst_native_elements['R1_' + str(i)] = laygo2.object.Rect(xy=[[ofst + 90, 90], [ofst + 100, 100]],\n                                                                          layer=['M1', 'drawing'])\n            inst_native_elements['R2'] = laygo2.object.Rect(xy=[[0, 0], [m * 100, 100]],\n                                                            layer=['prBoundary', 'drawing'])\n            inst = laygo2.object.VirtualInstance(name=name, libname='mylib', cellname='myvinst', xy=np.array([0, 0]),\n                                                 native_elements=inst_native_elements, shape=shape,\n                                                 pitch=pitch, unit_size=[m * 100, 100], pins=inst_pins,\n                                                 transform=transform, params=params)\n            return inst\n\n        user_temp = UserDefinedTemplate(name='myusertemplate', bbox_func=user_bbox_func, pins_func=user_pins_func,\n                                        generate_func=user_generate_func)\n        user_inst = user_temp.generate(name='myuserinst', shape=[2, 1], params={'mult': 5})\n        print(user_temp)\n        print(user_inst)\n        print(user_inst.bbox)\n"
},
{
"alpha_fraction": 0.39319512248039246,
"alphanum_fraction": 0.4055880904197693,
"avg_line_length": 58.14666748046875,
"blob_id": "5095ef5377cc344c1ec5a6ee1d663d24fc048f12",
"content_id": "3d0c5bc8040d6d780b860edabcedd7cbaedadc0f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6996,
"license_type": "permissive",
"max_line_length": 1920,
"num_lines": 75,
"path": "/docs_workspace/user_guide_kor/README.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# Laygo2 사용자 안내서 (한글) \n**Laygo User Manual (Korean)**\n\n작성자: 한재덕 (jdhan at hanyang dot ac dot kr)\n\n## 서론\n이 사용자 안내서는 레이아웃 스크립팅 및 자동생성 프레임웍인 Laygo2의 사용을 돕기 위하여 작성되었다.\nLaygo는 본래 커스텀 집적회로 자동생성 프레임웍인 Berkeley Analog Generator 2 (BAG2)의 \n레이아웃 생성 엔진으로 개발되었으며 \\[[참고자료](https://ieeexplore.ieee.org/document/9314047)\\], Laygo의 기능을 보완 확장하여 Laygo2가 개발되었다.\nLaygo2는 단독 또는 BAG2와 결합한 형태로 실행이 모두 가능하다.\n\n**Laygo2는 다음과 같은 환경에서의 IC레이아웃 작업에 최적화되어 있다.**\n\n1. 레이아웃 설계 과정의 자동화 및 재사용\n1. 파라미터 기반 레이아웃 생성\n1. FinFET등의 미소 공정용 레이아웃\n1. 코드 기반 레이아웃 작업\n\n**기존 Laygo와 Laygo2가 공통적으로 갖는 특징들은 다음과 같다.**\n1. **템플릿과 그리드 기반 소자 배치**: 공정 별로 고유의 소자 인스턴스 및 좌표값들(physical \ncoordinates) 직접 사용하지 않고, 템플릿 형태로 추상화 된 인스턴스 및 추상화된 격자(그리드, grid)의 \n사용으로, 레이아웃 생성 코드의 이식성 및 재사용성을 높였다.\n1. **상대적인 정보에 기반한 배치**: 생성된 인스턴스들을 배치할 때, 인스턴스 간의 상대적인 정보\n(relative information)을 활용하여, 매우 높은 수준의 추상화를 달성하여 코드의 공정 이식성을 극대화하였다.\n1. **그리드 기반 배선**: 라우팅 배선들도 공정 독립적인 추상화 된 격자들 위에 배치하도록 배선 함수들을\n구현함으로서, 배선 작업의 공정 이식성을 향상하였다. \n\n**기존 Laygo와 비교하여 Laygo2가 갖는 장점은 다음과 같다.**\n1. **객체 지향 요소의 강화**: 모듈간 의존성을 최소화하여, 이식성과 재사용성을 증대하고, \n객체 지향 프로그래밍 기법을 활용하여 레이아웃 생성 과정을 기술할 수 있도록 하였다.\n일례로, Physical 모듈의 클래스들은 다른 모듈들과 독립적으로 사용이 가능하며, Template 클래스들은 \n기존에 정의된 클래스들을 상속하는 방식으로 재정의 및 확장이 가능하도록 설계되어 있다.\n1. **정수 기반 좌표 연산**: 실수 기반 좌표 연산에서 정수 기반 좌표 연산으로 변경하여 실수 연산 \n과정에서의 오동작 가능성을 최소화하였다. 정수 기반으로 계산된 좌표들은 최종 입출력 시에만 scale 값에\n따라 실제 값들로 변환된다.\n1. **향상된 템플릿 생성 기능 제공**: 기존의 싱글 인스턴스 기반 템플릿 (NativeInstanceTemplate)에 \n추가로, PCell기반 템플릿 또는, 사용자 정의형 템플릿 클래스 (UserdefinedTemplate)을 제공하여 좀 더 \n다양한 형태의 템플릿 정의 및 인스턴스 생성이 가능해졌다. 이외에 사용자가 추상화 클래스를 \n상속하는 방법으로 새로운 템플릿 클래스를 생성할 수도 있다. \n1. **향상된 인스턴스 및 그리드 인덱싱 시스템 제공**: 이제 인스턴스와 그리드 객체들은 Numpy array에 \n더욱 밀접하게 통합되어 더 쉬운 인덱싱 및 슬라이싱 기능을 제공한다. 그리드의 경우는 기존 Numpy array를 \n확장하여 그리드가 정의된 범위를 넘어선 제한 없는 인덱싱이 가능하다. Pandas에서 사용된 conditional indexing \n방식을 이용해 좌표 역연산 기능을 구현하였다.\n1. 멀티 패터닝 관련 기능(color, cut)을 지원한다.\n1. 코드 전반에 걸친 개선이 이루어졌다.\n\n## 쉬운 예제들\nLaygo2의 구조 및 동작 방식에 대해 본격적인 설명에 앞서, Laygo2의 대략적인 동작에 대한 이해를 돕기 위한 예제들이\n[이 문서](2_examples.md)에 소개되어 있다.\n\n## Laygo2의 구조\nLaygo2를 구성하는 패키지 및 모듈들의 구조가 [이 문서](3_structure.md)에 서술되어 있다.\n\n\n## Laygo2의 레이아웃 생성 절차\n1. 공정 파라미터, 기존 템플릿, 그리드 불러오기\n1. 템플릿에서 인스턴스 생성\n1. 생성된 인스턴스의 배치\n1. 인스턴스간 Wire 및 Via 라우팅\n1. 핀 생성\n1. 생성된 레이아웃을 적절한 포맷으로 출력\n1. (선택사항) 생성된 레이아웃을 새로운 템플릿으로 저장\n\n## 신규 공정에서의 Laygo2 설치\n신규 공정에서 Laygo2용 공정 패키지를 제작하는 방법이 [이 문서](6_technology.md)에 서술되어 있다.\n\n## 주요 기여자들\n1. 한재덕 (한양대학교 융합전자공학부 조교수)\n2. 이동준 (한양대학교)\n3. 신태호 (한양대학교)\n4. 김동휘 (한양대학교)\n5. 성개륜 (한양대학교)\n\n## 라이센싱 및 배포\nLaygo2는 BSD라이센스 형태로 배포되며, 오픈소스의 형태로 개발된다.\n\n\n"
},
{
"alpha_fraction": 0.7854077219963074,
"alphanum_fraction": 0.7974785566329956,
"avg_line_length": 53,
"blob_id": "886041a69670e08218b65a9ecbb4db1d95ca387a",
"content_id": "5ee0a3b6197aa62db1c0eebfae9f46c81f4b1ad4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3728,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 69,
"path": "/docs_workspace/user_guide/README.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# Laygo2 User Guide (English)\n\nJaeduk Han (jdhan at hanyang dot ac dot kr)\n \n## Introduction\nThis user guide is provided to help the use of LAYGO2, a layout scripting and automatic generation framework.\nLAYGO, the previous version of LAYGO2, is developed for one of layout generation engines in \nBerkeley Analog Generator2 (BAG2), and LAYGO2 extends the LAYGO's functions and capabilities.\nLAYGO2 can be launched either in standalone mode or in combination with BAG2.\n\n**LAYGO2 is optimized to IC layout generations in the following conditions.**\n\n1. Automations and reuse of layout design flow.\n1. Parameter-based layout generation.\n1. Layout in advanced processes (e.g. FinFET CMOS).\n1. Code-based layout generation.\n\n**The major features supported by both LAYGO and LAYGO2 are summarized below:**\n\n1. **Template and grid-based device placements**: LAYGO enhances the portability and reusability of layout \ngeneration process by using abstract grids and instances (templates), without directly dealing with physical \ninstances and coordinates.\n1. **Instance placements based on relative information**: The process portability is maximized by enhancing \nthe level of abstraction by utilizing relative information between instances for their placements.\n1. **Grid-based wire routing**: The wire routing process is abstracted as well, by placing the interconnects \non process-independent routing grids. \n\n**Laygo2 has the following improvement items over the original laygo.**\n1. **Enhanced object-oriented features**: The module dependency is minimized to enhance the process portability \nand reusability. The layout generation process is fully described in object-oriented ways.\nFor examples, classes defined in the physical module can be used independently, and new template types can be \neasily implemented by inheriting the base Template class.\n1. **Integer-based grid computations**: laygo originally used real-number-based grid systems. laygo2 converted \nall grid systems to integer-based ones to avoid floating-point operations. The integer number coordinates are \nconverted to real numbers during the final export phase.\n1. **Advanced template generation**: In additional to the single-instance-based template (NativeInstanceTemplate),\nlaygo2 implements various ways of template types, including parameterized-instance-based template\n(ParameterizedInstanceTemplate) and user-defined templates (UserDefinedTemplate). laygo2 supports inheritance \nof the primitive template class (Template) for new template definitions.\n1. **Advanced instanced and grid indexing**: Instance and grid objects are tightly integrated to Numpy \narray objects, to support advanced indexing and slicing functions. The grid objects especially extend \nthe Numpy array to implement unlimited indexing over their defined ranges.\n1. **Code quality enhancement and refactoring**\n1. (on-going) **More documents and tests added**\n1. (on-going) **Generator code separated from the main framework**\n1. (on-going) **Supports multi-patterning**\n\n## Simple Examples\nSeveral examples are introduced [here](2_examples.md) to help users understand the basic behavior of laygo2.\n\n## Laygo2 structure\nThe structures of packages and modules in laygo2 are described [here](3_structure.md).\n\n\n\n## Laygo2 design flow\n1. Load technology parameters, primitive templates, and grids.\n1. Generate instances from templates.\n1. Place the generated instances.\n1. Route wires and vias between the instances' terminals.\n1. 
\n## Laygo2 setup for a new technology\n\n## Developers\nJaeduk Han (jdhan at hanyang dot ac dot kr) and contributors at Hanyang University.\n\n## License\nlaygo2 is distributed under the BSD license and is developed as an open-source project.\n\n\n"
},
{
"alpha_fraction": 0.5559976100921631,
"alphanum_fraction": 0.565962016582489,
"avg_line_length": 38.66642761230469,
"blob_id": "77cac260ae1e670dcb9b67161da0ef70ad1acb18",
"content_id": "6a98c355c45cbe6435551575394524f2624334b9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55297,
"license_type": "permissive",
"max_line_length": 175,
"num_lines": 1394,
"path": "/laygo2/object/grid.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements classes for grid operations, for placement and routing of layout objects.\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nimport numpy as np\nimport laygo2.object\n#import laygo2.util.conversion as cv\n\n\n# Internal functions.\ndef _extend_index_dim(input_index, new_index, new_index_max):\n \"\"\"\n A helper function to be used for the multi-dimensional circular array indexing.\n It extends the dimension of the input array (input_index) that contains indexing information, with the additional\n indexing variable (new_index) provided. 
The new_index_max variable is specified in case the new_index does not\n    contain the maximum index information (perhaps when an open-end slice is given for the new_index).\n    \"\"\"\n    # Construct an iterator from new_index\n    if isinstance(new_index, (int, np.integer)):\n        it = [new_index]\n    else:\n        if isinstance(new_index, slice):\n            # slices don't work very well with multi-dimensional circular mappings.\n            it = _conv_slice_to_list(slice_obj=new_index, stop_def=new_index_max)\n        else:\n            it = new_index\n    # Index extension\n    if input_index is None:\n        output = []\n        for i in it:\n            output.append(tuple([i]))\n        return output\n    else:\n        output = []\n        for _i in input_index:\n            output_row = []\n            for i in it:\n                output_row.append(tuple(list(_i) + [i]))\n            output.append(output_row)\n        return output\n\n\ndef _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n    \"\"\"Convert a slice to a list.\"\"\"\n    if slice_obj.start is None:\n        start = start_def\n    else:\n        start = slice_obj.start\n    if slice_obj.stop is None:\n        stop = stop_def\n    else:\n        stop = slice_obj.stop\n    if slice_obj.step is None:\n        step = step_def\n    else:\n        step = slice_obj.step\n    return list(range(start, stop, step))\n\n\ndef _conv_bbox_to_array(bbox):\n    \"\"\"\n    Convert a bbox object to a 2-d array.\n    \"\"\"\n    array = list()\n    for r in range(bbox[0, 1], bbox[1, 1] + 1):\n        row = list()\n        for c in range(bbox[0, 0], bbox[1, 0] + 1):\n            row.append([c, r])\n        array.append(row)\n    return np.array(array)\n\n\ndef _conv_bbox_to_list(bbox):\n    \"\"\"\n    Convert a bbox object to a list of coordinates.\n    \"\"\"\n    array = list()\n    for r in range(bbox[0, 1], bbox[1, 1] + 1):\n        for c in range(bbox[0, 0], bbox[1, 0] + 1):\n            array.append([c, r])\n    return array\n\n\n# Internal classes\nclass CircularMapping:\n    \"\"\"\n    A one-dimensional mapping class, circulating over the defined range.\n    \"\"\"\n    _elements = None\n    \"\"\"numpy.ndarray: the internal variable of elements.\"\"\"\n    dtype = np.int\n    \"\"\"type_like: the type of elements.\"\"\"\n\n    def get_elements(self):\n        \"\"\"numpy.ndarray: gets the elements.\"\"\"\n        return self._elements\n\n    def set_elements(self, value):\n        \"\"\"numpy.ndarray: sets the elements.\"\"\"\n        self._elements = np.asarray(value, dtype=self.dtype)\n\n    elements = property(get_elements, set_elements)\n    \"\"\"numpy.ndarray: the array that contains the physical coordinates of the grid.\"\"\"\n\n    @property\n    def shape(self):\n        \"\"\"numpy.ndarray: the shape of the mapping.\"\"\"\n        return np.array(self.elements.shape)\n\n    def __init__(self, elements=np.array([0]), dtype=np.int):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        elements : numpy.ndarray or list\n            The elements of the circular mapping object.\n        dtype : type_like\n            The data type of the circular mapping object.\n        \"\"\"\n        self.dtype = dtype\n        self.elements = np.asarray(elements, dtype=dtype)\n\n    # indexing and slicing\n    def __getitem__(self, pos):\n        \"\"\"\n        Returns elements corresponding to the indices given by pos, assuming the circular indexing.\n\n        Parameters\n        ----------\n        pos : int or tuple, or list of int or tuple\n            The index of elements to be returned.\n        \"\"\"\n        if isinstance(pos, (int, np.integer)):\n            return self.elements[pos % self.shape[0]]\n        elif isinstance(pos, slice):\n            return self.__getitem__(pos=_conv_slice_to_list(slice_obj=pos, stop_def=self.shape[0]))\n        elif isinstance(pos, np.ndarray):\n            return np.array([self.__getitem__(pos=p) for p in pos])\n        elif isinstance(pos, list):\n            return [self.__getitem__(pos=p) for p in pos]\n        elif pos is None:\n            return None\n        
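# Illustrative example: with elements=[10, 20, 30], a scalar index wraps circularly\n        # through the integer branch above, so self[4] returns elements[4 % 3] == 20.\n        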
else:\n            raise TypeError(\"CircularMapping received an invalid index:%s\" % str(pos))\n\n    # Iterators\n    def __iter__(self):\n        \"\"\"Iterator function. Directly mapped to the object's elements.\"\"\"\n        return self.elements.__iter__()\n\n    def __next__(self):\n        \"\"\"Iterator function. Directly mapped to the object's elements.\"\"\"\n        # Check if numpy.ndarray implements __next__()\n        return self.elements.__next__()\n\n    # Informative functions\n    def __str__(self):\n        return self.summarize()\n\n    def summarize(self):\n        \"\"\"Returns the summary of the object information.\"\"\"\n        return self.__repr__() + \" \" \\\n               \"class: \" + self.__class__.__name__ + \", \" + \\\n               \"elements: \" + str(self.elements)\n\n\nclass CircularMappingArray(CircularMapping):\n    \"\"\"\n    A multi-dimensional circular mapping. Split from the original circular mapping class to reduce complexity.\n    \"\"\"\n    def __getitem__(self, pos):\n        \"\"\"\n        Returns elements corresponding to the indices given by pos, assuming the circular indexing.\n\n        Parameters\n        ----------\n        pos : int or tuple, or list of int or tuple\n            The index of elements to be returned.\n        \"\"\"\n        if isinstance(pos, list):  # pos contains multiple indices as a list\n            return [self.__getitem__(pos=p) for p in pos]\n        elif pos is None:\n            return None\n        elif np.all(np.array([isinstance(p, (int, np.integer)) for p in pos])):\n            # pos is mapped to a single element (pos is composed of integer indices).\n            # just do rounding.\n            idx = []\n            for p, s in zip(pos, self.shape):\n                idx.append(p % s)\n            return self.elements[tuple(idx)]\n        else:\n            # pos is mapped to multiple indices (possible examples include [0:5, 3], [[1, 2, 3], 3], ...).\n            # Create a list containing the indices to iterate over, and return a numpy.ndarray containing items\n            # corresponding to the indices in the list.\n            # When the indices don't specify the lower boundary (e.g., [:5]), it iterates from 0.\n            # When the indices don't specify the upper boundary (e.g., [3:]), it iterates to the maximum index defined.\n            idx = None\n            for i, p in enumerate(pos):\n                idx = _extend_index_dim(idx, p, self.shape[i])\n            idx = np.asarray(idx)\n            # iterate and generate the list to return\n            item = np.empty(idx.shape[:-1], dtype=self.dtype)  # -1 because the tuples in idx are flattened.\n            for i, _null in np.ndenumerate(item):\n                item[i] = self.__getitem__(pos=tuple(idx[i]))\n            return np.asarray(item)\n\n\nclass _AbsToPhyGridConverter:\n    \"\"\"\n    An internal helper class that maps abstract coordinates to physical ones.\n    \"\"\"\n\n    master = None\n    \"\"\"OneDimGrid or Grid: the master grid object that this converter belongs to.\"\"\"\n\n    # Constructor\n    def __init__(self, master):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        master: OneDimGrid or Grid\n            The master grid object of the converter.\n        \"\"\"\n        self.master = master\n\n    # Access functions.\n    def __call__(self, pos):\n        \"\"\"\n        Returns the physical coordinate corresponding to the abstract coordinate pos.\n\n        Parameters\n        ----------\n        pos: np.ndarray(dtype=int)\n            Abstract coordinate to be converted.\n\n        Returns\n        -------\n        np.ndarray(dtype=int)\n            Corresponding physical coordinate.\n        \"\"\"\n        return self.__getitem__(pos)\n\n    def __getitem__(self, pos):\n        \"\"\"\n        Returns the physical coordinate corresponding to the abstract coordinate pos.\n\n        Parameters\n        ----------\n        pos: np.ndarray(dtype=int)\n            Abstract coordinate to be converted.\n\n        Returns\n        -------\n        np.ndarray(dtype=int)\n            Corresponding physical coordinate.\n        \"\"\"\n        if (self.master.__class__.__name__ == 'OneDimGrid') or 
(issubclass(self.master.__class__, OneDimGrid)):\n            return self._getitem_1d(pos)\n        if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n            return self._getitem_2d(pos)\n        else:\n            return None\n\n    def _getitem_1d(self, pos):\n        \"\"\"An internal function of __getitem__() for 1-d grids.\"\"\"\n        # Check if pos has multiple elements.\n        if isinstance(pos, slice):\n            return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n        elif isinstance(pos, np.ndarray):\n            return self._getitem_1d(pos.tolist())\n        elif isinstance(pos, list):\n            return np.array([self._getitem_1d(p) for p in pos])\n        elif pos is None:\n            raise TypeError(\"_AbsToPhyGridConverter._getitem_1d does not accept None as its input.\")\n        else:\n            # pos is a single element. Compute quotient and modulo for grid extension.\n            quo = 0\n            mod = int(round(pos))\n            if pos >= self.master.shape[0]:\n                mod = int(round(pos % self.master.shape[0]))\n                quo = int(round((pos-mod) / self.master.shape[0]))\n            elif pos < 0:\n                mod = int(round(pos % self.master.shape[0]))\n                quo = int(round((pos-mod) / self.master.shape[0]))\n            return quo * self.master.range[1] + self.master.elements[mod]\n            # the following command cannot handle the size extension of the grid, disabled.\n            # return self.master.elements.take(pos, mode='wrap')\n\n    def _getitem_2d(self, pos):\n        \"\"\"An internal function of __getitem__() for 2-d grids.\"\"\"\n        if isinstance(pos, list):\n            if isinstance(pos[0], (int, np.integer)):  # single point\n                return self[pos[0], pos[1]]\n            else:\n                return [self[p] for p in pos]\n        elif isinstance(pos, np.ndarray):\n            if isinstance(pos[0], (int, np.integer)):  # single point\n                return np.array(self[pos[0], pos[1]])\n            else:\n                return np.array([self[p] for p in pos])\n        # compute coordinates from OneDimGrids of its master.\n        x = self.master.x[pos[0]]\n        y = self.master.y[pos[1]]\n        # TODO: Refactor the following code to avoid the use of double for loops and list comprehensions.\n        if (not isinstance(x, np.ndarray)) and (not isinstance(y, np.ndarray)):  # x and y are scalars.\n            return np.array([x, y])\n        if not isinstance(x, np.ndarray):  # x is a scalar.\n            return np.array([np.array([x, _y]) for _y in y])\n        elif not isinstance(y, np.ndarray):  # y is a scalar.\n            return np.array([np.array([_x, y]) for _x in x])\n        else:\n            xy = []\n            for _x in x:  # vectorize this operation.\n                row = []\n                for _y in y:\n                    row.append(np.array([_x, _y]))\n                xy.append(np.array(row))\n            return np.array(xy)\n\n    # Reverse-access operators (comparison operators are used for reverse-access).\n    def __eq__(self, other):\n        \"\"\"Returns the absolute coordinate corresponding to other (Inverse-mapping of __getitem__(pos)).\"\"\"\n        return self.master.phy2abs(pos=other)\n\n    def __lt__(self, other):\n        \"\"\"Returns the abstract coordinate corresponding to the physical coordinate that is the largest but less than\n        other.\"\"\"\n        if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n            return self._lt_1d(other)\n        if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n            return self._lt_2d(other)\n        else:\n            return None\n\n    @staticmethod\n    def _phy2abs_operator(other, elements, width, shape, op):\n        def phy2abs(x):\n            quo_coarse = x // width  # the number of full grid periods below x.\n            remain = x % width  # always positive.\n            msb = quo_coarse * shape - 1\n            for i, e in np.ndenumerate(elements):\n                if comp(e, remain):  # search for the maximum element that satisfies comp().\n                    pass\n                else:  # when comp() turns False, return the latest True index.\n                    return msb + i[0] + lsb_offset\n            return msb + shape + lsb_offset\n\n        if op == \"<\":  # maximum element less than other.\n            comp = lambda e, r: e < r\n            lsb_offset = 0\n        elif op == \"<=\":  # maximum element less than or equal to other.\n            comp = lambda e, r: e <= r\n            lsb_offset = 0\n        elif op == \">\":  # minimum element greater than other.\n            comp = lambda e, r: e <= r\n            lsb_offset = 1\n        elif op == \">=\":  # minimum element greater than or equal to other.\n            comp = lambda e, r: e < r\n            lsb_offset = 1\n\n        if isinstance(other, (int, np.integer)):\n            return phy2abs(other)\n        else:\n            list_return = []\n            for o in other:\n                list_return.append(phy2abs(o))\n            return np.array(list_return)\n\n    def _lt_1d(self, other):\n        return self._phy2abs_operator(other, self.master.elements, self.master.width, self.master.elements.shape[0], \"<\")\n\n    def _lt_2d(self, other):\n        if isinstance(other[0], (int, np.integer)):\n            return np.array([self.master.x < other[0],\n                             self.master.y < other[1]])\n        else:\n            return np.array([self._lt_2d(o) for o in other])\n\n    def __le__(self, other):\n        \"\"\"Returns the abstract coordinate that is the largest but less than or equal to other.\"\"\"\n        if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n            return self._le_1d(other=other)\n        if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n            return self._le_2d(other=other)\n\n    def _le_1d(self, other):\n        return self._phy2abs_operator(other, self.master.elements, self.master.width, self.master.elements.shape[0], \"<=\")\n\n    def _le_2d(self, other):\n        if isinstance(other[0], (int, np.integer)):\n            return np.array([self.master.x <= other[0],\n                             self.master.y <= other[1]])\n        else:\n            return np.array([self._le_2d(o) for o in other])\n\n    def __gt__(self, other):\n        \"\"\"Returns the abstract coordinate that is the smallest but greater than other.\"\"\"\n        if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n            return self._gt_1d(other=other)\n        if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n            return self._gt_2d(other=other)\n\n    def _gt_1d(self, other):\n        return self._phy2abs_operator(other, self.master.elements, self.master.width, self.master.elements.shape[0], \">\")\n\n    def _gt_2d(self, other):\n        if isinstance(other[0], (int, np.integer)):\n            return np.array([self.master.x > other[0],\n                             self.master.y > other[1]])\n        else:\n            return np.array([self._gt_2d(o) for o in other])\n\n    def __ge__(self, other):\n        \"\"\"Returns the abstract coordinate that is the smallest but greater than or equal to other.\"\"\"\n        if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n            return self._ge_1d(other=other)\n        if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n            return self._ge_2d(other=other)\n\n    def _ge_1d(self, other):\n        return self._phy2abs_operator(other, self.master.elements, self.master.width, self.master.elements.shape[0], \">=\")\n\n    def _ge_2d(self, other):\n        if isinstance(other[0], (int, np.integer)):\n            return np.array([self.master.x >= other[0],\n                             self.master.y >= other[1]])\n        else:\n            return np.array([self._ge_2d(o) for o in other])\n\n\nclass _PhyToAbsGridConverter:\n    \"\"\"\n    An internal helper class that maps physical coordinates to abstract ones.\n    \"\"\"\n\n    master 
= None\n    \"\"\"OneDimGrid or Grid: the master grid object that this converter belongs to.\"\"\"\n\n    # Constructor\n    def __init__(self, master):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        master: OneDimGrid or Grid\n            The master grid object of the converter.\n        \"\"\"\n        self.master = master\n\n    # Access functions.\n    def __call__(self, pos):\n        \"\"\"\n        Returns the abstract coordinate corresponding to the physical coordinate pos.\n\n        Parameters\n        ----------\n        pos: np.ndarray(dtype=int)\n            Physical coordinate to be converted.\n\n        Returns\n        -------\n        np.ndarray(dtype=int)\n            Corresponding abstract coordinate.\n        \"\"\"\n        return self.__getitem__(pos)\n\n    def __getitem__(self, pos):\n        \"\"\"Returns the abstract coordinate corresponding to the physical grid pos.\n\n        Parameters\n        ----------\n        pos: np.ndarray(dtype=int)\n            Physical coordinate to be converted.\n\n        Returns\n        -------\n        np.ndarray(dtype=int)\n            Corresponding abstract coordinate.\n        \"\"\"\n        if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n            return self._getitem_1d(pos)\n        if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n            return self._getitem_2d(pos)\n        else:\n            return None\n\n    def _getitem_1d(self, pos):\n        \"\"\"An internal function of __getitem__() for 1-d grids.\"\"\"\n        # Check if pos has multiple elements.\n        if isinstance(pos, OneDimGrid):\n            return self._getitem_1d(pos=pos.elements)\n        elif isinstance(pos, slice):\n            return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n        elif isinstance(pos, np.ndarray):\n            return self._getitem_1d(pos.tolist())\n        elif isinstance(pos, list):\n            return np.array([self._getitem_1d(p) for p in pos])\n        elif pos is None:\n            raise TypeError(\"_PhyToAbsGridConverter._getitem_1d does not accept None as its input.\")\n        else:\n            # pos is a single element.\n            for i, e in np.ndenumerate(self.master.elements):\n                if (pos - e) % self.master.width == 0:\n                    return int(round((pos - e) / self.master.width)) * self.master.elements.shape[0] + i[0]\n            return None  # no matched coordinate\n\n    def _getitem_2d(self, pos):\n        \"\"\"An internal function of __getitem__() for 2-d grids.\"\"\"\n        # If pos contains multiple coordinates (or objects), convert recursively.\n        if isinstance(pos, list):\n            if isinstance(pos[0], (int, np.integer)):  # It's actually a single coordinate.\n                return self[pos[0], pos[1]]\n            else:\n                return [self[p] for p in pos]\n        elif isinstance(pos, np.ndarray):\n            if isinstance(pos[0], (int, np.integer)):  # It's actually a single coordinate.\n                return np.array(self[pos[0], pos[1]])\n            else:\n                return np.array([self[p] for p in pos])\n        # If pos contains only one physical object, convert its bounding box to abstract coordinates\n        if (pos.__class__.__name__ == 'PhysicalObject') or (issubclass(pos.__class__, laygo2.object.PhysicalObject)):\n            return self.bbox(pos)\n        # If pos contains only one coordinate, convert it to abstract grid.\n        m = self.master.x == pos[0]\n        n = self.master.y == pos[1]\n        # TODO: refactor the following code to avoid the use of double for-loops and list comprehensions.\n        if (not isinstance(m, np.ndarray)) and (not isinstance(n, np.ndarray)):  # x and y are scalars.\n            return np.array([m, n])\n        if not isinstance(m, np.ndarray):  # x is a scalar.\n            return np.array([np.array([m, _n]) for _n in n])\n        elif not isinstance(n, np.ndarray):  # y is a scalar.\n            return np.array([np.array([_m, n]) for _m in m])\n        else:\n            mn = []\n            for _m in m:  # vectorize this operation.\n                row = []\n                for _n in n:\n                    
row.append(np.array([_m, _n]))\n mn.append(np.array(row))\n return np.array(mn)\n\n # Reverse-access operators (comparison operators are used for reverse-access).\n def __eq__(self, other):\n \"\"\"Returns the physical coordinate corresponding to the abstract coordinate other.\"\"\"\n return self.master.abs2phy(pos=other)\n '''\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._eq_1d(other=other)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._eq_2d(other=other)\n\n def _eq_1d(self, other):\n return self._getitem_1d(pos=other)\n\n def _eq_2d(self, other):\n # If other is a physical object, convert its bounding box to abstract coordinates.\n if (other.__class__.__name__ == 'PhysicalObject') or (issubclass(other.__class__, laygo2.object.PhysicalObject)):\n mn0 = self.master >= other.bbox[0]\n mn1 = self.master <= other.bbox[1]\n return np.array([mn0, mn1])\n if isinstance(other[0], (int, np.integer)):\n return np.array([self.master.m[other[0]],\n self.master.n[other[1]]])\n else:\n return np.array([self._eq_2d(o) for o in other])\n '''\n def __lt__(self, other):\n \"\"\"Returns the physical coordinate corresponding to the abstract coordinate that is the largest but less than\n other.\"\"\"\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._lt_1d(other=other)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._lt_2d(other=other)\n\n def _lt_1d(self, other):\n if isinstance(other, (int, np.integer)):\n return self.master.abs2phy.__getitem__(pos=other-1)\n return np.array([self._lt_1d(o) for o in other])\n\n def _lt_2d(self, other):\n if isinstance(other[0], (int, np.integer)):\n return self.master.abs2phy.__getitem__(pos=(other[0]-1, other[1]-1))\n return np.array([self._lt_2d(o) for o in other])\n\n def __le__(self, other):\n \"\"\"Returns the physical coordinate corresponding to the abstract coordinate that is the largest but less than\n or equal to other.\n Should be equivalent to __eq__.\n \"\"\"\n return self.master.abs2phy(pos=other)\n\n def __gt__(self, other):\n \"\"\"\n Returns the physical grid coordinate whose abstract coordinate is the smallest but greater than other.\n \"\"\"\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._gt_1d(other)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._gt_2d(other)\n else:\n return None\n\n def _gt_1d(self, other):\n if isinstance(other, (int, np.integer)):\n return self.master.abs2phy.__getitem__(pos=other+1)\n return np.array([self._gt_1d(o) for o in other])\n\n def _gt_2d(self, other):\n if isinstance(other[0], (int, np.integer)):\n return self.master.abs2phy.__getitem__(pos=(other[0]+1, other[1]+1))\n return np.array([self._gt_2d(o) for o in other])\n\n def __ge__(self, other):\n \"\"\"\n Returns the physical grid coordinate whose abstract coordinate is the smallest but greater than or equal to\n other. 
Should be equivalent to __eq__.\n        \"\"\"\n        return self.master.abs2phy.__getitem__(pos=other)\n\n    def bbox(self, obj):\n        \"\"\"Returns the abstract grid coordinates corresponding to the 'internal' bounding box of obj.\n        Strictly speaking, the resulting box may not be a bounding box (as the box is located inside obj if obj.bbox is\n        not on grid), but the internal bounding box is more useful than the actual bounding box especially for routing\n        and via purposes.\n\n        Parameters\n        ----------\n        obj: numpy.ndarray or PhysicalObject\n            A numpy array representing a bounding box in physical coordinate, or a PhysicalObject.\n        \"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            obj = obj.bbox\n\n        # phy -> abs\n        mn0 = self.master.xy >= obj[0]  # greater than or equal to the lower-left corner.\n        mn1 = self.master.xy <= obj[1]  # less than or equal to the upper-right corner.\n        return np.array([mn0, mn1])\n\n    def bottom_left(self, obj):\n        \"\"\"Returns the bottom-left corner of an object on this grid.\"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            return self.bottom_left(obj.bbox)\n        else:\n            _i = self.bbox(obj)\n            return _i[0]\n\n    def bottom_right(self, obj):\n        \"\"\"Returns the bottom-right corner of an object on this grid.\"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            return self.bottom_right(obj.bbox)\n        else:\n            _i = self.bbox(obj)\n            return np.array([_i[1, 0], _i[0, 1]])\n\n    def top_left(self, obj):\n        \"\"\"Returns the top-left corner of an object on this grid.\"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            return self.top_left(obj.bbox)\n        else:\n            _i = self.bbox(obj)\n            return np.array([_i[0, 0], _i[1, 1]])\n\n    def top_right(self, obj):\n        \"\"\"Returns the top-right corner of an object on this grid.\"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            return self.top_right(obj.bbox)\n        else:\n            _i = self.bbox(obj)\n            return _i[1]\n\n    def width(self, obj):\n        \"\"\"Returns the width of an object on this grid.\"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            return self.width(obj.bbox)\n        else:\n            _i = self.bbox(obj)\n            return abs(_i[1, 0] - _i[0, 0])\n\n    def height(self, obj):\n        \"\"\"Returns the height of an object on this grid.\"\"\"\n        if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n            return self.height(obj.bbox)\n        else:\n            _i = self.bbox(obj)\n            return abs(_i[1, 1] - _i[0, 1])\n\n    def height_vec(self, obj):\n        \"\"\"numpy.ndarray(dtype=int): Returns np.array([0, height]).\"\"\"\n        return np.array([0, self.height(obj)])\n\n    def width_vec(self, obj):\n        \"\"\"numpy.ndarray(dtype=int): Returns np.array([width, 0]).\"\"\"\n        return np.array([self.width(obj), 0])\n\n    def size(self, obj):\n        \"\"\"Returns the size of an object on this grid.\"\"\"\n        return np.array([self.width(obj), self.height(obj)])\n\n    def crossing(self, *args):\n        \"\"\"\n        Returns a point on this grid, corresponding to the cross-point of bounding boxes given by args.\n        This function assumes there's an overlap between input bounding boxes, without any exception handling.\n        \"\"\"\n        return self.overlap(*args, type='point')\n\n    def overlap(self, *args, type='bbox'):\n        \"\"\"\n        Returns a bounding box on this grid, corresponding to 
the intersection of bounding boxes given by args.\n        This function assumes there's an overlap between input bounding boxes, without any exception handling.\n\n        Parameters\n        ----------\n        *args: np.ndarray or PhysicalObject\n            A collection of array or physical objects where the overlap region will be computed over.\n        type: str {'bbox', 'point', 'list', 'array'}\n            The type of overlap region.\n            If 'bbox', a 2x2 numpy array containing lower-left and upper-right corners of the overlap region is returned.\n            If 'point', the lower-left corner of the overlap region is returned.\n            If 'list', a list containing all points in the overlap region is returned.\n            If 'array', a 2-dimensional array containing all points in the overlap region is returned.\n        \"\"\"\n        _ib = None\n        for _obj in args:\n            if _ib is None:\n                _ib = self.bbox(_obj)  ## shaped\n            else:\n                _b = self.bbox(_obj)\n                _x = np.sort(np.array([_b[:, 0], _ib[:, 0]]), axis=None)\n                _y = np.sort(np.array([_b[:, 1], _ib[:, 1]]), axis=None)\n                _ib = np.array([[_x[1], _y[1]], [_x[2], _y[2]]])\n        if type == 'bbox':\n            return _ib\n        elif type == 'point':\n            return _ib[0]\n        elif type == 'list':\n            return _conv_bbox_to_list(_ib)\n        elif type == 'array':\n            return _conv_bbox_to_array(_ib)\n        else:\n            raise ValueError('overlap() should receive a valid value for its type (bbox, point, list, array)')\n\n    def union(self, *args):\n        \"\"\"Returns a bounding box on this grid, corresponding to the union of the bounding boxes given by args.\"\"\"\n        _ub = None\n        for _obj in args:\n            if _ub is None:\n                _ub = self.bbox(_obj)\n            else:\n                _b = self.bbox(_obj)\n                _x = np.sort(np.array([_b[:, 0], _ub[:, 0]]), axis=None)\n                _y = np.sort(np.array([_b[:, 1], _ub[:, 1]]), axis=None)\n                _ub = np.array([[_x[0], _y[0]], [_x[3], _y[3]]])\n        return _ub\n\n    def center(self, obj):\n        \"\"\"Returns the center coordinate of an object on this grid.\"\"\"\n        mn0 = self.master.xy >= obj.center\n        mn1 = self.master.xy <= obj.center\n\n        point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]]  # 4 physical points near the center coordinate.\n        dist_list = []\n        idx = 0\n        for point in point_list:\n            dist_list.append([idx, np.linalg.norm(point - obj.center)])  # Calculate Euclidean distances.\n            idx += 1\n        dist_sorted = sorted(dist_list, key=lambda distance: distance[1])  # Sort distances in ascending order.\n        return self.master.mn(point_list[dist_sorted[0][0]])  # Convert the closest point to abstract coordinate and then return.\n\n\nclass OneDimGrid(CircularMapping):\n    \"\"\"\n    Basic one-dimensional layout grid.\n    \"\"\"\n\n    # Member variables and properties\n    name = None\n    \"\"\"str: the name of the grid.\"\"\"\n\n    range = None\n    \"\"\"numpy.array(dtype=int) or None : the range over which the grid coordinates are defined, with the grid pattern \n    repeated in every width of the range. For example, if the range is np.array([10, 50]), its base coordinates are \n    defined over [10, 50] and the grid pattern is repeated in every 40 (=50-10).\"\"\"\n\n    phy2abs = None\n    \"\"\"_PhyToAbsGridConverter(master=self): Physical-to-abstract converter.\"\"\"\n\n    abs2phy = None\n    \"\"\"_AbsToPhyGridConverter(master=self): Abstract-to-physical converter.\"\"\"\n\n    @property\n    def width(self):\n        \"\"\"float: the width of the grid.\"\"\"\n        return abs(self.range[1] - self.range[0])\n\n    # Constructor\n    def __init__(self, name, scope, elements=np.array([0])):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        name: str\n            The name of the grid.\n        scope: numpy.ndarray(dtype=int)\n            The scope over which the grid coordinates are defined. 
Its format is [start, stop].\n        elements: numpy.ndarray or list[int]\n            The physical coordinates of the grid elements defined within the scope.\n        \"\"\"\n        self.name = name\n        self.range = np.asarray(scope)\n        self.phy2abs = _PhyToAbsGridConverter(master=self)\n        self.abs2phy = _AbsToPhyGridConverter(master=self)\n        CircularMapping.__init__(self, elements=elements)\n        # self.elements = np.asarray(elements)  # commented out because asarray does not work well with object arrays.\n\n    # Indexing and slicing functions\n    def __getitem__(self, pos):\n        \"\"\"Returns the physical coordinate corresponding to the abstract coordinate pos.\"\"\"\n        return self.abs2phy([pos])\n\n    # Comparison operators\n    def __eq__(self, other):\n        \"\"\"Returns the abstract grid coordinate that matches other.\"\"\"\n        return self.abs2phy.__eq__(other)\n\n    def __lt__(self, other):\n        \"\"\"Returns the abstract grid coordinate that is the largest among those less than other.\"\"\"\n        return self.abs2phy.__lt__(other)\n\n    def __le__(self, other):\n        \"\"\"Returns the abstract grid coordinate that is the largest among those less than or equal to other.\"\"\"\n        return self.abs2phy.__le__(other)\n\n    def __gt__(self, other):\n        \"\"\"Returns the abstract grid coordinate that is the smallest among those greater than other.\"\"\"\n        return self.abs2phy.__gt__(other)\n\n    def __ge__(self, other):\n        \"\"\"Returns the abstract grid coordinate that is the smallest among those greater than or equal to other.\"\"\"\n        return self.abs2phy.__ge__(other)\n\n    # Informative functions\n    def __str__(self):\n        \"\"\"Returns the string representation of the object.\"\"\"\n        return self.summarize()\n\n    def summarize(self):\n        \"\"\"Returns the summary of the object information.\"\"\"\n        return self.__repr__() + \" \" \\\n               \"name: \" + self.name + \", \" + \\\n               \"class: \" + self.__class__.__name__ + \", \" + \\\n               \"scope: \" + str(self.range) + \", \" + \\\n               \"elements: \" + str(self.elements)\n\n    # I/O functions\n    def export_to_dict(self):\n        \"\"\"Exports the grid information as a dictionary.\"\"\"\n        export_dict = {\n            'scope': self.range.tolist(),\n            'elements': self.elements.tolist(),\n        }\n        return export_dict\n\n\nclass Grid:\n    \"\"\"\n    Basic two-dimensional layout grid.\n    \"\"\"\n\n    name = None\n    \"\"\"str: the name of the grid.\"\"\"\n\n    _xy = None\n    \"\"\"List[OneDimGrid]: the list containing the 1d-grid objects for the x and y axes.\"\"\"\n\n    @property\n    def elements(self):\n        \"\"\"list: the elements of the x and y grids ([_xy[0].elements, _xy[1].elements]).\"\"\"\n        return [self._xy[0].elements, self._xy[1].elements]\n\n    phy2abs = None\n    \"\"\"PhyToAbsGridConverter(master=self)\"\"\"\n\n    abs2phy = None\n    \"\"\"AbsToPhyGridConverter(master=self)\"\"\"\n\n    @property\n    def xy(self):\n        \"\"\"_AbsToPhyGridConverter: the abstract-to-physical converter of this grid.\"\"\"\n        return self.abs2phy\n\n    @property\n    def x(self):\n        \"\"\"None or OneDimGrid: the grid along the x-axis.\"\"\"\n        return self._xy[0].abs2phy\n\n    @property\n    def y(self):\n        \"\"\"None or OneDimGrid: the grid along the y-axis.\"\"\"\n        return self._xy[1].abs2phy\n\n    @property\n    def v(self):\n        \"\"\"None or OneDimGrid: the grid along the x-axis.\"\"\"\n        return self.x\n\n    @property\n    def h(self):\n        \"\"\"None or OneDimGrid: the grid along the y-axis.\"\"\"\n        return self.y\n\n    @property\n    def mn(self):\n        \"\"\"_PhyToAbsGridConverter: the physical-to-abstract converter of this grid.\"\"\"\n        return self.phy2abs\n\n    @property\n    def m(self):\n        return self._xy[0].phy2abs\n\n    @property\n    def n(self):\n        return self._xy[1].phy2abs\n\n    @property\n    def shape(self):\n        return np.hstack([self._xy[0].shape, self._xy[1].shape])\n\n    def get_range(self):\n        return np.transpose(np.vstack((self._xy[0].range, self._xy[1].range)))\n\n    def set_range(self, value):\n        self._xy[0].range = 
np.transpose(value)[0]\n        self._xy[1].range = np.transpose(value)[1]\n\n    range = property(get_range, set_range)\n    \"\"\"numpy.ndarray(dtype=int): the 2d-array that contains the range information of the x and y grids.\"\"\"\n\n    @property\n    def width(self):\n        \"\"\"float: the width of the grid.\"\"\"\n        return self._xy[0].width\n\n    @property\n    def height(self):\n        \"\"\"float: the height of the grid.\"\"\"\n        return self._xy[1].width\n\n    @property\n    def height_vec(self):\n        \"\"\"numpy.ndarray(dtype=int): Returns np.array([0, height]).\"\"\"\n        return np.array([0, self.height])\n\n    @property\n    def width_vec(self):\n        \"\"\"numpy.ndarray(dtype=int): Returns np.array([width, 0]).\"\"\"\n        return np.array([self.width, 0])\n\n    def __init__(self, name, vgrid, hgrid):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        name : str\n            The name of the template.\n        vgrid : laygo2.object.grid.OneDimGrid\n            Vertical grid object.\n        hgrid : laygo2.object.grid.OneDimGrid\n            Horizontal grid object.\n        \"\"\"\n        self.name = name\n        self._xy = [vgrid, hgrid]\n        self.phy2abs = _PhyToAbsGridConverter(master=self)\n        self.abs2phy = _AbsToPhyGridConverter(master=self)\n\n    # Indexing and slicing functions\n    def __getitem__(self, pos):\n        return self.abs2phy.__getitem__(pos)\n\n    # Comparison operators\n    def __eq__(self, other):\n        \"\"\"Returns the physical grid coordinate that matches other.\"\"\"\n        return self.abs2phy.__eq__(other)\n\n    def __lt__(self, other):\n        \"\"\"Returns the index of the grid coordinate that is the largest among those less than other.\"\"\"\n        return self.abs2phy.__lt__(other)\n\n    def __le__(self, other):\n        \"\"\"Returns the index of the grid coordinate that is the largest among those less than or equal to other.\"\"\"\n        return self.abs2phy.__le__(other)\n\n    def __gt__(self, other):\n        \"\"\"Returns the index of the grid coordinate that is the smallest among those greater than other.\"\"\"\n        return self.abs2phy.__gt__(other)\n\n    def __ge__(self, other):\n        \"\"\"Returns the index of the grid coordinate that is the smallest among those greater than or equal to other.\"\"\"\n        return self.abs2phy.__ge__(other)\n\n    def bbox(self, obj):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the 'internal' bounding box of obj.\n        See _PhyToAbsGridConverter.bbox() for details.\n        \"\"\"\n        return self.phy2abs.bbox(obj)\n\n    def bottom_left(self, obj):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the bottom-left corner of obj.\n        See _PhyToAbsGridConverter.bottom_left() for details.\n        \"\"\"\n        return self.phy2abs.bottom_left(obj)\n\n    def bottom_right(self, obj):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the bottom-right corner of obj.\n        See _PhyToAbsGridConverter.bottom_right() for details.\n        \"\"\"\n        return self.phy2abs.bottom_right(obj)\n\n    def top_left(self, obj):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the top-left corner of obj.\n        See _PhyToAbsGridConverter.top_left() for details.\n        \"\"\"\n        return self.phy2abs.top_left(obj)\n\n    def top_right(self, obj):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the top-right corner of obj.\n        See _PhyToAbsGridConverter.top_right() for details.\n        \"\"\"\n        return self.phy2abs.top_right(obj)\n\n    def crossing(self, *args):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the crossing point of args.\n        See _PhyToAbsGridConverter.crossing() for details.\n        \"\"\"\n        return 
self.phy2abs.crossing(*args)\n\n    def overlap(self, *args, type='bbox'):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the overlap of args.\n        See _PhyToAbsGridConverter.overlap() for details.\n        \"\"\"\n        return self.phy2abs.overlap(*args, type=type)\n\n    def union(self, *args):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the union of args.\n        See _PhyToAbsGridConverter.union() for details.\n        \"\"\"\n        return self.phy2abs.union(*args)\n\n    def center(self, obj):\n        \"\"\"\n        Returns the abstract grid coordinates corresponding to the center point of obj.\n        See _PhyToAbsGridConverter.center() for details.\n        \"\"\"\n        return self.phy2abs.center(obj)\n\n    # Iterators\n    def __iter__(self):\n        # TODO: fix this to iterate over the full coordinates\n        return np.array([self._xy[0].__iter__(), self._xy[1].__iter__()])\n\n    def __next__(self):\n        # TODO: fix this to iterate over the full coordinates\n        return np.array([self._xy[0].__next__(), self._xy[1].__next__()])\n\n    # Informative functions\n    def __str__(self):\n        \"\"\"Returns the string representation of the object.\"\"\"\n        return self.summarize()\n\n    def summarize(self):\n        \"\"\"Returns the summary of the object information.\"\"\"\n        return self.__repr__() + \" \" \\\n               \"name: \" + self.name + \", \" + \\\n               \"class: \" + self.__class__.__name__ + \", \" + \\\n               \"scope: \" + str(self.range.tolist()) + \", \" + \\\n               \"elements: \" + str(self.elements)\n\n\n# Regular classes.\nclass PlacementGrid(Grid):\n    \"\"\"Placement grid class.\"\"\"\n    type = 'placement'\n\n    def place(self, inst, mn):\n        \"\"\"Places an instance at the abstract coordinate mn on this grid.\"\"\"\n        inst.xy = self[mn]\n        return inst\n\n\nclass RoutingGrid(Grid):\n    \"\"\"Routing grid class.\"\"\"\n    type = 'routing'\n    vwidth = None\n    \"\"\"CircularMapping: the array containing the widths of the routing wires on the vertical grid.\"\"\"\n    hwidth = None\n    \"\"\"CircularMapping: the array containing the widths of the routing wires on the horizontal grid.\"\"\"\n    vextension = None\n    \"\"\"CircularMapping: the array containing the extensions of the routing wires on the vertical grid.\"\"\"\n    hextension = None\n    \"\"\"CircularMapping: the array containing the extensions of the routing wires on the horizontal grid.\"\"\"\n    vextension0 = None\n    \"\"\"CircularMapping: the array containing the extensions of the zero-length wires on the vertical grid.\"\"\"\n    hextension0 = None\n    \"\"\"CircularMapping: the array containing the extensions of the zero-length wires on the horizontal grid.\"\"\"\n    vlayer = None\n    \"\"\"CircularMapping: the array containing the layer info [name, purpose] of the routing wires on the vertical grid.\"\"\"\n    hlayer = None\n    \"\"\"CircularMapping: the array containing the layer info [name, purpose] of the routing wires on the horizontal grid.\"\"\"\n    pin_vlayer = None\n    \"\"\"CircularMapping: the array containing the pin layer info [name, purpose] of the routing wires on the vertical grid.\"\"\"\n    pin_hlayer = None\n    \"\"\"CircularMapping: the array containing the pin layer info [name, purpose] of the routing wires on the horizontal grid.\"\"\"\n    viamap = None\n    \"\"\"CircularMappingArray or None: the array containing the information of the vias on the grid.\"\"\"\n    primary_grid = 'vertical'\n    \"\"\"str: the primary routing direction of the grid. Should be either 'vertical' or 'horizontal'.\n    Used when the direction of a routing wire is undetermined. 
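For example, when route() is called on two identical points with direction=None, the resulting stub is drawn on the vertical routing layer if primary_grid is 'vertical'. 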
\"\"\"\n    xcolor = None\n    \"\"\"CircularMapping: the array containing the colors of the routing wires on the vertical grid.\"\"\"\n    ycolor = None\n    \"\"\"CircularMapping: the array containing the colors of the routing wires on the horizontal grid.\"\"\"\n\n    def __init__(self, name, vgrid, hgrid, vwidth, hwidth, vextension, hextension, vlayer, hlayer, pin_vlayer,\n                 pin_hlayer, viamap, xcolor, ycolor, primary_grid='vertical', vextension0=None, hextension0=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        name : str\n            The name of the template.\n        \"\"\"\n        self.vwidth = vwidth\n        self.hwidth = hwidth\n        self.vextension = vextension\n        self.hextension = hextension\n        if vextension0 is None:\n            self.vextension0 = vextension\n        else:\n            self.vextension0 = vextension0\n        if hextension0 is None:\n            self.hextension0 = hextension\n        else:\n            self.hextension0 = hextension0\n        self.vlayer = vlayer\n        self.hlayer = hlayer\n        self.pin_vlayer = pin_vlayer\n        self.pin_hlayer = pin_hlayer\n        self.viamap = viamap\n        self.primary_grid = primary_grid\n        self.xcolor = xcolor\n        self.ycolor = ycolor\n        Grid.__init__(self, name=name, vgrid=vgrid, hgrid=hgrid)\n\n    def route(self, mn, direction=None, via_tag=None):\n        \"\"\"\n        Creates Path and Via objects over the xy-coordinates specified by mn, on this routing grid.\n\n        Notes\n        -----\n        Initially, paths were used for implementing routing wires, but they were replaced with rects, as paths cannot\n        handle zero-length wires (with extensions) very well.\n        \"\"\"\n        mn = np.asarray(mn)\n        _mn = list()\n        for i in range(1, mn.shape[0]):\n            # when more than two points are given,\n            # create a multi-point wire composed of sub-routing wires\n            # connecting the points given by mn in sequence.\n            _mn.append([mn[i - 1, :], mn[i, :]])\n        route = list()\n        # via at the starting point\n        if via_tag is not None:\n            if via_tag[0] is True:\n                route.append(self.via(mn=_mn[0][0], params=None))\n        # routing wires\n        for i, __mn in enumerate(_mn):\n            xy0 = self.abs2phy[__mn[0]]\n            xy1 = self.abs2phy[__mn[1]]\n            _xy = np.array([[xy0[0], xy0[1]], [xy1[0], xy1[1]]])\n            if np.all(xy0 == xy1):  # if the two points are identical, generate a metal stub on the bottom layer.\n                color = None  # no color for zero-length stubs (avoids referencing an undefined variable below).\n                if (direction == 'vertical') or ((direction is None) and (self.primary_grid == 'vertical')):\n                    width = self.vwidth[__mn[0][0]]\n                    hextension = int(width/2)\n                    vextension = self.vextension0[__mn[0][0]]\n                    layer = self.vlayer[__mn[0][0]]\n                else:\n                    width = self.hwidth[__mn[0][1]]\n                    hextension = self.hextension0[__mn[0][1]]\n                    vextension = int(width/2)\n                    layer = self.hlayer[__mn[0][1]]\n            else:\n                if (xy0[0] == xy1[0]) or (direction == 'vertical'):  # vertical routing\n                    width = self.vwidth[__mn[0][0]]\n                    hextension = int(width/2)\n                    vextension = self.vextension[__mn[0][0]]\n                    layer = self.vlayer[__mn[0][0]]\n                    color = self.xcolor[__mn[0][0] % self.xcolor.shape[0]]  # xcolor is determined by the wire's grid index.\n                else:  # horizontal routing\n                    width = self.hwidth[__mn[0][1]]\n                    hextension = self.hextension[__mn[0][1]]\n                    vextension = int(width/2)\n                    layer = self.hlayer[__mn[0][1]]\n                    color = self.ycolor[__mn[0][1] % self.ycolor.shape[0]]  # ycolor is determined by its grid layer.\n            p = laygo2.object.physical.Rect(xy=_xy, layer=layer, hextension=hextension, vextension=vextension, color=color)\n            route.append(p)\n            # via placement\n            if via_tag is None:\n                if (i > 0) and (i < mn.shape[0] - 1):\n                    route.append(self.via(mn=__mn[0], params=None))\n            else:\n                if via_tag[i + 1] is True:\n                    route.append(self.via(mn=__mn[1], params=None))\n        if len(route) == 1:  # not isinstance(mn[0][0], list):\n            return route[0]\n        else:\n            return route\n\n    def 
via(self, mn=np.array([0, 0]), params=None):\n        \"\"\"Creates vias on the xy-coordinates specified by mn, on this routing grid.\"\"\"\n        # If mn contains multiple coordinates (or objects), place iteratively.\n        if isinstance(mn, list):\n            if isinstance(mn[0], (int, np.integer)):  # It's actually a single coordinate.\n                return self.via(mn=np.asarray(mn), params=params)\n            else:\n                return [self.via(mn=_mn, params=params) for _mn in mn]\n        elif isinstance(mn, np.ndarray):\n            if isinstance(mn[0], (int, np.integer)):  # It's actually a single coordinate.\n                pass\n            else:\n                return np.array([self.via(mn=_mn, params=params) for _mn in mn])\n        if not isinstance(mn, tuple):\n            mn = tuple(mn)  # viamap (CircularMapping) works only with tuples\n        tvia = self.viamap[mn]\n        via = tvia.generate(params=params)\n        via.xy = self[mn]\n        return via\n\n    def route_via_track(self, mn, track, via_tag=[None, True]):\n        \"\"\"\n        Creates multiple routes sharing a single track.\n\n        Parameters\n        ----------\n        mn : list\n            The list of abstract points to be routed, [mn, mn, ...].\n        track : list\n            The abstract coordinate of the track, [None, y] for a track routed along the x-axis, or [x, None] for a\n            track routed along the y-axis.\n        \"\"\"\n        mn = np.array(mn)\n        route = list()\n\n        if track[1] is not None:  # x direction\n            t = 0  # index of the track axis\n            p = 1  # index of the perpendicular axis\n            mn_pivot = track[1]\n        else:  # y direction\n            t = 1\n            p = 0\n            mn_pivot = track[0]\n\n        mn_b = np.array([[0, 0], [0, 0]])  # 1. branches\n        min_t, max_t = mn[0][t], mn[0][t]\n\n        for i in range(len(mn)):\n            mn_b[0] = mn[i]\n            mn_b[1][t] = mn_b[0][t]\n            mn_b[1][p] = mn_pivot\n            if np.array_equal(mn_b[0], mn_b[1]):  # via only\n                route.append(self.via(mn=mn_b[0], params=None))\n            else:\n                route.append(self.route(mn=[mn_b[0], mn_b[1]], via_tag=via_tag))\n\n            center_t = mn[i][t]\n            if center_t < min_t:\n                min_t = center_t\n            elif max_t < center_t:\n                max_t = center_t\n\n        mn_track = np.array([[0, 0], [0, 0]])  # 2. the track\n        mn_track[0][t], mn_track[0][p] = min_t, mn_pivot  # min\n        mn_track[1][t], mn_track[1][p] = max_t, mn_pivot  # max\n\n        if np.array_equal(mn_track[0], mn_track[1]):  # skip zero-length tracks\n            route.append(None)\n        else:\n            route.append(self.route(mn=mn_track))\n\n        return route\n\n    def pin(self, name, mn, direction=None, netname=None, params=None):\n        # pin0 = Pin(xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], netname='net0', master=rect0,\n        #            params={'direction': 'input'})\n        \"\"\"Creates a pin object over the xy-coordinates specified by mn, on this routing grid. 
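\n\n        Example\n        -------\n        An illustrative sketch (not from the original code; 'r23' stands for any RoutingGrid object created during\n        technology setup):\n\n        >>> pin_A = r23.pin(name='A', mn=[[0, 0], [2, 0]], netname='A')  # doctest: +SKIP\n        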
\"\"\"\n xy0 = self.abs2phy[mn[0]]\n xy1 = self.abs2phy[mn[1]]\n #_xy = np.array([[xy0[0], xy0[1]], [xy1[0], xy1[1]]])\n if np.all(xy0 == xy1): # if two points are identical, generate a metal stub on the bottom layer.\n if (direction == 'vertical') or ((direction is None) and (self.primary_grid == 'vertical')):\n width = self.vwidth[mn[0][0]]\n hextension = int(width / 2)\n vextension = 0\n layer = self.pin_vlayer[mn[0][0]]\n else:\n width = self.hwidth[mn[0][1]]\n hextension = 0\n vextension = int(width / 2)\n layer = self.pin_hlayer[mn[0][1]]\n else:\n if (xy0[0] == xy1[0]) or (direction == 'vertical'): # vertical routing\n width = self.vwidth[mn[0][0]]\n hextension = int(width / 2)\n vextension = 0\n layer = self.pin_vlayer[mn[0][0]]\n else: # horizontal routing\n width = self.hwidth[mn[0][1]]\n hextension = 0\n vextension = int(width / 2)\n layer = self.pin_hlayer[mn[0][1]]\n # TODO: pin.xy differ from tech.py.\n _xy = np.array([[xy0[0]-hextension, xy0[1]-vextension], [xy1[0]+hextension, xy1[1]+vextension]]) ## need to check\n p = laygo2.object.physical.Pin(name=name, xy=_xy, layer=layer, netname=netname, params=params)\n return p\n\n\nclass ParameterizedGrid(Grid):\n \"\"\"A parameterized grid to support flexible templates.\"\"\"\n # TODO: implement this.\n pass\n\nclass ParameterizedPlacementGrid(Grid):\n # TODO: implement this.\n pass\n\nclass ParameterizedRoutingGrid(Grid):\n # TODO: implement this.\n pass\n\n\n"
},
{
"alpha_fraction": 0.6800356507301331,
"alphanum_fraction": 0.6925133466720581,
"avg_line_length": 28.526315689086914,
"blob_id": "b2c08c2265b926aa6ef1ed16e5360f1ff89901ec",
"content_id": "b0ea1cd35205762b97b16d83f9971ae922bcc073",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1878,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 38,
"path": "/docs_workspace/user_guide_kor/2_examples.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# laygo2의 쉬운 예제들\n\n이 문서는 laygo2의 활용을 돕는 몇 가지의 간단한 예제들을 포함하고 있다.\n\n* **[빠른 설치 및 환경 설정](#빠른-설치-및-환경-설정)**: laygo2를 리눅스 환경에서 손쉽게 설치하고 환경 설정하는 방법을 설명한다.\n* **[공정 셋업](#공정-셋업)**: laygo2를 새로운 환경에서 셋업하는 방법을 설명한다.\n* **[simple_gates](#simple_gates)**: 간단한 로직 게이트를 laygo2를 사용하여 제작하는 방법을 설명한다.\n\n## 빠른 설치 및 환경 설정\n\nLaygo2프로젝트는 코드의 대량 수정이 필요한 초기 개발 단계에 있기 때문에 pip를 이용한 설치를 지원하지 않는다.\n대신 github repository를 clone하는 방법으로 쉽게 laygo2를 다운로드 할 수 있다. \n\n >>>> git clone https://github.com/niftylab/laygo2.git\n \n이 경우, 코드를 최신 상태로 유지하기 위하여, 주기적으로 다음 명령어를 이용하여 github에서 최신 코드를 다운로드 하기를 권장한다.\n\n >>>> git pull origin master\n\n두 번째로, 다음과 같이 PYTHONPATH를 수정하여 laygo2패키지를 참조할 수 있도록 한다. \n(이 방법은 초보자를 위한 가장 기본적인 예제로, 고급 사용자는 venv나 docker등의 가상환경을 사용하기를 권장한다.)\n\n # (csh/tcsh example) add the following command to your .cshrc\n setenv PYTHONPATH ${PYTHONPATH}:[LAYGO2_INSTALLATION_DIR]/laygo2\n\n## 공정 셋업\n\n추가 예정\n\n## simple_gates\n\n다음 커맨드를 실행함으로서 NAND gate의 레이아웃을 생성할 수 있다.\n\n >>>> run ./laygo2/examples/nand_generate.py\n \n위 스크립트를 실행하여 생성된 NAND gate의 레이아웃은 다음과 같다.\n\n\n"
},
{
"alpha_fraction": 0.5541815161705017,
"alphanum_fraction": 0.5657520890235901,
"avg_line_length": 38.068382263183594,
"blob_id": "81f6021f09e3be673a281798a4a5d92996790ed7",
"content_id": "cebb76918d2976b6eb600bfdd6d8c9849e40bc6c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 43991,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 1126,
"path": "/laygo2/object/physical.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThe physical module implements classes for various physical layout objects.\nThe types of objects supported are summarized below:\n\nPhysicalObject - a base class for physical layout objects.\n\nIterablePhysicalObject - a base class for iterable physical objects (eg. arrayed instances).\n\nPhysicalObjectGroup - defines a group of physical objects.\n\nRect - defines a rect.\n\nPath - defines a path.\n\nPin - defines a pin.\n\nText - defines a text label.\n\nInstance - defines an instance.\n\nVirtualInstance - defines a virtual instance composed of multiple physical objects.\n\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nimport numpy as np\n# from copy import deepcopy\nimport laygo2.util.transform as tf\n\nclass PhysicalObject:\n \"\"\"\n The base class of physical layout objects.\n\n Attributes\n ----------\n name\n xy\n bbox\n master\n params\n pointers\n left\n right\n top\n bottom\n center\n bottom_left\n bottom_right\n top_left\n top_right\n \"\"\"\n\n def _get_xy(self):\n \"\"\"numpy.ndarray(dtype=numpy.int): Gets the x and y coordinate values of this object.\"\"\"\n return self._xy\n\n def _set_xy(self, value):\n \"\"\"numpy.ndarray(dtype=numpy.int): Sets the x and y coordinate values of this object.\"\"\"\n self._xy = np.asarray(value, dtype=np.int)\n self._update_pointers()\n\n name = None\n \"\"\"str or None: The name of this object.\"\"\"\n\n _xy = np.zeros(2, dtype=np.int)\n \"\"\"numpy.ndarray(dtype=numpy.int): The internal variable of xy.\"\"\"\n\n xy = property(_get_xy, _set_xy)\n \"\"\"numpy.ndarray(detype=numpy.int): The coordinate of this object represented as a Numpy array [x, y].\"\"\"\n\n master = None\n \"\"\"PhysicalObject or None: The master object that this object belongs to, if exists. 
None if there is no master.\"\"\"\n\n params = None\n \"\"\"dict or None: The dictionary that contains the parameters of this object, with parameter names as keys.\"\"\"\n\n pointers = None\n \"\"\"dict or None: The dictionary that contains the pointers of this object, with the pointers' names as keys.\"\"\"\n\n # frequently used pointers\n left, right, top, bottom, center, bottom_left, bottom_right, top_left, top_right = None, None, None, None, None, None, None, None, None\n\n @property\n def bbox(self):\n \"\"\"numpy.ndarray(dtype=int): The bounding box for this object, represented as a Numpy array\n [[x_ll, y_ll], [x_ur, y_ur]].\"\"\"\n\n return np.sort(np.array([self.xy[0, :], self.xy[1, :]]), axis=0)\n\n def __init__(self, xy, name=None, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=numpy.int)\n The coordinate of this object represented as a Numpy array [x, y].\n name : str, optional\n The name of this object.\n params : dict or None\n The dictionary that contains the parameters of this object, with parameter names as keys.\n \"\"\"\n\n self.name = name\n # Initialize pointers.\n self.pointers = dict()\n self.pointers['left'] = np.array([0, 0], dtype=np.int)\n self.pointers['right'] = np.array([0, 0], dtype=np.int)\n self.pointers['bottom'] = np.array([0, 0], dtype=np.int)\n self.pointers['top'] = np.array([0, 0], dtype=np.int)\n self.pointers['bottom_left'] = np.array([0, 0], dtype=np.int)\n self.pointers['bottom_right'] = np.array([0, 0], dtype=np.int)\n self.pointers['top_left'] = np.array([0, 0], dtype=np.int)\n self.pointers['top_right'] = np.array([0, 0], dtype=np.int)\n self.left = self.pointers['left']\n self.right = self.pointers['right']\n self.bottom = self.pointers['bottom']\n self.top = self.pointers['top']\n self.bottom_left = self.pointers['bottom_left']\n self.bottom_right = self.pointers['bottom_right']\n self.top_left = self.pointers['top_left']\n self.top_right = self.pointers['top_right']\n\n self.params = params # deepcopy(params) # deepcopy increases the memory usage.\n self.xy = xy\n\n def __str__(self):\n \"\"\"Returns the summary of this object.\"\"\"\n return self.summarize()\n\n def summarize(self):\n \"\"\"Returns the summary of this object.\"\"\"\n name = 'None' if self.name is None else self.name\n return \\\n self.__repr__() + \" \" \\\n \"name: \" + name + \", \" + \\\n \"class: \" + self.__class__.__name__ + \", \" + \\\n \"xy: \" + str(self.xy.tolist()) + \", \" + \\\n \"params: \" + str(self.params) + \", \" + \\\n \"\"\n\n def _update_pointers(self):\n \"\"\"Updates pointers of this object. 
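Pointer coordinates (left, right, top, bottom, the four corners, and center) are recomputed from the object's bounding box. 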
Called when the xy-coordinate of this object is updated.\"\"\"\n        xy_left = np.diag(np.dot(np.array([[1, 0], [0.5, 0.5]]), self.bbox)).astype(np.int)\n        xy_right = np.diag(np.dot(np.array([[0, 1], [0.5, 0.5]]), self.bbox)).astype(np.int)\n        xy_bottom = np.diag(np.dot(np.array([[0.5, 0.5], [1, 0]]), self.bbox)).astype(np.int)\n        xy_top = np.diag(np.dot(np.array([[0.5, 0.5], [0, 1]]), self.bbox)).astype(np.int)\n        xy_bottom_left = np.diag(np.dot(np.array([[1, 0], [1, 0]]), self.bbox)).astype(np.int)\n        xy_bottom_right = np.diag(np.dot(np.array([[0, 1], [1, 0]]), self.bbox)).astype(np.int)\n        xy_top_left = np.diag(np.dot(np.array([[1, 0], [0, 1]]), self.bbox)).astype(np.int)\n        xy_top_right = np.diag(np.dot(np.array([[0, 1], [0, 1]]), self.bbox)).astype(np.int)\n        xy_center = np.diag(np.dot(np.array([[0.5, 0.5], [0.5, 0.5]]), self.bbox)).astype(np.int)\n        self.pointers['left'] = xy_left\n        self.pointers['right'] = xy_right\n        self.pointers['bottom'] = xy_bottom\n        self.pointers['top'] = xy_top\n        self.pointers['bottom_left'] = xy_bottom_left\n        self.pointers['bottom_right'] = xy_bottom_right\n        self.pointers['top_left'] = xy_top_left\n        self.pointers['top_right'] = xy_top_right\n        self.pointers['center'] = xy_center\n        self.left = self.pointers['left']\n        self.right = self.pointers['right']\n        self.bottom = self.pointers['bottom']\n        self.top = self.pointers['top']\n        self.bottom_left = self.pointers['bottom_left']\n        self.bottom_right = self.pointers['bottom_right']\n        self.top_left = self.pointers['top_left']\n        self.top_right = self.pointers['top_right']\n        self.center = self.pointers['center']\n\n\nclass IterablePhysicalObject(PhysicalObject):\n    \"\"\"\n    The base class of iterable physical objects. This object's iteration feature is supported by the elements\n    attribute, which is given by a Numpy array that contains the child objects of this object. 
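For example, obj[0] returns the first child object, as indexing is delegated to the elements array. 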
The ways of iterating, indexing,\n and slicing the elements of this object follow the Numpy conventions, which provide convenient ways of\n multi-dimensional indexing and advanced slicing.\n\n Attributes\n ----------\n elements\n \"\"\"\n\n elements = None\n \"\"\"numpy.array(dtype=PhysicalObject-like): The elements of this object.\"\"\"\n def _get_xy(self):\n \"\"\"numpy.ndarray(dtype=numpy.int): Gets the x and y coordinate values of this object.\"\"\"\n return self._xy\n\n def _set_xy(self, value):\n \"\"\"numpy.ndarray(dtype=numpy.int): Sets the x and y coordinate values of this object.\"\"\"\n # Update the coordinate value of its elements.\n self._update_elements(xy_ofst=value - self.xy)\n # Update the coordinate value of the object itself.\n PhysicalObject._set_xy(self, value=value)\n\n xy = property(_get_xy, _set_xy)\n\n @property\n def shape(self):\n \"\"\"numpy.ndarray(dtype=int): The shape of this object.\"\"\"\n if self.elements is None:\n return None\n else:\n return np.array(self.elements.shape, dtype=np.int)\n\n def __init__(self, xy, name=None, params=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=int)\n The coordinate of this object represented as a Numpy array [x, y].\n name : str, optional\n The name of the object.\n params : dict or None\n The dictionary that contains the parameters of this object, with parameter names as keys.\n elements : numpy.ndarray(dtype=LayoutObject) or None\n The iterable elements of the object.\n \"\"\"\n PhysicalObject.__init__(self, xy=xy, name=name, params=params)\n if elements is None:\n self.elements = None\n else:\n self.elements = np.asarray(elements)\n\n def __getitem__(self, pos):\n \"\"\"Returns the sub-elements of this object, based on the pos parameter.\"\"\"\n return self.elements[pos]\n\n def __setitem__(self, pos, item):\n \"\"\"Sets the sub-elements of this object, based on the pos and item parameter. \"\"\"\n self.elements[pos] = item\n\n def __iter__(self):\n \"\"\"Iterator function. Directly mapped to the iterator of the elements attribute of this object.\"\"\"\n return self.elements.__iter__()\n\n def __next__(self):\n \"\"\"Iterator function. Directly mapped to the iterator of the elements attribute of this object.\"\"\"\n return self.elements.__next__()\n\n def ndenumerate(self):\n \"\"\"Enumerates over the element array. Calls np.ndenumerate() of the elements of this object.\"\"\"\n return np.ndenumerate(self.elements)\n\n def _update_elements(self, xy_ofst):\n \"\"\"Updates xy-coordinates of this object's elements. An internal function for _set_xy()\"\"\"\n #print(\"aa?\")\n if np.all(self.elements is not None):\n # Update the x and y coordinate values of elements.\n for n, e in self.ndenumerate():\n if e is not None:\n e.xy = e.xy + xy_ofst\n\nclass PhysicalObjectGroup(IterablePhysicalObject):\n \"\"\"\n A grouped physical object. 
Intended to be generated as a group in Virtuoso (not implemented yet).\n \"\"\"\n # TODO: implement this.\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return IterablePhysicalObject.summarize(self) + \"\\n\" + \\\n \" elements: \" + str(self.elements)\n\n def __init__(self, xy, name=None, params=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=int)\n The coordinate of this object represented as a Numpy array [x, y].\n name : str, optional\n The name of the object.\n params : dict or None\n The dictionary that contains the parameters of this object, with parameter names as keys.\n elements : numpy.ndarray(dtype=LayoutObject) or None\n The iterable elements of the object.\n \"\"\"\n IterablePhysicalObject.__init__(self, xy=xy, name=name, params=params, elements=elements)\n\n\n'''\n# Deprecated as PhysicalObjectGroup can be used instead in most cases.\nclass PhysicalObjectArray(np.ndarray):\n \"\"\"LayoutObject array class for containing multiple layout objects. Subclassing ndarray to utilize advance slicing\n functions.\"\"\"\n name = None\n \"\"\"str: the name of the object.\"\"\"\n\n params = None\n \"\"\"dict or None: parameters of the object. \"\"\"\n\n _xy = None # np.zeros(2, dtype=np.int)\n \"\"\"numpy.ndarray(dtype=numpy.int): the internal variable of xy.\"\"\"\n\n def get_xy(self):\n \"\"\"numpy.ndarray(dtype=numpy.int): gets the x and y coordinate values of this object.\"\"\"\n return self._xy\n\n def set_xy(self, value):\n \"\"\"numpy.ndarray(dtype=numpy.int): sets the x and y coordinate values of this object.\"\"\"\n if value is None:\n self._xy = value\n else:\n self._xy = np.asarray(value, dtype=np.int)\n\n xy = property(get_xy, set_xy)\n \"\"\"numpy.ndarray(detype=numpy.int): the x and y coordinate values of the object.\"\"\"\n\n def moveby(self, delta):\n \"\"\"move the array and child objects by delta.\"\"\"\n self.xy = self.xy + delta\n for i in self:\n i.xy = i.xy + delta\n\n def __new__(cls, input_array, name=None, xy=None, params=None):\n \"\"\"\n Constructor for ndarray subclasses - check the NumPy manual for details.\n\n Parameters\n ----------\n input_array : np.ndarray\n An array of LayoutObject objects.\n name : str\n The name of the array.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinate of the object. 
The format is [x0, y0].\n params : dict\n Additional parameters of the array.\n \"\"\"\n # Input array is an already formed ndarray instance\n # We first cast to be our class type\n obj = np.asarray(input_array).view(cls)\n # add the new attribute to the created instance\n obj.name = name\n obj.xy = None if xy is None else np.asarray(xy, dtype=np.int)\n obj.params = params\n # Finally, we must return the newly created object:\n return obj\n\n def __array_finalize__(self, obj):\n \"\"\"\n Array finalizing function for subclassing ndarray - check the NumPy manual for details\n \"\"\"\n if obj is None: return\n # Transfer parameters\n self.name = getattr(obj, 'name', None)\n self.xy = getattr(obj, 'xy', None)\n self.params = getattr(obj, 'params', None)\n\n def __str__(self):\n \"\"\"Returns the summary of this object.\"\"\"\n return self.summarize()\n\n def summarize(self):\n \"\"\"Summarizes object information.\"\"\"\n return \" \" + \\\n \"name:\" + self.name + \", \" + \\\n \"class:\" + self.__class__.__name__ + \", \" + \\\n \"shape:\" + str(self.shape) + \", \" + \\\n \"xy:\" + str(self.xy) + \", \" + \\\n \"params:\" + str(self.params) + \"\\n\" + \\\n \" elements:\" + str(np.ndarray.__str__(self)) + \"\\n\"\n'''\n\nclass Rect(PhysicalObject):\n \"\"\"\n A rect object.\n\n Attributes\n ----------\n layer\n netname\n hextension\n vextension\n height\n width\n height_vec\n width_vec\n size\n bbox\n \"\"\"\n\n layer = None\n \"\"\"list(str): The layer information of this object.\"\"\"\n netname = None\n \"\"\"str: The name of the net associated with the rect.\"\"\"\n hextension = 0\n \"\"\"int or None: the extension of the Rect object in horizontal directions. Used to handle the extensions of routing\n elements.\"\"\"\n vextension = 0\n \"\"\"int or None: the extension of the Rect object in vertical directions. Used to handle the extensions of routing\n elements.\"\"\"\n color = None\n \"\"\"str: The color of the shape. One of grayColor, mask1Color, mask2Color, etc. \"\"\"\n\n @property\n def height(self):\n \"\"\"int: The height of the rect\"\"\"\n return abs(self.xy[0, 1] - self.xy[1, 1])\n\n @property\n def width(self):\n \"\"\"int: The width of the rect\"\"\"\n return abs(self.xy[0, 0] - self.xy[1, 0])\n\n @property\n def height_vec(self):\n \"\"\"numpy.ndarray(dtype=int): Returns np.array([0, height]).\"\"\"\n return np.array([0, self.height])\n\n @property\n def width_vec(self):\n \"\"\"numpy.ndarray(dtype=int): Returns np.array([width, 0]).\"\"\"\n return np.array([self.width, 0])\n\n @property\n def size(self):\n \"\"\"numpy.ndarray(dtype=int): The size of the rect.\"\"\"\n return np.array([self.width, self.height])\n\n def __init__(self, xy, layer, color=None, hextension=0, vextension=0, name=None, netname=None, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=int)\n The coordinates of this object represented as a Numpy array [[x0, y0], [x1, y1]].\n layer : list(str)\n The layer information of this object. 
Its format is [layer, purpose].\n        color : str\n            The color of the metal layer.\n        name : str, optional\n            The name of this object.\n        netname : str, optional\n            The name of the net associated with this object.\n        params : dict or None\n            The dictionary that contains the parameters of this object, with parameter names as keys.\n        \"\"\"\n        self.layer = layer\n        if netname is None:\n            self.netname = name\n        else:\n            self.netname = netname\n        self.hextension = hextension\n        self.vextension = vextension\n        self.color = color\n        PhysicalObject.__init__(self, xy=xy, name=name, params=params)\n\n    def align(self, rect2):\n        \"\"\"\n        Matches the length of self and rect2, assuming that either the width or the height of the rects is zero.\n\n        Parameters\n        ----------\n        rect2 : Rect\n            The rect to be aligned with self.\n        \"\"\"\n        index = 0\n        r0 = self\n        r1 = rect2\n        if r0.xy[0][0] == r0.xy[1][0]:  # width is zero\n            index = 1\n\n        pnt = np.zeros([2, 2], dtype=int)\n        pnt[0][1] = r0.bbox[1][index]  # upper (or right) edge of r0\n        pnt[1][1] = r1.bbox[1][index]  # upper (or right) edge of r1\n        pnt[0][0] = r0.bbox[0][index]  # lower (or left) edge of r0\n        pnt[1][0] = r1.bbox[0][index]  # lower (or left) edge of r1\n\n        if pnt[1][1] > pnt[0][1]:  # r1's top is above r0's top\n            _xy = r0.bbox  # r0 correction\n            _xy[1][index] = pnt[1][1]\n            r0.xy = _xy\n        elif pnt[1][1] < pnt[0][1]:  # r1's top is below r0's top\n            _xy = r1.bbox  # r1 correction\n            _xy[1][index] = pnt[0][1]\n            r1.xy = _xy\n\n        if pnt[1][0] < pnt[0][0]:  # r1's bottom is below r0's bottom\n            _xy = r0.bbox  # r0 correction\n            _xy[0][index] = pnt[1][0]\n            r0.xy = _xy\n        elif pnt[1][0] > pnt[0][0]:  # r1's bottom is above r0's bottom\n            _xy = r1.bbox  # r1 correction\n            _xy[0][index] = pnt[0][0]\n            r1.xy = _xy\n\n    def summarize(self):\n        \"\"\"Returns the summary of the object information.\"\"\"\n        return PhysicalObject.summarize(self) + \", \" + \\\n               \"layer: \" + str(self.layer) + \", \" + \\\n               \"netname: \" + str(self.netname)\n\n\nclass Path(PhysicalObject):\n    \"\"\"\n    A path object.\n\n    Attributes\n    ----------\n    layer\n    netname\n    width\n    extension\n    \"\"\"\n    # TODO: implement pointers.\n\n    layer = None\n    \"\"\"list(str): Path layer. The format is [name, purpose].\"\"\"\n    netname = None\n    \"\"\"str: The name of the net associated with the path.\"\"\"\n    width = None\n    \"\"\"int: The width of the wire.\"\"\"\n    extension = 0\n    \"\"\"int: The amount of extension from both edges of the wire.\"\"\"\n\n    @property\n    def bbox(self):\n        \"\"\"numpy.ndarray(dtype=int): The bounding box for this object, represented as a Numpy array\n        [[x_ll, y_ll], [x_ur, y_ur]].\n        For path objects, the bbox is computed from the first and last points of the path, which works fine for 2-point\n        paths.\n        \"\"\"\n        return np.sort(np.array([self.xy[0], self.xy[-1]]), axis=0)\n\n    def _update_pointers(self):\n        \"\"\"Updates pointers of this object. Called when the value of xy of this object is updated.\"\"\"\n        pass\n\n    def __init__(self, xy, layer, width, extension=0, name=None, netname=None, params=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        xy : numpy.ndarray(dtype=int)\n            The coordinates of this object represented as a Numpy array [[x0, y0], [x1, y1], ..., [xn, yn]].\n        layer : list(str)\n            The layer information of the object. 
Its format is [layer, purpose].\n width : int\n The width of the path.\n extension : int\n The extension of the path.\n name : str, optional\n The name of the object.\n netname : str, optional\n The name of net associated with the object.\n params : dict or None\n The dictionary that contains the parameters of this object, with parameter names as keys.\n \"\"\"\n self.layer = layer\n self.width = width\n self.extension = extension\n self.netname = netname\n PhysicalObject.__init__(self, xy=xy, name=name, params=params)\n self.pointers = dict() # Pointers are invalid for Path objects.\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return PhysicalObject.summarize(self) + \", \" + \\\n \"width: \" + str(self.width) + \", \" + \\\n \"extension: \" + str(self.extension) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"netname: \" + str(self.netname)\n\nclass Pin(IterablePhysicalObject):\n \"\"\"\n A pin object.\n\n Attributes\n ----------\n layer\n netname\n master\n \"\"\"\n\n layer = None\n \"\"\"list(str): The layer of the pin. Its format is [name, purpose].\"\"\"\n\n netname = None\n \"\"\"str: The name of the net associated with the pin.\"\"\"\n\n master = None\n \"\"\"Instance: The master instance of the pin. Used for instance pins only.\"\"\"\n\n @property\n def height(self):\n \"\"\"int: The height of the rect.\"\"\"\n return abs(self.xy[0, 1] - self.xy[1, 1])\n\n @property\n def width(self):\n \"\"\"int: The width of the rect.\"\"\"\n return abs(self.xy[0, 0] - self.xy[1, 0])\n\n @property\n def size(self):\n \"\"\"numpy.ndarray(dtype=int): The size of the rect.\"\"\"\n return np.array([self.width, self.height])\n\n @property\n def height_vec(self):\n \"\"\"numpy.ndarray(dtype=int): Returns np.array([0, height]).\"\"\"\n return np.array([0, self.height])\n\n @property\n def width_vec(self):\n \"\"\"numpy.ndarray(dtype=int): Returns np.array([width, 0]).\"\"\"\n return np.array([self.width, 0])\n\n def __init__(self, xy, layer, name=None, netname=None, params=None, master=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the object. Its default format is [[x0, y0], [x1, y1]].\n layer : list(str)\n Layer information. Its default format is [layer, purpose].\n name : str\n The name of the object.\n netname : str, optional\n The name of net associated with the object. If None, name is used for the net name.\n master : Instance, optional\n Master instance handle.\n params : dict or None\n The dictionary that contains the parameters of this object, with parameter names as keys.\n \"\"\"\n self.layer = np.asarray(layer)\n if netname is None:\n netname = name\n self.netname = netname\n self.master = master\n IterablePhysicalObject.__init__(self, xy=xy, name=name, params=params, elements=elements)\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return IterablePhysicalObject.summarize(self) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"netname: \" + str(self.netname) + \", \" + \\\n \"shape: \" + str(self.shape) + \", \" + \\\n \"master: \" + str(self.master)\n\n def export_to_dict(self):\n db = dict()\n db['xy'] = self.xy.tolist()\n db['layer'] = self.layer.tolist()\n db['name'] = self.name\n db['netname'] = self.netname\n return db\n\nclass Text(PhysicalObject):\n \"\"\"\n A text object.\n\n See Also\n --------\n PhysicalObject\n \"\"\"\n layer = None\n \"\"\"list(str): The layer information of the text. 
Its default format is [name, purpose].\"\"\"\n text = None\n \"\"\"str: The text body.\"\"\"\n\n def __init__(self, xy, layer, text, name=None, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the object. Its default format is [x, y].\n layer : list(str)\n The layer information of the text. Its default format is [name, purpose].\n text : str\n The text entry.\n name : str, optional\n The name of the object.\n params : dict or None\n The dictionary that contains the parameters of this object, with parameter names as keys.\n \"\"\"\n self.layer = layer\n self.text = text\n\n PhysicalObject.__init__(self, xy=xy, name=name, params=params)\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return PhysicalObject.summarize(self) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"text: \" + str(self.text)\n\nclass Instance(IterablePhysicalObject):\n \"\"\"\n An iterable instance object, corresponding to a single/mosaic layout instance.\n \"\"\"\n # TODO: update (maybe) xy and sub-elements after transform property is updated.\n\n libname = None\n \"\"\"str: The library name of the instance.\"\"\"\n\n cellname = None\n \"\"\"str: The cell name of the instance.\"\"\"\n\n viewname = None\n \"\"\"str: The view name of the instance.\"\"\"\n\n shape = None\n \"\"\"np.array([int, int]) or None: The shape of the instance mosaic. None if the instance is non-mosaic.\"\"\"\n\n _pitch = None\n \"\"\"np.array([int, int]) or None: The internal variable for pitch.\"\"\"\n\n unit_size = None\n \"\"\"np.array([int, int]) or None: The unit size(shape=[1,1]) of the instance. \"\"\"\n\n transform = 'R0'\n \"\"\"str: The transform parameter of the instance.\"\"\"\n\n pins = None\n \"\"\"Dict[Pins]: The pins of the instance.\"\"\"\n\n def _update_pins(self, xy_ofst):\n \"\"\"Updates xy-coordinates of this object's pins. 
An internal function for _set_xy()\"\"\"\n if self.pins is not None:\n for pn, p in self.pins.items():\n if np.all(p is not None):\n # Update the x and y coordinate values of elements.\n for n, e in np.ndenumerate(p):\n if e is not None:\n e.xy = e.xy + xy_ofst\n\n def _get_xy(self):\n \"\"\"numpy.ndarray(dtype=numpy.int): Gets the x and y coordinate values of this object.\"\"\"\n return self._xy\n\n def _set_xy(self, value):\n \"\"\"numpy.ndarray(dtype=numpy.int): Sets the x and y coordinate values of this object.\"\"\"\n # Update the coordinate value of its pins.\n self._update_pins(xy_ofst=value - self.xy)\n IterablePhysicalObject._set_xy(self, value=value)\n\n xy = property(_get_xy, _set_xy)\n\n @property\n def xy0(self):\n \"\"\"numpy.ndarray(detype=numpy.int): The primary coordinate of this object represented as a Numpy array [x, y].\"\"\"\n return self.xy\n\n @property\n def xy1(self):\n \"\"\"numpy.ndarray(detype=numpy.int): The secondary coordinate of this object represented as a Numpy array [x, y].\"\"\"\n if self.size is None:\n return self.xy\n else:\n return self.xy + np.dot(self.size, tf.Mt(self.transform).T)\n\n @property\n def size(self):\n \"\"\"numpy.ndarray(dtype=int): The size of the instance, represented as a Numpy array [x_size, y_size].\"\"\"\n if self.shape is None:\n return self.unit_size\n else:\n return (self.shape - np.array([1, 1])) * self.pitch + self.unit_size\n\n def get_pitch(self):\n \"\"\"numpy.ndarray(dtype=int): Gets the pitch of the instance.\"\"\"\n if self._pitch is None:\n return self.unit_size\n else:\n return self._pitch\n\n def set_pitch(self, value):\n \"\"\"numpy.ndarray(dtype=int): Sets the pitch of the instance.\"\"\"\n self._pitch = value\n\n pitch = property(get_pitch, set_pitch)\n \"\"\"numpy.ndarray(dtype=int): The pitch of the instance. Its default format is [x_pitch, y_pitch].\n None if template size is used for the instance pitch.\"\"\"\n\n def get_spacing(self):\n return self.pitch\n\n def set_spacing(self, value):\n self.pitch = value\n\n spacing = property(get_spacing, set_spacing)\n \"\"\"numpy.ndrarray([int, int]): (deprecated) The pitch of the instance. Previously the pitch was named to spacing,\n to be compatible with GDS-II's notations.\"\"\"\n\n @property\n def bbox(self):\n \"\"\"numpy.ndarray(dtype=int): The bounding box of the instance. 
Its default format is\n        [[x_ll, y_ll], [x_ur, y_ur]].\"\"\"\n        bbox = np.array([self.xy0, self.xy1])\n        return np.sort(bbox, axis=0)\n\n    @property\n    def height(self):\n        \"\"\"int: The height of the instance.\"\"\"\n        return abs(self.bbox[1][1] - self.bbox[0][1])\n\n    @property\n    def width(self):\n        \"\"\"int: The width of the instance.\"\"\"\n        return abs(self.bbox[1][0] - self.bbox[0][0])\n\n    @property\n    def height_vec(self):\n        \"\"\"numpy.ndarray(dtype=int): Returns np.array([0, height]).\"\"\"\n        return np.array([0, self.height])\n\n    @property\n    def width_vec(self):\n        \"\"\"numpy.ndarray(dtype=int): Returns np.array([width, 0]).\"\"\"\n        return np.array([self.width, 0])\n\n    def __init__(self, xy, libname, cellname, viewname='layout', shape=None, pitch=None, transform='R0',\n                 unit_size=np.array([0, 0]), pins=None, name=None, params=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        xy : numpy.ndarray(dtype=int)\n            The coordinates of this object represented as a Numpy array [[x0, y0], [x1, y1]].\n        libname : str\n            The library name of the instance.\n        cellname : str\n            The cell name of the instance.\n        viewname : str, optional\n            The view name of the instance. The default value is 'layout'.\n        shape : numpy.ndarray(dtype=int) or None\n            The size of the instance array. The format is [col, row].\n        pitch : numpy.ndarray(dtype=int) or None\n            The stride of the instance array. Its format is [x_pitch, y_pitch]. If None, the template size is used for\n            the array pitch.\n        transform : str\n            The transform parameter. Possible values are 'R0', 'R90', 'R180', 'R270', 'MX', 'MY'.\n        unit_size : List[int] or numpy.ndarray(dtype=np.int)\n            The size of the unit element of this instance.\n        pins : Dict[Pin]\n            The dictionary that contains the pin information.\n        name : str\n            The name of the object.\n        params : dict\n            The dictionary that contains the parameters of this object, with parameter names as keys.\n        \"\"\"\n        # Assign parameters.\n        xy = np.asarray(xy)\n        self.libname = libname\n        self.cellname = cellname\n        self.viewname = viewname\n        if shape is not None:\n            _shape = np.asarray(shape)\n            if _shape.shape != (2, ):\n                raise ValueError('Instance shape should be a (2, ) numpy array or None.')\n            self.shape = _shape\n        if pitch is not None:\n            self.pitch = np.asarray(pitch)\n        self.transform = transform\n        self.unit_size = np.asarray(unit_size)\n\n        # Construct an array for elements.\n        if shape is None:\n            # elements = self  # self-referencing causes recursion errors. Deprecated.\n            elements = None\n        else:\n            _shape = tuple(shape)\n            elements = np.zeros(_shape, dtype=np.object)\n            _it = np.nditer(elements, flags=['multi_index', 'refs_ok'])\n            while not _it.finished:\n                _idx = _it.multi_index\n                _xy = xy + np.dot(self.pitch * np.array(_idx), tf.Mt(self.transform).T)\n                inst = Instance(xy=_xy, libname=libname, cellname=cellname, shape=None, pitch=pitch,\n                                transform=self.transform, unit_size=self.unit_size, pins=pins, name=name, params=params)\n                elements[_idx] = inst\n                _it.iternext()\n\n        IterablePhysicalObject.__init__(self, xy=xy, name=name, params=params, elements=elements)\n        # Create the pin dictionary. 
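Pins are replicated for every array element so that per-element pin coordinates are available. 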
Can we do the same thing without generating these many Pin objects?\n self.pins = dict()\n if pins is not None:\n if not isinstance(pins, dict):\n raise ValueError(\"The pins parameter for Instance objects should be a dictionary.\")\n for pn, p in pins.items():\n _xy0 = xy + np.dot(p.xy, tf.Mt(transform).T)\n if shape is not None:\n elements = []\n for i in range(shape[0]):\n elements.append([])\n for j in range(shape[1]):\n _xy = _xy0 + np.dot(self.pitch * np.array([i, j]), tf.Mt(transform).T)\n # If p has elements, they need to be copied and transferred to the new pin.\n _pelem = None\n if p.elements is not None:\n _pelem = np.empty(p.elements.shape, dtype=object)\n for _idx, _pe in np.ndenumerate(p.elements):\n _pexy0 = xy + np.dot(_pe.xy, tf.Mt(transform).T) \\\n + np.dot(self.pitch * np.array([i, j]), tf.Mt(transform).T)\n _pelem[_idx] = Pin(xy=_pexy0, netname=_pe.netname, layer=_pe.layer, name=_pe.name, master=self)\n pin = Pin(xy=_xy, netname=p.netname, layer=p.layer, name=p.name, master=self,\n elements=_pelem) # master uses self instead of self.elements[i, j].\n elements[i].append(pin)\n elements = np.array(elements)\n else:\n # If p has elements, they need to be copied and transferred to the new pin.\n _pelem = None\n if p.elements is not None:\n _pelem = np.empty(p.elements.shape, dtype=object)\n for _idx, _pe in np.ndenumerate(p.elements):\n _pexy0 = xy + np.dot(_pe.xy, tf.Mt(transform).T)\n _pelem[_idx] = Pin(xy=_pexy0, netname=_pe.netname, layer=_pe.layer, name=_pe.name, master=self)\n elements = _pelem\n self.pins[pn] = Pin(xy=_xy0, netname=p.netname, layer=p.layer, name=p.name, master=self,\n elements=elements)\n\n def summarize(self):\n \"\"\"Summarizes object information.\"\"\"\n return PhysicalObject.summarize(self) + \", \" + \\\n \"size: \" + str(self.size.tolist()) + \", \" + \\\n \"shape: \" + str(None if self.shape is None else self.shape.tolist()) + \", \" + \\\n \"pitch: \" + str(self.pitch.tolist()) + \", \" + \\\n \"transform: \" + str(self.transform) + \", \" + \\\n \"pins: \" + str(self.pins)\n\nclass VirtualInstance(Instance): # IterablePhysicalObject):\n \"\"\"\n A virtual instance object that is composed of multiple physical object and is considered as an instance.\n The VirtualInstance object is instantiated as a separate cellview (with its library and cell names specified)\n or a group with its native elements instantiated.\n \"\"\"\n\n native_elements = None\n # Dict[PhysicalObject] the elements that compose the virtual instance. Its keys represent the names of the elements.\n\n def __init__(self, xy, libname, cellname, native_elements, viewname='layout', shape=None, pitch=None,\n transform='R0', unit_size=np.array([0, 0]), pins=None, name=None, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n xy : numpy.ndarray(dtype=int)\n The value of the x and y coordinate values of this object.\n The xy-coordinate of the object. The format is [x0, y0].\n native_elements : Dict[PhysicalObject]\n A dictionary that contains elements that this object is composed of.\n Its keys represent the names of the elements.\n shape : numpy.ndarray(dtype=int) or None\n The size of the instance array. The format is [col, row].\n pitch : numpy.ndarray(dtype=int) or None\n The stride of the instance array. Its format is [x_pitch, y_pitch]. If none, the template size is used for\n the array pitch.\n transform : str\n The transform parameter. 
Possible values are 'R0', 'R90', 'R180', 'R270', 'MX', 'MY'.\n        unit_size : List[int] or numpy.ndarray(dtype=np.int)\n            The size of the unit element of this instance.\n        pins : Dict[Pin]\n            The dictionary that contains the pin information.\n        name : str\n            The name of the object.\n        params : dict\n            The dictionary that contains the parameters of this object, with parameter names as keys.\n        \"\"\"\n        self.native_elements = native_elements\n\n        Instance.__init__(self, xy=xy, libname=libname, cellname=cellname, viewname=viewname, shape=shape, pitch=pitch,\n                          transform=transform, unit_size=unit_size, pins=pins, name=name, params=params)\n        #Instance.__init__(self, xy=xy, libname='VirtualInstance', cellname='VirtualInstance', shape=shape, pitch=pitch,\n        #                  transform=transform, unit_size=unit_size, pins=pins, name=name, params=params)\n\n    def summarize(self):\n        \"\"\"Summarizes object information.\"\"\"\n        return Instance.summarize(self) + \", \" + \\\n               \"native elements: \" + str(self.native_elements)\n\n    def get_element_position(self, obj):\n        \"\"\"\n        Gets the xy-position of an element relative to the origin.\n\n        Parameters\n        ----------\n        obj : PhysicalObject\n            An element that belongs to self.\n        \"\"\"\n        vinst = self\n        tr = vinst.transform\n        corners = np.zeros((4, 2))\n        v_r = np.zeros(2)  # for rotation\n        bbox_raw = obj.bbox\n        offset = vinst.xy\n        if tr == \"R0\":\n            v_r = v_r + (1, 1)\n            corners[0] = offset + v_r * bbox_raw[0]\n            corners[2] = offset + v_r * bbox_raw[1]\n        elif tr == \"MX\":\n            v_r = v_r + (1, -1)\n            corners[1] = offset + v_r * bbox_raw[0]\n            corners[3] = offset + v_r * bbox_raw[1]\n            corners[0] = corners[0] + (corners[1][0], corners[3][1])\n            corners[2] = corners[2] + (corners[3][0], corners[1][1])\n        elif tr == \"MY\":\n            v_r = v_r + (-1, 1)\n            corners[3] = offset + v_r * bbox_raw[0]\n            corners[1] = offset + v_r * bbox_raw[1]\n            corners[0] = corners[0] + (corners[1][0], corners[3][1])\n            corners[2] = corners[2] + (corners[3][0], corners[1][1])\n        elif tr == \"R90\":\n            v_r = v_r + (-1, -1)\n            corners[2] = offset + v_r * bbox_raw[0]\n            corners[0] = offset + v_r * bbox_raw[1]\n        else:\n            raise ValueError(\"Other transforms are not implemented yet.\")\n        return corners[0], corners[2]\n\n\n# Test\nif __name__ == '__main__':\n    test_rect = False\n    test_path = False\n    test_pin = False\n    test_text = False\n    test_pointer = False\n    test_instance = True\n    test_virtual_instance = False\n\n    # You can create various objects by running part of the following commands.\n    if test_rect:\n        print(\"Rect test\")\n        rect0 = Rect(xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], netname='net0', params={'maxI': 0.005})\n        print(rect0)\n    if test_path:\n        print(\"Path test\")\n        path0 = Path(xy=[[0, 0], [0, 100]], width=10, extension=5, layer=['M1', 'drawing'], netname='net0')\n        print(path0)\n    if test_pin:\n        print(\"Pin test\")\n        pin0 = Pin(xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], netname='net0', master=rect0,\n                   params={'direction': 'input'})\n        print(pin0)\n    if test_text:\n        print(\"Text test\")\n        text0 = Text(xy=[0, 0], layer=['text', 'drawing'], text='test', params=None)\n        print(text0)\n    if test_instance:\n        print(\"Instance test - creating a vanilla instance.\")\n        inst0_pins = dict()\n        inst0_pins['in'] = Pin(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'], netname='in')\n        inst0_pins['out'] = Pin(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'], netname='out')\n        inst0 = Instance(name='I0', xy=[100, 100], libname='mylib', cellname='mycell', shape=[3, 2], pitch=[100, 100],\n                         unit_size=[100, 100], pins=inst0_pins, transform='R0')\n        print(\" \", inst0)\n        print(\" \", inst0.pointers)\n
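        # Additional illustrative accesses (a hedged sketch, not part of the original test):\n        # print(inst0[1, 0].xy)   # xy-coordinate of the element at column 1, row 0 of the 3x2 mosaic\n        # print(inst0.bbox)       # the overall bounding box of the instance array\n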
print(inst0.elements)\n for idx, it in inst0.ndenumerate():\n print(\"what?\")\n print(\" \", idx, it)\n print(\" \", idx, it.pins['in'])\n print(\"Instance test - updating the instance's coordinate values.\")\n inst0.xy = [200, 200]\n print(\" \", inst0)\n print(\" \", inst0.pointers)\n for idx, it in inst0.ndenumerate():\n print(\" \", idx, it)\n print(\" \", idx, it.pins['in'])\n if test_virtual_instance:\n print(\"VirtualInstance test - creating a vanilla virtual instance.\")\n inst1_pins = dict()\n inst1_pins['in'] = Pin(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'], netname='in')\n inst1_pins['out'] = Pin(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'], netname='out')\n inst1_native_elements = dict()\n inst1_native_elements['R0'] = Rect(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'])\n inst1_native_elements['R1'] = Rect(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'])\n inst1_native_elements['R2'] = Rect(xy=[[0, 0], [100, 100]], layer=['prBoundary', 'drawing'])\n inst1 = VirtualInstance(name='I0', libname='mylib', cellname='myvcell', xy=[500, 500],\n native_elements=inst1_native_elements, shape=[3, 2], pitch=[100, 100],\n unit_size=[100, 100], pins=inst1_pins, transform='R0')\n print(\" \", inst1)\n for idx, it in inst1.ndenumerate():\n print(\" \", idx, it.pins['in'])\n for idx, it in inst1.pins['in'].ndenumerate():\n print(\" \", idx, it)\n"
},
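The Instance self-test above illustrates the key mosaic behavior: a `shape`/`pitch` pair expands a single master into an element array, and every pin is replicated per element with a pitch-dependent offset. Below is a minimal sketch of reading those per-element pins back, reusing the exact constructor signatures from the self-test; it assumes laygo2 is installed and importable, and the names are illustrative.

```python
# Sketch based on the Instance self-test above; assumes laygo2 is importable.
from laygo2.object.physical import Instance, Pin

pins = {'in': Pin(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'], netname='in')}
inst = Instance(name='I0', xy=[0, 0], libname='mylib', cellname='mycell',
                shape=[2, 2], pitch=[100, 100], unit_size=[100, 100],
                pins=pins, transform='R0')

# Each element of the 2x2 mosaic carries a shifted copy of the 'in' pin;
# the offset is pitch * [i, j], mapped through the transform matrix.
for idx, elem in inst.ndenumerate():
    print(idx, elem.pins['in'])
# The top-level pin is itself iterable over its per-element copies.
for idx, p in inst.pins['in'].ndenumerate():
    print(idx, p)
```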
{
"alpha_fraction": 0.5461340546607971,
"alphanum_fraction": 0.5553338527679443,
"avg_line_length": 35.72795104980469,
"blob_id": "d65107be1a74cdd5df15f3e3ee91fa4e1e50d47d",
"content_id": "a29e3fab2493204d6b9cb3e3ef698ee41486b780",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29566,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 805,
"path": "/laygo2/object/physical_bak190623.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements classes for various layout objects.\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nimport numpy as np\nimport laygo2.util.transform as tf\n\n\nclass Pointer: #LayoutObject):\n \"\"\"\n Pointer class. Pointers are attached to other LayoutObjects to indicate various positional information\n associated with the object for placing and routing purposes.\n\n See Also\n --------\n LayoutObject\n \"\"\"\n\n name = None\n \"\"\"str or None: the name of the object.\"\"\"\n\n _xy = np.zeros(2, dtype=np.int)\n \"\"\"numpy.ndarray(dtype=numpy.int): the internal variable of xy.\"\"\"\n\n def get_xy(self):\n \"\"\"numpy.ndarray(dtype=int): gets the xy.\"\"\"\n return self._xy\n\n def set_xy(self, value):\n \"\"\"numpy.ndarray(dtype=int): sets the xy.\"\"\"\n self._xy = np.asarray(value, dtype=np.int)\n\n xy = property(get_xy, set_xy)\n \"\"\"numpy.ndarray(detype=numpy.int): the xy-coordinates of the object.\"\"\"\n\n direction = None\n \"\"\"str: the direction of the pointer.\"\"\"\n master = None\n \"\"\"LayoutObject.Instance: the master instance that the point tag is attached.\"\"\"\n params = None\n \"\"\"dict or None: the parameters of the object. \"\"\"\n\n\n def __init__(self, name, xy, direction=None, master=None, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the object. 
Its default format is [x, y].\n direction : str\n The direction of the pointer, which will be used for place functions.\n master : LayoutObject.LayoutObject or its derived classes\n The master instance handle.\n \"\"\"\n self.name = name\n self.xy = xy\n self.params = params\n self.direction = direction\n self.master = master\n\n def __str__(self):\n return self.summarize()\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return self.__repr__() + \" \" \\\n \"name: \" + self.name + \", \" + \\\n \"class: \" + self.__class__.__name__ + \", \" + \\\n \"xy: \" + str(self.xy.tolist()) + \", \" + \\\n \"params: \" + str(self.params) + \", \" + \\\n \"direction: \" + str(self.direction) + \", \" + \\\n \"master: /% \" + str(self.master) + \" %/\"\n\n\nclass LayoutObject:\n \"\"\"\n Basic layout object.\n \"\"\"\n\n name = None\n \"\"\"str or None: the name of the object.\"\"\"\n\n _xy = np.zeros(2, dtype=np.int)\n \"\"\"numpy.ndarray(dtype=numpy.int): the internal variable of xy.\"\"\"\n\n def get_xy(self):\n \"\"\"numpy.ndarray(dtype=int): gets the xy.\"\"\"\n return self._xy\n\n def set_xy(self, value):\n \"\"\"numpy.ndarray(dtype=int): sets the xy.\"\"\"\n self._xy = np.asarray(value, dtype=np.int)\n self.update_pointers()\n\n xy = property(get_xy, set_xy)\n \"\"\"numpy.ndarray(detype=numpy.int): the xy-coordinates of the object.\"\"\"\n\n @property\n def bbox(self):\n \"\"\"numpy.ndarray(dtype=int): the bounding box of the object. Its default format is\n [[x_ll, y_ll], [x_ur, y_ur]]\"\"\"\n return np.sort(np.array([self.xy, self.xy]), axis=0)\n\n params = None\n \"\"\"dict or None: the parameters of the object. \"\"\"\n\n # Pointers\n pointers = dict()\n\n def __init__(self, name, xy, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n xy : numpy.ndarray([int, int])\n The xy-coordinates of the object.\n params : dict or None\n Additional parameters of the object.\n \"\"\"\n self.name = name\n\n # initialize pointers\n self.pointers['left'] = Pointer(name='left', xy=[0, 0], direction='left', master=self)\n self.pointers['right'] = Pointer(name='right', xy=[0, 0], direction='right', master=self)\n self.pointers['bottom'] = Pointer(name='bottom', xy=[0, 0], direction='bottom', master=self)\n self.pointers['top'] = Pointer(name='top', xy=[0, 0], direction='top', master=self)\n self.pointers['bottom_left'] = Pointer(name='bottom_left', xy=[0, 0], direction='bottom_left', master=self)\n self.pointers['bottom_right'] = Pointer(name='bottom_right', xy=[0, 0], direction='bottom_right', master=self)\n self.pointers['top_left'] = Pointer(name='top_left', xy=[0, 0], direction='top_left', master=self)\n self.pointers['top_right'] = Pointer(name='top_right', xy=[0, 0], direction='top_right', master=self)\n self.left = self.pointers['left']\n self.right = self.pointers['right']\n self.bottom = self.pointers['bottom']\n self.top = self.pointers['top']\n self.bottom_left = self.pointers['bottom_left']\n self.bottom_right = self.pointers['bottom_right']\n self.top_left = self.pointers['top_left']\n self.top_right = self.pointers['top_right']\n\n self.params = params\n self.xy = xy\n\n def __str__(self):\n return self.summarize()\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return self.__repr__() + \" \" \\\n \"name: \" + self.name + \", \" + \\\n \"class: \" + self.__class__.__name__ + \", \" + \\\n \"xy: \" + str(self.xy.tolist()) + \", \" + \\\n \"params: \" + 
str(self.params)\n\n def update_pointers(self):\n xy_left = np.diag(np.dot(np.array([[1, 0], [0.5, 0.5]]), self.bbox)).astype(np.int)\n xy_right = np.diag(np.dot(np.array([[0, 1], [0.5, 0.5]]), self.bbox)).astype(np.int)\n xy_bottom = np.diag(np.dot(np.array([[0.5, 0.5], [1, 0]]), self.bbox)).astype(np.int)\n xy_top = np.diag(np.dot(np.array([[0.5, 0.5], [0, 1]]), self.bbox)).astype(np.int)\n xy_bottom_left = np.diag(np.dot(np.array([[1, 0], [1, 0]]), self.bbox)).astype(np.int)\n xy_bottom_right = np.diag(np.dot(np.array([[0, 1], [1, 0]]), self.bbox)).astype(np.int)\n xy_top_left = np.diag(np.dot(np.array([[1, 0], [0, 1]]), self.bbox)).astype(np.int)\n xy_top_right = np.diag(np.dot(np.array([[0, 1], [0, 1]]), self.bbox)).astype(np.int)\n self.pointers['left'].xy = xy_left\n self.pointers['right'].xy = xy_right\n self.pointers['bottom'].xy = xy_bottom\n self.pointers['top'].xy = xy_top\n self.pointers['bottom_left'].xy = xy_bottom_left\n self.pointers['bottom_right'].xy = xy_bottom_right\n self.pointers['top_left'].xy = xy_top_left\n self.pointers['top_right'].xy = xy_top_right\n\n\n\nclass LayoutIterableObject(LayoutObject):\n \"\"\"\n Layout object that contains iterable elements. The iteration feature is implemented through a numpy ndarray object,\n called elements, by mapping iterator-related functions. Indexing sub-elements follows the numpy convention, which\n provides easy multi-dimensional indexing and advanced slicing.\n\n See Also\n --------\n LayoutObject\n \"\"\"\n\n elements = None\n \"\"\"numpy.array(dtype=LayoutObject): the iterable elements.\"\"\"\n\n def __getitem__(self, pos):\n \"\"\"Returns its sub-elements based on pos parameter.\"\"\"\n return self.elements[pos]\n\n def __setitem__(self, key, item):\n self.elements[key] = item\n\n def __iter__(self):\n \"\"\"Iterator function. Directly mapped to its elements.\"\"\"\n return self.elements.__iter__()\n\n def __next__(self):\n \"\"\"Iterator function. Directly mapped to its elements.\"\"\"\n return self.elements.__next__()\n\n def ndenumerate(self):\n \"\"\"Enumerates over the element array. Calls np.ndenumerate() of its elements.\"\"\"\n return np.ndenumerate(self.elements)\n\n def __init__(self, name, xy=None, params=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the layout object. Its default format is [x, y].\n params : dict or None\n The parameters of the object.\n elements : numpy.ndarray(dtype=LayoutObject) or None\n The iterable elements of the object.\n \"\"\"\n\n self.elements = elements\n LayoutObject.__init__(self, name=name, xy=xy, params=params)\n\n\nclass LayoutObjectArray(np.ndarray):\n \"\"\"LayoutObject array class for containing multiple layout objects. Subclassing ndarray to utilize advance slicing\n functions.\"\"\"\n name = None\n \"\"\"str: the name of the object.\"\"\"\n\n params = None\n \"\"\"dict or None: parameters of the object. \"\"\"\n\n def __new__(cls, input_array, name=None, xy=None, params=None):\n \"\"\"\n Constructor for ndarray subclasses - check the NumPy manual for details.\n\n Parameters\n ----------\n input_array : np.ndarray\n An array of LayoutObject objects.\n name : str\n The name of the array.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinate of the object. 
The format is [x0, y0].\n params : dict\n Additional parameters of the array.\n \"\"\"\n # Input array is an already formed ndarray instance\n # We first cast to be our class type\n obj = np.asarray(input_array).view(cls)\n # add the new attribute to the created instance\n obj.name = name\n obj.xy = xy\n obj.params = params\n # Finally, we must return the newly created object:\n return obj\n\n def __array_finalize__(self, obj):\n \"\"\"\n Array finalizing function for subclassing ndarray - check the NumPy manual for details\n \"\"\"\n if obj is None: return\n # Transfer parameters\n self.name = getattr(obj, 'name', None)\n self.xy = getattr(obj, 'xy', None)\n self.params = getattr(obj, 'params', None)\n\n def summarize(self):\n \"\"\"Summarizes object information.\"\"\"\n return \" \" + \\\n \"name:\" + self.name + \", \" + \\\n \"class:\" + self.__class__.__name__ + \", \" + \\\n \"shape:\" + str(self.shape) + \", \" + \\\n \"xy:\" + str(self.xy) + \", \" + \\\n \"params:\" + str(self.params) + \"\\n\"\n\n\nclass Rect(LayoutObject):\n \"\"\"\n Rect object.\n\n See Also\n --------\n LayoutObject\n \"\"\"\n\n layer = None\n \"\"\"list(str): Rect layer. The format is [name, purpose].\"\"\"\n netname = None\n \"\"\"str: the name of the net associated with the rect.\"\"\"\n\n def get_xy0(self):\n \"\"\"Gets the xy0.\"\"\"\n return self.xy[0]\n\n def set_xy0(self, value):\n \"\"\"Sets the xy0.\"\"\"\n self.xy[0] = np.asarray(value)\n self.update_pointers()\n\n xy0 = property(get_xy0, set_xy0)\n \"\"\"numpy.array(dtype=int): the xy-coordinate of the primary (mostly lower-left) corner of the rect.\"\"\"\n\n def get_xy1(self):\n \"\"\"gets the xy1.\"\"\"\n return self.xy[1]\n\n def set_xy1(self, value):\n \"\"\"Sets the xy1.\"\"\"\n self.xy[1] = np.asarray(value)\n self.update_pointers()\n\n xy1 = property(get_xy1, set_xy1)\n \"\"\"numpy.array(dtype=int): the xy-coordinate of the secondary (mostly upper-right) corner of the rect.\"\"\"\n\n @property\n def height(self):\n \"\"\"int: the height of the rect\"\"\"\n return abs(self.xy0[1] - self.xy1[1])\n\n @property\n def width(self):\n \"\"\"int: the width of the rect\"\"\"\n return abs(self.xy0[0] - self.xy1[0])\n\n @property\n def size(self):\n \"\"\"numpy.ndarray(dtype=int): the size of the rect.\"\"\"\n return np.array([self.width, self.height])\n\n @property\n def bbox(self):\n \"\"\"numpy.ndarray(dtype=int): the bounding box of the object. Its default format is\n [[x_ll, y_ll], [x_ur, y_ur]]\"\"\"\n return np.sort(np.array([self.xy0, self.xy1]), axis=0)\n\n pointers = dict()\n \"\"\"dict(): pointer dictionary.\"\"\"\n # frequently used pointers\n left = None\n right = None\n top = None\n bottom = None\n center = None\n bottom_left = None\n bottom_right = None\n top_left = None\n top_right = None\n\n def __init__(self, name, xy, layer, netname=None, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the object. Its format is [[x0, y0], [x1, y1]].\n layer : list(str)\n The layer information of the object. 
Its format is [layer, purpose]\n netname : str, optional\n The name of net associated with the object.\n params : dict\n Additional parameters of the object.\n \"\"\"\n LayoutObject.__init__(self, name, xy, params=params)\n\n self.layer = layer\n self.netname = netname\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return LayoutObject.summarize(self) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"netname: \" + str(self.netname)\n\n\nclass Wire(Rect):\n \"\"\"\n Wire object.\n\n See Also\n --------\n LayoutObject\n \"\"\"\n width = None\n \"\"\"int: the width of the wire\"\"\"\n extension = 0\n \"\"\"int: the amount of extension from the both edges of the wire.\"\"\"\n\n def __init__(self, name, xy, layer, width, extension=0, netname=None, params=None):\n self.width = width\n self.extension = extension\n\n Rect.__init__(self, name, xy, layer, netname=netname, params=params)\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return LayoutObject.summarize(self) + \", \" + \\\n \"width: \" + str(self.width) + \", \" + \\\n \"extension: \" + str(self.extension) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"netname: \" + str(self.netname)\n\n\nclass Pin(LayoutIterableObject):\n \"\"\"\n Pin object.\n\n See Also\n --------\n LayoutIterableObject\n \"\"\"\n\n layer = None\n \"\"\"list(str): the layer of the pin. Its format is [name, purpose].\"\"\"\n netname = None\n \"\"\"str: the name of the net associated with the pin.\"\"\"\n master = None\n \"\"\"Instance: the master instance of the pin. Used for instance pins only.\"\"\"\n elements = None\n \"\"\"numpy.array(dtype=Pin): the array contains its sub-element pins.\"\"\"\n\n def get_xy0(self):\n \"\"\"numpy.ndarray(dtype=int): gets the primary corner of the instance.\"\"\"\n return self.xy[0]\n\n def set_xy0(self, value):\n \"\"\"numpy.ndarray(dtype=int): sets the primary corner of the instance.\"\"\"\n self.xy[0] = np.asarray(value)\n self.update_pointers()\n\n xy0 = property(get_xy0, set_xy0)\n \"\"\"numpy.array(dtype=int): The xy-coordinate of the primary corner of the pin.\"\"\"\n\n def get_xy1(self):\n \"\"\"numpy.ndarray(dtype=int): gets the secondary corner of the instance.\"\"\"\n return self.xy[1]\n\n def set_xy1(self, value):\n \"\"\"numpy.ndarray(dtype=int): sets the primary corner of the instance.\"\"\"\n self.xy[1] = np.asarray(value)\n self.update_pointers()\n\n xy1 = property(get_xy1, set_xy1)\n \"\"\"numpy.array(dtype=int): The xy-coordinate of the secondary corner of the rect.\"\"\"\n\n @property\n def height(self):\n \"\"\"int: the height of the rect.\"\"\"\n return abs(self.xy0[1] - self.xy1[1])\n\n @property\n def width(self):\n \"\"\"int: the width of the rect.\"\"\"\n return abs(self.xy0[0] - self.xy1[0])\n\n @property\n def size(self):\n \"\"\"numpy.ndarray(dtype=int): the size of the rect.\"\"\"\n return np.array([self.width, self.height])\n\n @property\n def bbox(self):\n \"\"\"numpy.ndarray(dtype=int): the bounding box of the object. Its default format is\n [[x_ll, y_ll], [x_ur, y_ur]]\"\"\"\n return np.sort(np.array([self.xy0, self.xy1]), axis=0)\n\n def __init__(self, name, xy, layer, netname=None, master=None, params=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the object. Its default format is [[x0, y0], [x1, y1]].\n layer : list(str)\n Layer information. 
Its default format is [layer, purpose].\n netname : str, optional\n The name of net associated with the object. If None, name is used for the net name.\n master : Instance, optional\n Master instance handle.\n params : dict, optional\n Additional parameters of the object.\n \"\"\"\n self.layer = layer\n if netname is None:\n netname = name\n self.netname = netname\n self.elements = elements\n self.master = master\n\n LayoutIterableObject.__init__(self, name, xy, params=params, elements=elements)\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return LayoutIterableObject.summarize(self) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"netname: \" + str(self.netname) + \", \" + \\\n \"master: \" + str(self.master)\n\n\nclass Text(LayoutObject):\n \"\"\"\n Text object.\n\n See Also\n --------\n LayoutObject\n \"\"\"\n layer = None\n \"\"\"list(str): the layer information of the text. Its default format is [name, purpose].\"\"\"\n text = None\n \"\"\"str: the text body.\"\"\"\n\n def __init__(self, name, xy, layer, text, params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinates of the object. Its default format is [x, y].\n layer : list(str)\n The layer information of the text. Its default format is [name, purpose].\n text : str\n The text entry.\n \"\"\"\n self.layer = layer\n self.text = text\n\n LayoutObject.__init__(self, name, xy, params=params)\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return LayoutObject.summarize(self) + \", \" + \\\n \"layer: \" + str(self.layer) + \", \" + \\\n \"text: \" + str(self.text)\n\n\nclass Instance(LayoutIterableObject):\n \"\"\"\n Instance object, corresponding to a single/mosaic layout instance.\n\n See Also\n --------\n LayoutIterableObject\n \"\"\"\n shape = None\n \"\"\"np.array([int, int]) or None: the shape of the instance mosaic. 
None if the instance is non-mosaic.\"\"\"\n _pitch = None\n \"\"\"np.array([int, int]) or None: the internal variable for pitch.\"\"\"\n _unit_size = None\n \"\"\"np.array([int, int]) or None: the internal variable for unit_size.\"\"\"\n transform = 'R0'\n \"\"\"str: the transform parameter of the instance.\"\"\"\n template = None\n \"\"\"TemplateObject.TemplateObject: the master template of the instance.\"\"\"\n pins = None\n \"\"\"dict(): pins of the instance.\"\"\"\n pointers = dict()\n \"\"\"dict(): pointers of the instance.\"\"\"\n\n # frequently used pointers\n left = None\n right = None\n top = None\n bottom = None\n bottom_left = None\n bottom_right = None\n top_left = None\n top_right = None\n\n @property\n def xy0(self):\n \"\"\"numpy.ndarray(dtype=[int, int]): the xy-coordinate of the object.\"\"\"\n return self.xy\n\n @property\n def xy1(self):\n \"\"\"numpy.ndarray(dtype=[int, int]): the secondary xy-coordinate of the object.\"\"\"\n if self.size is None:\n return self.xy\n else:\n return self.xy + np.dot(self.size, tf.Mt(self.transform).T)\n\n def get_unit_size(self):\n \"\"\"numpy.ndarray(dtype=[int, int]): gets the unit size of the object.\"\"\"\n if self._unit_size is None:\n return self.template.size(params = self.params)\n else:\n return self._unit_size()\n\n def set_unit_size(self, value):\n \"\"\"numpy.ndarray(dtype=[int, int]): sets the unit size of the object.\"\"\"\n self._unit_size = value\n\n unit_size = property(get_unit_size, set_unit_size)\n \"\"\"np.array([int, int]) or None: the unit size of the instance. The unit size is the size of the instance with \n [1, 1] shape.\"\"\"\n\n @property\n def size(self):\n \"\"\"numpy.ndarray(dtype=int): the size of the instance. Its default format is [x_size, y_size].\"\"\"\n if self.shape is None:\n return self.unit_size\n else:\n return (self.shape - np.array([1, 1])) * self.pitch + self.unit_size\n\n def get_pitch(self):\n \"\"\"numpy.ndarray(dtype=int): gets the pitch of the instance.\"\"\"\n if self._pitch is None:\n return self.unit_size\n else:\n return self._pitch\n\n def set_pitch(self, value):\n \"\"\"numpy.ndarray(dtype=int): sets the pitch of the instance.\"\"\"\n self._pitch = value\n\n pitch = property(get_pitch, set_pitch)\n \"\"\"numpy.ndarray(dtype=int): the pitch of the instance. Its default format is [x_pitch, y_pitch].\n None if template size is used for the instance pitch.\"\"\"\n\n def get_spacing(self):\n return self.pitch\n\n def set_spacing(self, value):\n self.pitch = value\n\n spacing = property(get_spacing, set_spacing)\n \"\"\"numpy.ndrarray([int, int]): (deprecated) the pitch of the instance. Previously the pitch was named to spacing,\n to be compatible with GDS-II's notations.\"\"\"\n\n @property\n def bbox(self):\n \"\"\"numpy.ndarray(dtype=int): the bounding box of the instance. Its default format is\n [[x_ll, y_ll], [x_ur, y_ur]]\"\"\"\n bbox = np.array([self.xy, self.xy + np.dot(self.size, tf.Mt(self.transform).T)])\n return np.sort(bbox, axis=0)\n\n def __init__(self, name, xy, template, shape=None, pitch=None, transform='R0', params=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n xy : numpy.ndarray(dtype=int)\n The xy-coordinate of the object. The format is [x0, y0].\n template: Template\n The template object handle.\n shape : numpy.ndarray(dtype=int) or None\n The size of the instance array. The format is [col, row].\n pitch : numpy.ndarray(dtype=int) or None\n The stride of the instance array. Its format is [x_pitch, y_pitch]. 
If none, the template size is used for\n the array pitch.\n transform : str\n The transform parameter. Possible values are 'R0', 'R90', 'R180', 'R270', 'MX', 'MY'.\n params : dict\n Additional parameters of the object.\n \"\"\"\n # Preprocess parameters\n if isinstance(xy, Pointer):\n xy = xy.xy # convert to xy coordinate.\n xy = np.asarray(xy)\n self.template = template\n if shape is not None:\n _shape = np.asarray(shape)\n if _shape.shape != (2, ):\n raise ValueError('Instance shape should be a (2, ) numpy array or None.')\n self.shape = _shape\n if pitch is not None:\n self.pitch = np.asarray(pitch)\n self.transform = transform\n\n # Construct an array for elements.\n if shape is None:\n elements = self # np.array([])\n else:\n _shape = tuple(shape)\n elements = np.zeros(_shape, dtype=np.object)\n # elements = LayoutObjectArray(np.zeros(_shape, dtype=np.object))\n _it = np.nditer(elements, flags=['multi_index', 'refs_ok'])\n while not _it.finished:\n _idx = _it.multi_index\n _xy = xy + np.dot(self.pitch * np.array(_idx), tf.Mt(self.transform).T)\n inst = Instance(name=name, xy=_xy, template=self.template, shape=None, pitch=pitch,\n transform=self.transform, params=params)\n elements[_idx] = inst\n _it.iternext()\n\n LayoutIterableObject.__init__(self, name, xy, params=params, elements=elements)\n\n # Create the pin dictionary\n self.pins = dict()\n pins = template.pins(params=self.params)\n if pins is not None:\n if not isinstance(pins, dict):\n raise ValueError(\"laygo2.Template.pins() should return a dictionary that contains pin information.\")\n else:\n for pn, p in pins.items():\n if shape is not None:\n elements = []\n for i in range(shape[0]):\n elements.append([])\n for j in range(shape[1]):\n _xy = p['xy'] + np.dot(self.pitch * np.array([i, j]), tf.Mt(transform).T)\n pin = Pin(name=pn, xy=_xy, netname=p['netname'], layer=p['layer'], master=self,\n elements=None) # master uses self instead of self.elements[i, j].\n elements[i].append(pin)\n elements = np.array(elements)\n else:\n elements = None\n self.pins[pn] = Pin(name=pn, xy=p['xy'], netname=p['netname'], layer=p['layer'], master=self,\n elements=elements)\n\n def summarize(self):\n \"\"\"Summarizes object information.\"\"\"\n return LayoutObject.summarize(self) + \", \" + \\\n \"template: \" + str(self.template) + \", \" + \\\n \"size: \" + str(self.size) + \", \" + \\\n \"shape: \" + str(self.shape) + \", \" + \\\n \"pitch: \" + str(self.pitch) + \", \" + \\\n \"transform: \" + str(self.transform) + \", \" + \\\n \"pins: \" + str(self.pins)\n\n\n# Test\nif __name__ == '__main__':\n print(\"Rect test\")\n rect0 = Rect(name='R0', xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], netname='net0', params={'maxI': 0.005})\n print(rect0)\n print(\"Wire test\")\n wire0 = Wire(name='R0', xy=[[0, 0], [0, 100]], width=10, extension=5, layer=['M1', 'drawing'], netname='net0', params={'maxI': 0.005})\n print(wire0)\n print(\"Pin test\")\n pin0 = Pin(name='P0', xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], netname='net0', master=rect0,\n params={'direction':'input'})\n print(pin0)\n print(\"Text test\")\n text0 = Text(name='T0', xy=[0, 0], layer=['text', 'drawing'], text='test', params=None)\n print(text0)\n print(\"Pointer test\")\n pointer0 = Pointer(name='PT0', xy=[0, 0], direction='left', master=rect0)\n print(pointer0)\n print(\"Template test\")\n from templates import Template\n template0_pins = dict()\n template0_pins['in'] = {'xy': [[0, 0], [10, 10]], 'netname': 'in', 'layer': ['M1', 'drawing']}\n template0_pins['out'] = 
{'xy': [[90, 90], [100, 100]], 'netname': 'out', 'layer': ['M1', 'drawing']}\n template0 = Template(name='my_template0', xy=[[0, 0], [100, 100]], pins=template0_pins)\n print(template0)\n print(\"Instance test\")\n inst0 = Instance(name='I0', xy=[100, 100], template=template0, shape=[3, 2], pitch=[100, 100], transform='R0')\n print(inst0)\n print(inst0.shape)\n for idx, it in inst0.ndenumerate():\n print(idx, it.pins['in'])\n for idx, it in inst0.pins['in'].ndenumerate():\n print(idx, it)\n"
},
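Both this backup and the current module delegate orientation handling to `laygo2.util.transform.Mt()`, always in the pattern `np.dot(xy, tf.Mt(transform).T)`. The transform module itself is not part of this snapshot, so the sketch below substitutes conventional 2x2 orientation matrices (an assumption, not the library's actual tables) purely to illustrate the arithmetic behind `bbox` and `xy1`.

```python
# Illustrative only: the real matrices come from laygo2.util.transform.Mt(),
# which is not included in this snapshot. These values are assumptions chosen
# to match the conventional meaning of R0/R90/MX/MY.
import numpy as np

MT = {
    'R0':  np.array([[1, 0], [0, 1]]),
    'R90': np.array([[0, -1], [1, 0]]),   # 90-degree counterclockwise rotation
    'MX':  np.array([[1, 0], [0, -1]]),   # mirror across the x-axis
    'MY':  np.array([[-1, 0], [0, 1]]),   # mirror across the y-axis
}

xy, size = np.array([100, 100]), np.array([200, 50])
for t, m in MT.items():
    # Same pattern as Instance.xy1 / Instance.bbox in the modules above.
    print(t, (xy + np.dot(size, m.T)).tolist())
```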
{
"alpha_fraction": 0.7757009267807007,
"alphanum_fraction": 0.7873831987380981,
"avg_line_length": 41.79999923706055,
"blob_id": "464fd9a0c11e7179e9a87e7f99954c97c95cc5ca",
"content_id": "4b32eabc946439013a31fd4a5f545fb2c1a5b07d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 468,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 10,
"path": "/README.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# laygo2\n\nThe LAYout with Gridded Object 2 (LAYGO2) is a layout generation framework\nfor custom integrated circuits.\nLAYGO2 provides a rich and productive layout design environment for designers\nwith several advanced concepts such templates and grids.\n\nFor further documentation, see [./docs_workspace/](./docs_workspace/).\n\nLAYGO2에 대한 상세한 설명은 다음 페이지를 참조하세요: [./docs_workspace/user_guide_kor/](./docs_workspace/user_guide_kor/)\n"
},
{
"alpha_fraction": 0.5621232986450195,
"alphanum_fraction": 0.5669912099838257,
"avg_line_length": 49.151161193847656,
"blob_id": "58dee2676e3eaecdb2df252a58e2e5159b8c6310",
"content_id": "4564498a13950c7f51bb11dcd797ca868277e332",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4314,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 86,
"path": "/examples/quick_start/quick_start_tech/technology_example_grids.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "import laygo2.object.database\nimport laygo2.object.grid\n\nimport numpy as np\nimport yaml\nimport pprint\n\n# Grid library for the advanced example technology (advtech).\n\n# Technology parameters\nif __name__ == '__main__':\n tech_fname = './technology_example.yaml'\nelse:\n tech_fname = './quick_start_tech/technology_example.yaml'\nwith open(tech_fname, 'r') as stream:\n try:\n tech_params = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n\n# Grid library\ndef load_grids(templates):\n \"\"\"\n Load grids to a grid library object.\n\n Parameters\n ----------\n templates: laygo2.object.database.TemplateLibrary\n The template library object that contains via templates.\n \"\"\"\n glib = laygo2.object.database.GridLibrary(name='advtech_grids')\n for gname, grid in tech_params['grids'].items():\n gv = laygo2.object.grid.OneDimGrid(name=gname + '_v', scope=grid['vertical']['scope'],\n elements=grid['vertical']['elements'])\n gh = laygo2.object.grid.OneDimGrid(name=gname + '_h', scope=grid['horizontal']['scope'],\n elements=grid['horizontal']['elements'])\n if grid['type'] == 'placement': # placement grid\n g = laygo2.object.grid.PlacementGrid(name=gname, vgrid=gv, hgrid=gh)\n glib.append(g)\n elif grid['type'] == 'routing': # routing grid\n vwidth = laygo2.object.grid.CircularMapping(elements=grid['vertical']['width'])\n hwidth = laygo2.object.grid.CircularMapping(elements=grid['horizontal']['width'])\n vextension = laygo2.object.grid.CircularMapping(elements=grid['vertical']['extension'])\n hextension = laygo2.object.grid.CircularMapping(elements=grid['horizontal']['extension'])\n vlayer = laygo2.object.grid.CircularMapping(elements=grid['vertical']['layer'], dtype=object)\n hlayer = laygo2.object.grid.CircularMapping(elements=grid['horizontal']['layer'], dtype=object)\n pin_vlayer = laygo2.object.grid.CircularMapping(elements=grid['vertical']['pin_layer'], dtype=object)\n pin_hlayer = laygo2.object.grid.CircularMapping(elements=grid['horizontal']['pin_layer'], dtype=object)\n xcolor = list()\n if 'xcolor' in grid['vertical'].keys():\n if grid['vertical']['xcolor'] == 'not MPT':\n xcolor = np.array(['not MPT']*vlayer.shape[0], dtype=object)\n else:\n xcolor.append( grid['vertical']['xcolor'])\n else:\n xcolor = np.array(['not MPT']*vlayer.shape[0], dtype=object)\n\n ycolor = list()\n if 'ycolor' in grid['horizontal'].keys():\n if grid['horizontal']['ycolor'] == 'not MPT':\n ycolor = np.array(['not MPT']*hlayer.shape[0], dtype=object)\n else:\n ycolor.append( grid['horizontal']['ycolor'])\n else:\n ycolor = np.array(['not MPT']*hlayer.shape[0], dtype=object)\n primary_grid = grid['primary_grid']\n # Create the via map defined by the yaml file.\n vmap_original = grid['via']['map'] # viamap defined in the yaml file.\n vmap_mapped = list() # map template objects to the via map.\n for vmap_org_row in vmap_original:\n vmap_mapped_row = []\n for vmap_org_elem in vmap_org_row:\n vmap_mapped_row.append(templates[vmap_org_elem])\n vmap_mapped.append(vmap_mapped_row)\n viamap = laygo2.object.grid.CircularMappingArray(elements=vmap_mapped, dtype=object)\n\n g = laygo2.object.grid.RoutingGrid(name=gname, vgrid=gv, hgrid=gh,\n vwidth=vwidth, hwidth=hwidth,\n vextension=vextension, hextension=hextension,\n vlayer=vlayer, hlayer=hlayer,\n pin_vlayer=pin_vlayer, pin_hlayer=pin_hlayer,\n viamap=viamap, primary_grid=primary_grid,\n xcolor=xcolor, ycolor=ycolor)\n glib.append(g)\n return glib\n\n"
},
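`load_grids()` above returns a `GridLibrary` keyed by the grid names defined under `grids:` in the yaml file; placement grids come back as `PlacementGrid` objects and routing grids as fully populated `RoutingGrid` objects (widths, extensions, layers, and a via map resolved against the template library). A short usage sketch, with grid names taken from quick_start.py later in this dump:

```python
# Usage sketch; assumes it runs from examples/quick_start/, as quick_start.py does.
import quick_start_tech as tech

templates = tech.load_templates()              # via templates needed by the viamap
grids = tech.load_grids(templates=templates)   # -> laygo2.object.database.GridLibrary

pg = grids['placement_cmos']     # a PlacementGrid ('type: placement' in the yaml)
r12 = grids['routing_12_cmos']   # a RoutingGrid ('type: routing' in the yaml)
print(pg, r12, sep='\n')
```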
{
"alpha_fraction": 0.7627535462379456,
"alphanum_fraction": 0.7695144414901733,
"avg_line_length": 38.682926177978516,
"blob_id": "db9db6e3d7b8ae91fcfa2e039ab5b2f02afdd807",
"content_id": "b3b78c2ce1c3a3d48e8a9c7291a28ac5fc60c722",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1627,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 41,
"path": "/docs_workspace/user_guide/2_examples.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "# laygo2 Design Examples\n\nThis document introduces setup procedures and several design examples for begineers.\n\n* **[Quick installation and setup](#Quick-Installation-and-Setup)** describes the installation and set up procedure of \nlaygo in linux environments.\n* **[Technology setup](#Technology-Setup)** illustrates how to set up laygo2 for new technology nodes.\n* **[simple-gates](#Simple-Gates)**: introduces layout generators for simple logic gates.\n\n## Quick Installation and Setup\n\nCurrently, laygo2 does not support pip installation as the project is in an early development phase, which often \nrequires frequent code updates. Instead, users can easily download and install the laygo2 package by cloning its github \nrepository by using the following command.\n\n >>>> git clone XX\n \nIt is highly recommended that the following command is used periodically to maintain the code to the latest version.\n\n >>>> git pull origin master\n\nSecondly, update the PHTHONPATH environment variable to import the laygo2 package.\n(The example shown below is the simplest one for beginners, and we recommend to use a virtual environment for \nadvanced users.)\n\n # (csh/tcsh example) add the following command to your .cshrc\n setenv PYTHONPATH ${PYTHONPATH}:[LAYGO2_INSTALLATION_DIR]/laygo2\n\n## Technology Setup for Laygo2\n\nTo be added\n\n## Simple Gates\n\nRunning the following command will generate a NAND gate layout.\n\n >>>> run ./laygo2/examples/nand_generate.py\n \nThe resulting layout of the NAND gate is shown in the figure below:\n\n\n"
},
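The setup document above only gives a csh/tcsh example. For quick experiments, the same effect can be achieved from inside Python by extending `sys.path` before the first `import laygo2`; the installation directory in the sketch below is a placeholder, not a path from the repository.

```python
# Per-session alternative to a PYTHONPATH entry; the path is a placeholder.
import sys
sys.path.append('/path/to/LAYGO2_INSTALLATION_DIR/laygo2')

import laygo2  # now resolves against the appended path
```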
{
"alpha_fraction": 0.8684210777282715,
"alphanum_fraction": 0.8684210777282715,
"avg_line_length": 37,
"blob_id": "f49857991adac64f595339dec78b29ee5d67d6ba",
"content_id": "9f814bf03107d4897a5850b439521bccb5a4eca3",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 38,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 1,
"path": "/docs_workspace/README.md",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "Placeholder for future documentation.\n"
},
{
"alpha_fraction": 0.5296849012374878,
"alphanum_fraction": 0.5425617694854736,
"avg_line_length": 36.65501022338867,
"blob_id": "32f707814a99a2efd277d722bf1860f178fc47c7",
"content_id": "b6bf4776eeee475935b0da6f0e917eebdfb84efa",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16153,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 429,
"path": "/laygo2/object/database.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements classes for various database objects.\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nimport laygo2.object\nimport numpy as np\n\n\nclass BaseDatabase:\n \"\"\"class that contains iterable contents.\"\"\"\n\n name = None\n \"\"\"str: the name of the object.\"\"\"\n\n params = None\n \"\"\"dict or None: a dictionary that contains parameters of the design. \"\"\"\n\n elements = None\n \"\"\"dict: the iterable elements of the design.\"\"\"\n\n noname_index = 0\n \"\"\"int: the suffix index of NoName objects (objects without its name).\"\"\"\n\n @property\n def keys(self):\n \"\"\"Returns a list that contains keys of the elements of this object.\"\"\"\n return self.elements.keys\n\n def items(self):\n \"\"\"Matches to the items() function of its elements.\"\"\"\n return self.elements.items()\n\n def __getitem__(self, pos):\n \"\"\"Returns its sub-elements based on pos parameter.\"\"\"\n return self.elements[pos]\n\n def __setitem__(self, key, item):\n item.name = key\n self.append(item)\n\n def append(self, item):\n if isinstance(item, list) or isinstance(item, np.ndarray):\n item_name_list = []\n item_list = []\n for i in item:\n _item_name, _item = self.append(i)\n item_name_list.append(_item_name)\n item_list.append(_item)\n return item_name_list, item_list\n #return [i[0] for i in item_list], [i[1] for i in item_list]\n else:\n item_name = item.name\n if item_name is None: # NoName object. 
Put a name on it.\n while 'NoName_'+str(self.noname_index) in self.elements.keys():\n self.noname_index += 1\n item_name = 'NoName_' + str(self.noname_index)\n self.noname_index += 1\n errstr = item_name + \" cannot be added to \" + self.name + \", as a child object with the same name exists.\"\n if item_name in self.elements.keys():\n raise KeyError(errstr)\n else:\n if item_name in self.elements.keys():\n raise KeyError(errstr)\n else:\n self.elements[item_name] = item\n return item_name, item\n\n def __iter__(self):\n \"\"\"Iterator function. Directly mapped to its elements.\"\"\"\n return self.elements.__iter__()\n\n def __str__(self):\n return self.summarize()\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return self.__repr__() + \" \" + \\\n \"name: \" + self.name + \", \" + \\\n \"params: \" + str(self.params) + \" \\n\" \\\n \" elements: \" + str(self.elements) + \\\n \"\"\n\n def __init__(self, name, params=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the object.\n \"\"\"\n self.name = name\n self.params = params\n\n self.elements = dict()\n if elements is not None:\n for e in elements:\n self[e] = elements[e]\n\n\nclass Library(BaseDatabase):\n \"\"\"This class implements layout libraries that contain designs as their child objects. \"\"\"\n\n def get_libname(self):\n return self.name\n\n def set_libname(self, val):\n self.name = val\n\n libname = property(get_libname, set_libname)\n \"\"\"str: library name\"\"\"\n \n def append(self, item):\n if isinstance(item, list) or isinstance(item, np.ndarray):\n item_name_list = []\n item_list = []\n for i in item:\n _item_name, _item = self.append(i)\n item_name_list.append(_item_name)\n item_list.append(_item)\n return item_name_list, item_list\n else:\n item_name, item = BaseDatabase.append(self, item)\n item.libname = self.name # update library name\n return item_name, item\n\n def __init__(self, name, params=None, elements=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the library.\n \"\"\"\n BaseDatabase.__init__(self, name=name, params=params, elements=elements)\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return BaseDatabase.summarize(self)\n\n\nclass TemplateLibrary(Library):\n \"\"\"This class implements template libraries that contain templates as their child objects. \"\"\"\n # TODO: implement this.\n pass\n\n\nclass GridLibrary(Library):\n \"\"\"This class implements grid libraries that contain grid objects as their child objects. \"\"\"\n # TODO: implement this.\n pass\n\n\nclass Design(BaseDatabase):\n \"\"\"This class implements designs that contain layout objects as their child objects. 
\"\"\"\n\n @property\n def bbox(self):\n \"\"\" get design bbox which is union of instances.bbox \"\"\"\n libname = self.libname\n cellname = self.cellname\n # Compute boundaries\n xy = [None, None]\n for n, i in self.instances.items():\n if xy[0] is None:\n xy[0] = i.bbox[0] # bl\n xy[1] = i.bbox[1] # tr\n else:\n xy[0][0] = min(xy[0][0], i.bbox[0, 0])\n xy[0][1] = min(xy[0][1], i.bbox[0, 1])\n xy[1][0] = max(xy[1][0], i.bbox[1, 0])\n xy[1][1] = max(xy[1][1], i.bbox[1, 1])\n for n, i in self.virtual_instances.items():\n if xy[0] is None:\n xy[0] = i.bbox[0]\n xy[1] = i.bbox[1]\n else:\n xy[0][0] = min(xy[0][0], i.bbox[0, 0])\n xy[0][1] = min(xy[0][1], i.bbox[0, 1])\n xy[1][0] = max(xy[1][0], i.bbox[1, 0])\n xy[1][1] = max(xy[1][1], i.bbox[1, 1])\n xy = np.array(xy)\n return (xy)\n pass\n\n\n def get_libname(self):\n return self._libname\n\n def set_libname(self, val):\n self._libname = val\n\n libname = property(get_libname, set_libname)\n \"\"\"str: library name\"\"\"\n\n def get_cellname(self):\n return self.name\n\n def set_cellname(self, val):\n self.name = val\n\n cellname = property(get_cellname, set_cellname)\n \"\"\"str: cell name\"\"\"\n rects = None\n paths = None\n pins = None\n texts = None\n instances = None\n virtual_instances = None\n\n def __iter__(self):\n \"\"\"Iterator function. Directly mapped to its elements.\"\"\"\n return self.elements.__iter__()\n\n def __init__(self, name, params=None, elements=None, libname=None):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n name : str\n The name of the design.\n libname : str\n The library name of the design.\n \"\"\"\n self.libname = libname\n self.rects = dict()\n self.paths = dict()\n self.pins = dict()\n self.texts = dict()\n self.instances = dict()\n self.virtual_instances = dict()\n BaseDatabase.__init__(self, name=name, params=params, elements=elements)\n\n def append(self, item):\n if isinstance(item, list) or isinstance(item, np.ndarray):\n return [self.append(i) for i in item]\n else:\n if item is None:\n return None, None # don't do anything\n item_name, _item = BaseDatabase.append(self, item)\n if item.__class__ == laygo2.object.Rect:\n self.rects[item_name] = item\n elif item.__class__ == laygo2.object.Path:\n self.paths[item_name] = item\n elif item.__class__ == laygo2.object.Pin:\n self.pins[item_name] = item\n elif item.__class__ == laygo2.object.Text:\n self.texts[item_name] = item\n elif item.__class__ == laygo2.object.Instance:\n self.instances[item_name] = item\n elif item.__class__ == laygo2.object.VirtualInstance:\n self.virtual_instances[item_name] = item\n return item_name, item\n\n def summarize(self):\n \"\"\"Returns the summary of the object information.\"\"\"\n return \\\n BaseDatabase.summarize(self) + \" \\n\" + \\\n \" libname:\" + str(self.libname) + \" \\n\" + \\\n \" rects:\" + str(self.rects) + \" \\n\" + \\\n \" paths:\" + str(self.paths) + \" \\n\" + \\\n \" pins:\" + str(self.pins) + \" \\n\" + \\\n \" texts:\" + str(self.texts) + \" \\n\" + \\\n \" instances:\" + str(self.instances) + \"\\n\" + \\\n \" virtual instances:\" + str(self.virtual_instances) + \\\n \"\"\n\n # Object creation and manipulation functions.\n def place(self, inst, grid, mn):\n \"\"\"Places an instance on the specified coordinate mn, on this grid.\"\"\"\n if isinstance(inst, ( laygo2.object.Instance, laygo2.object.VirtualInstance) ) :\n inst = grid.place(inst, mn)\n self.append(inst)\n return inst\n else:\n matrix = np.asarray( inst )\n size = matrix.shape\n\n if len(size) == 2:\n m, n = size\n else:\n m, n = 
1, size[0]\n matrix = [ matrix ]\n mn_ref = np.array(mn)\n\n for index in range(m):\n row = matrix[index]\n if index != 0 :\n ns = 0\n ms = index -1\n while row[ns] == None: # Right search\n ns = ns + 1\n while matrix[ms][ns] == None: # Down search\n ms = ms - 1\n mn_ref = grid.mn.top_left( matrix[ms][ns] )\n for element in row:\n if isinstance( element, (laygo2.object.Instance, laygo2.object.VirtualInstance) ):\n mn_bl = grid.mn.bottom_left( element )\n mn_comp = mn_ref - mn_bl\n inst_sub = grid.place( element, mn_comp)\n self.append(inst_sub)\n mn_ref = grid.mn.bottom_right( element )\n else:\n if element == None:\n pass\n elif isinstance( element, int):\n mn_ref = mn_ref + [ element,0 ]\n\n def route(self, grid, mn, direction=None, via_tag=None):\n \"\"\"Creates Path and Via objects over the abstract coordinates specified by mn, on this routing grid. \"\"\"\n r = grid.route(mn=mn, direction=direction, via_tag=via_tag)\n self.append(r)\n return r\n\n def route_via_track(self, grid, mn, track, via_tag=[None, True]):\n \"\"\"Creates Path and Via objects over the abstract coordinates specified by mn, \n on the track of specified routing grid. \"\"\"\n r = grid.route_via_track(mn=mn, track=track, via_tag=via_tag)\n self.append(r)\n return r\n\n def via(self, grid, mn, params=None):\n \"\"\"Creates a Via object over the abstract coordinates specified by mn, on this routing grid. \"\"\"\n v = grid.via(mn=mn, params=params)\n self.append(v)\n return v\n\n def pin(self, name, grid, mn, direction=None, netname=None, params=None):\n \"\"\"Creates a Pin object over the abstract coordinates specified by mn, on this routing grid. \"\"\"\n p = grid.pin(name=name, mn=mn, direction=direction, netname=netname, params=params)\n self.append(p)\n return p\n\n # I/O functions\n def export_to_template(self, libname=None, cellname=None):\n \"\"\"Convert this design to a native-instance template\"\"\"\n if libname is None:\n libname = self.libname\n if cellname is None:\n cellname = self.cellname\n\n xy = self.bbox\n pins = self.pins\n return laygo2.object.template.NativeInstanceTemplate(libname=libname, cellname=cellname, bbox=xy, pins=pins)\n\n def get_matchedrects_by_layer(self, lpp ):\n \"\"\" return matched objects by [ \"M*\", \"drawing or pin\"] \"\"\"\n rects = self.rects\n insts = self.instances\n vinsts = self.virtual_instances\n\n obj_check = []\n\n for rname, rect in rects.items():\n if np.array_equal( rect.layer, lpp):\n obj_check.append(rect)\n\n for iname, inst in insts.items():\n for pname , pin in inst.pins.items():\n if np.array_equal( pin.layer, lpp ):\n obj_check.append(pin)\n\n for iname, vinst in vinsts.items():\n for name, inst in vinst.native_elements.items():\n if isinstance(inst, laygo2.object.physical.Rect):\n if np.array_equal( inst.layer, lpp):\n _xy = vinst.get_element_position(inst)\n ninst = laygo2.object.physical.Rect(\n xy=_xy, layer = lpp, hextension = inst.hextension, vextension = inst.vextension\n ,color = inst.color )\n obj_check.append(ninst) ## ninst is for sort, inst should be frozen for implement to layout\n return obj_check\n\n\nif __name__ == '__main__':\n from laygo2.object.physical import *\n # Test\n lib = Library(name='mylib')\n dsn = Design(name='mycell')\n lib.append(dsn)\n rect0 = Rect(xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], name='R0', netname='net0', params={'maxI': 0.005})\n dsn.append(rect0)\n rect1 = Rect(xy=[[200, 0], [300, 100]], layer=['M1', 'drawing'], netname='net0', params={'maxI': 0.005})\n dsn.append(rect1)\n path0 = Path(xy=[[0, 0], [0, 
100]], width=10, extension=5, layer=['M1', 'drawing'], netname='net0',\n params={'maxI': 0.005})\n dsn.append(path0)\n pin0 = Pin(xy=[[0, 0], [100, 100]], layer=['M1', 'pin'], netname='n0', master=rect0, params={'direction': 'input'})\n dsn.append(pin0)\n #text0 = Text(xy=[0, 0], layer=['text', 'drawing'], text='test', params=None)\n #dsn.append(text0)\n inst0_pins = dict()\n inst0_pins['in'] = Pin(xy=[[0, 0], [10, 10]], layer=['M1', 'drawing'], netname='in')\n inst0_pins['out'] = Pin(xy=[[90, 90], [100, 100]], layer=['M1', 'drawing'], netname='out')\n inst0 = Instance(name='I0', xy=[100, 100], libname='mylib', cellname='mycell', shape=[3, 2], pitch=[100, 100],\n unit_size=[100, 100], pins=inst0_pins, transform='R0')\n dsn.append(inst0)\n print(lib)\n print(dsn)"
},
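`Design.append()` above does double bookkeeping: `BaseDatabase.append()` guarantees a unique key (auto-generating `NoName_<n>` for unnamed objects), and the type dispatch then files each object into `rects`, `paths`, `pins`, `texts`, `instances`, or `virtual_instances`. A minimal sketch of that behavior, mirroring the module's own `__main__` test:

```python
# Minimal sketch of Design/Library bookkeeping; mirrors the __main__ test above.
from laygo2.object.database import Library, Design
from laygo2.object.physical import Rect

lib = Library(name='mylib')
dsn = Design(name='mycell')
lib.append(dsn)          # Library.append() also sets dsn.libname = 'mylib'

dsn.append(Rect(xy=[[0, 0], [100, 100]], layer=['M1', 'drawing']))             # unnamed
dsn.append(Rect(xy=[[200, 0], [300, 100]], layer=['M1', 'drawing'], name='R1']))

print(dsn.libname)             # 'mylib'
print(list(dsn.rects.keys()))  # ['NoName_0', 'R1'] -- auto-named, then explicit
```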
{
"alpha_fraction": 0.605427086353302,
"alphanum_fraction": 0.6506991386413574,
"avg_line_length": 49.50349807739258,
"blob_id": "7b3869882a7640e7036585ab736b445f92387104",
"content_id": "79787ac468475e3231e482a65cc58990d491ba20",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7223,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 143,
"path": "/examples/quick_start/quick_start.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\nimport numpy as np\nimport pprint\nimport laygo2\nimport laygo2.interface\nimport quick_start_tech as tech\n\n# Parameter definitions ##############\n# Templates\ntpmos_name = 'pmos'\ntnmos_name = 'nmos'\n# Grids\npg_name = 'placement_cmos'\nr12_name = 'routing_12_cmos'\nr23_name = 'routing_23_cmos'\n# Design hierarchy\nlibname = 'laygo2_test'\ncellname = 'nand2'\n# Design parameters\nnf_a = 3\nnf_b = 4\n# End of parameter definitions #######\n\n# Generation start ###################\n# 1. Load templates and grids.\nprint(\"Load templates\")\ntemplates = tech.load_templates()\ntpmos, tnmos = templates[tpmos_name], templates[tnmos_name]\nprint(templates[tpmos_name], templates[tnmos_name], sep=\"\\n\")\n\nprint(\"Load grids\")\ngrids = tech.load_grids(templates=templates)\npg, r12, r23 = grids[pg_name], grids[r12_name], grids[r23_name]\nprint(grids[pg_name], grids[r12_name], grids[r23_name], sep=\"\\n\")\n\n# 2. Create a design hierarchy.\nlib = laygo2.object.database.Library(name=libname)\ndsn = laygo2.object.database.Design(name=cellname)\nlib.append(dsn)\n\n# 3. Create instances.\nprint(\"Create instances\")\nin0 = tnmos.generate(name='MN0', params={'nf': nf_b})\nsd_swap = False if nf_b % 2 == 1 else True\nin1 = tnmos.generate(name='MN1', params={'nf': nf_a, 'sd_swap': sd_swap})\nip0 = tpmos.generate(name='MP0', transform='MX', params={'nf': nf_b})\nsd_swap = True if nf_b % 2 == 1 else False\nip1 = tpmos.generate(name='MP1', transform='MX', params={'nf': nf_a, 'sd_swap': sd_swap})\n\n# 4. Place instances.\ndsn.place(grid=pg, inst=in0, mn=pg.mn[0, 0])\ndsn.place(grid=pg, inst=in1, mn=pg.mn.bottom_right(in0)) # same with pg == in0.bottom_right\ndsn.place(grid=pg, inst=ip0, mn=pg.mn.top_left(in0) + np.array([0, pg.mn.height(ip0)])) # +height due to MX transform\ndsn.place(grid=pg, inst=ip1, mn=pg.mn.top_right(ip0))\n\n# 5. 
Create and place wires.\nprint(\"Create wires\")\n# A\nra0 = dsn.route(grid=r12, mn=r12.mn.bbox(in1.pins['G']))\nva0 = dsn.via(grid=r12, mn=r12.mn.overlap(ra0, in1.pins['G'], type='array'))\nra1 = dsn.route(grid=r12, mn=r12.mn.bbox(ip1.pins['G']))\nva1 = dsn.via(grid=r12, mn=r12.mn.overlap(ra1, ip1.pins['G'], type='array'))\nva3, ra2, va4 = dsn.route(grid=r23, mn=[r23.mn.bottom_left(ra0), r23.mn.top_left(ra1)], via_tag=[True, True])\n# B\nrb0 = dsn.route(grid=r12, mn=r12.mn.bbox(in0.pins['G']))\nvb0 = dsn.via(grid=r12, mn=r12.mn.overlap(rb0, in0.pins['G'], type='array'))\nrb1 = dsn.route(grid=r12, mn=r12.mn.bbox(ip0.pins['G']))\nvb1 = dsn.via(grid=r12, mn=r12.mn.overlap(rb1, ip0.pins['G'], type='array'))\nvb3, rb2, vb4 = dsn.route(grid=r23, mn=[r23.mn.bottom_left(rb0), r23.mn.top_left(rb1)], via_tag=[True, True])\n# Internal\nif not (nf_a == 1 and nf_b == 1):\n ri0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(in0.pins['D'][0]) + np.array([0, 1]),\n r12.mn.bottom_right(in1.pins['S'][-1]) + np.array([0, 1])])\n vi0 = [dsn.via(grid=r12, mn=r12.mn.overlap(ri0, i, type='point')) for i in in0.pins['D']]\n vi1 = [dsn.via(grid=r12, mn=r12.mn.overlap(ri0, i, type='point')) for i in in1.pins['S']]\n# Output\nron0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(in1.pins['D'][0]) + np.array([0, 2]),\n r12.mn.bottom_right(in1.pins['D'][-1]) + np.array([0, 2])])\nvon0 = [dsn.via(grid=r12, mn=r12.mn.overlap(ron0, i, type='point')) for i in in1.pins['D']]\nrop0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(ip0.pins['D'][0]),\n r12.mn.bottom_right(ip1.pins['D'][-1])])\nvop0 = [dsn.via(grid=r12, mn=r12.mn.overlap(rop0, i, type='point')) for i in ip0.pins['D']]\nvop1 = [dsn.via(grid=r12, mn=r12.mn.overlap(rop0, i, type='point')) for i in ip1.pins['D']]\nm = r23.mn.bottom_right(ra2)[0] + 1\nvo0, ro0, vo1 = dsn.route(grid=r23, mn=np.array([[m, r23.mn.bottom_right(ron0)[1]], [m, r23.mn.bottom_right(rop0)[1]]]),\n via_tag=[True, True])\n# VSS\nrvss0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(in0.pins['S'][0]), r12.mn.bottom_left(in1.pins['S'][0])])\nvvss = [dsn.via(grid=r12, mn=r12.mn.overlap(rvss0, s, type='point')) for s in in0.pins['S']]\n# VDD\nrvdd0 = dsn.route(grid=r12, mn=[r12.mn.top_left(ip0.pins['S'][0]), r12.mn.top_right(ip1.pins['S'][-1])])\nvvdd = [dsn.via(grid=r12, mn=r12.mn.overlap(rvdd0, s, type='point')) for s in ip0.pins['S']]\nvvdd += [dsn.via(grid=r12, mn=r12.mn.overlap(rvdd0, s, type='point')) for s in ip1.pins['S']]\n\n# 6. Create pins.\npa0 = dsn.pin(name='A', grid=r23, mn=r23.mn.bbox(ra2))\npb0 = dsn.pin(name='B', grid=r23, mn=r23.mn.bbox(rb2))\npo0 = dsn.pin(name='O', grid=r23, mn=r23.mn.bbox(ro0))\npvss0 = dsn.pin(name='VSS', grid=r12, mn=r12.mn.bbox(rvss0))\npvdd0 = dsn.pin(name='VDD', grid=r12, mn=r12.mn.bbox(rvdd0))\n\nprint(dsn)\n\n# 7. Export to physical database.\nprint(\"Export design\")\nabstract = False # export abstract\nlaygo2.interface.gds.export(lib, filename=libname+'_'+cellname+'.gds', cellname=None, scale=1e-9,\n layermapfile=\"./quick_start_tech/technology_example.layermap\", physical_unit=1e-9, logical_unit=0.001,\n pin_label_height=0.1, pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n abstract_instances=abstract)\nskill_str = laygo2.interface.skill.export(lib, filename=libname+'_'+cellname+'.il', cellname=None, scale=1e-3)\nprint(skill_str)\n\n# 7-a. Import the GDS file back and display\nwith open(libname+'_'+cellname+'.gds', 'rb') as stream:\n pprint.pprint(laygo2.interface.gds.readout(stream, scale=1e-9))\n\n# 8. 
Export to a template database file.\nnat_temp = dsn.export_to_template()\nlaygo2.interface.yaml.export_template(nat_temp, filename=libname+'_templates.yaml', mode='append')\n\n"
},
{
"alpha_fraction": 0.5693618655204773,
"alphanum_fraction": 0.5784456729888916,
"avg_line_length": 45.44270706176758,
"blob_id": "6c76c33fa1663ef30938c9b6e79d127a49b47755",
"content_id": "e4a1d0c0da184579b4f92e5cc3eae9e8b4346d35",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8917,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 192,
"path": "/laygo2/interface/skill.py",
"repo_name": "yjleeeeee/laygo2",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nThis module implements interface with virtuoso in skill language\n\n\"\"\"\nimport logging\nfrom math import log10\nfrom decimal import *\n\nimport numpy as np\nimport laygo2.object\nimport laygo2.util.transform as tf\n\n__author__ = \"\"\n__maintainer__ = \"\"\n__status__ = \"Prototype\"\n\n\ndef _py2skill_number(value, scale=0.001):\n fmt_str = \"%.\" + \"%d\" % (-1 * log10(scale) + 1) + \"f \" # for truncations\n return fmt_str % (value * scale)\n\n\ndef _py2skill_list(pylist, scale=0.001):\n \"\"\"Convert a python list object to a skill list.\"\"\"\n list_str = \"list( \"\n for item in pylist:\n if isinstance(item, list): # nested list\n list_str += _py2skill_list(item) + \" \"\n elif isinstance(item, np.ndarray): # nested list\n list_str += _py2skill_list(item, scale=scale) + \" \"\n elif isinstance(item, str):\n list_str += \"\\\"\" + str(item) + \"\\\" \"\n elif isinstance(item, int) or isinstance(item, np.integer):\n # fmt_str = \"%.\"+\"%d\" % (-1*log10(scale)+1)+\"f \" # for truncations\n # list_str += fmt_str%(item*scale) + \" \"\n list_str += _py2skill_number(item, scale) + \" \"\n list_str += \")\"\n return list_str\n\n\ndef _translate_obj(objname, obj, scale=0.001, master=None, offset=np.array([0, 0])):\n \"\"\"\n Convert an object to corresponding scale commands.\n offset : np.array([int, int])\n Offsets to obj.xy\n \"\"\"\n if master is None: \n mxy = np.array([0, 0])\n mtf = 'R0'\n else: # if the translated object has a master (e.g. VirtualInstance)\n mxy = master.xy\n mtf = master.transform\n if obj.__class__ == laygo2.object.Rect:\n color = obj.color # coloring func. 
added\n # Invoke _laygo2_generate_rect( cv layer bbox ) in {header_filename}\n _xy = np.sort(obj.xy, axis=0) # make sure obj.xy is sorted\n _xy = mxy + np.dot(_xy + np.array([[-obj.hextension, -obj.vextension], [obj.hextension, obj.vextension]]),\n tf.Mt(mtf).T)\n #_xy = mxy + np.dot(obj.xy + np.array([[-obj.hextension, -obj.vextension], [obj.hextension, obj.vextension]]),\n # tf.Mt(mtf).T)\n return \"_laygo2_generate_rect(cv, %s, %s, \\\"%s\\\") ; for the Rect object %s \\n\" \\\n % (_py2skill_list(obj.layer), _py2skill_list(_xy, scale=scale), color, objname) # coloring func. added\n elif obj.__class__ == laygo2.object.Path:\n # TODO: implement path export function.\n pass\n elif obj.__class__ == laygo2.object.Pin:\n if obj.elements is None:\n _objelem = [obj]\n else:\n _objelem = obj.elements\n for idx, _obj in np.ndenumerate(_objelem):\n # Invoke _laygo2_generate_pin(cv, name, layer, bbox) in {header_filename}\n _xy = mxy + np.dot(_obj.xy, tf.Mt(mtf).T)\n return \"_laygo2_generate_pin(cv, \\\"%s\\\", %s, %s ) ; for the Pin object %s \\n\" \\\n % (_obj.netname, _py2skill_list(_obj.layer), _py2skill_list(_xy, scale=scale),\n objname)\n elif obj.__class__ == laygo2.object.Text:\n # TODO: implement text export function.\n pass\n elif obj.__class__ == laygo2.object.Instance:\n # Invoke _laygo2_generate_instance( cv name libname cellname viewname loc orient num_rows num_cols\n # sp_rows sp_cols params params_order )\n _xy = mxy + np.dot(obj.xy, tf.Mt(mtf).T)\n if master is None: \n transform = obj.transform\n else: # if the translated object has a master (e.g. VirtualInstance)\n transform = tf.combine(obj.transform, master.transform)\n if obj.shape is None:\n num_rows = 1\n num_cols = 1\n sp_rows = 0\n sp_cols = 0\n else:\n num_rows = obj.shape[1]\n num_cols = obj.shape[0]\n sp_rows = _py2skill_number(obj.pitch[1])\n sp_cols = _py2skill_number(obj.pitch[0])\n return \"_laygo2_generate_instance(cv, \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", %s, \\\"%s\\\", %d, %d, %s, %s, %s, %s) \" \\\n \"; for the Instance object %s \\n\" \\\n % (objname, obj.libname, obj.cellname, obj.viewname, _py2skill_list(_xy), transform,\n num_rows, num_cols, sp_rows, sp_cols, \"nil\", \"nil\", objname)\n elif obj.__class__ == laygo2.object.VirtualInstance:\n cmd = \"\"\n if obj.shape is None:\n for elem_name, elem in obj.native_elements.items():\n if not elem.__class__ == laygo2.object.Pin:\n if obj.name == None:\n obj.name='NoName'\n else:\n pass\n cmd += _translate_obj(obj.name + '_' + elem_name, elem, scale=scale, master=obj)\n else: # arrayed VirtualInstance\n for i, j in np.ndindex(tuple(obj.shape.tolist())): # iterate over obj.shape\n for elem_name, elem in obj.native_elements.items():\n if not elem.__class__ == laygo2.object.Pin:\n cmd += _translate_obj(obj.name + '_' + elem_name + str(i) + '_' + str(j), \n elem, scale=scale, master=obj[i, j]) \n return cmd\n else:\n return obj.translate_to_skill() #\n\n return \"\"\n\ndef export(db, filename=None, cellname=None, scale=1e-3, reset_library=False, tech_library=None):\n \"\"\"\n Export a design object to skill code.\n\n Parameters\n ----------\n db: laygo2.database.Library\n Design to export\n filename: str, optional\n If specified, the generated skill script is stored in filename.\n cellname: str or List[str]\n The name(s) of cell(s) to be exported.\n scale: float\n The scaling factor between laygo2's integer coordinats actual physical coordinates.\n reset_library: bool, optional\n If True, the library to export the cells is reset.\n tech_library: str, 
optional\n The name of technology library to be attached to the reset library.\n \"\"\"\n # parse header functions.\n cmd = \"; laygo2 layout export skill script.\\n\\n\"\n import os\n header_filename = os.path.abspath(laygo2.interface.__file__)[:-11] + 'skill_export.il'\n with open(header_filename, 'r') as f:\n cmd += f.read()\n cmd += '\\n'\n\n cellname = db.keys() if cellname is None else cellname # export all cells if cellname is not given.\n cellname = [cellname] if isinstance(cellname, str) else cellname # convert to a list for iteration.\n if reset_library:\n cmd += \"_laygo2_reset_library(\\\"%s\\\" \\\"%s\\\")\\n\" % (db.name, tech_library)\n for cn in cellname:\n cmd += \"\\n; exporting %s__%s\\n\" % (db.name, cn) # open the design.\n logging.debug('Export_to_SKILL: Cellname:' + cn)\n cmd += \"let( (cv) \\n\" # limit the scope of cv\n cmd += \"cv = _laygo2_open_layout(\\\"%s\\\" \\\"%s\\\" \\\"layout\\\")\\n\" % (db.name, cn) # open the design.\n # export objects\n for objname, obj in db[cn].items():\n cmd += _translate_obj(objname, obj, scale=scale)\n cmd += \"_laygo2_save_and_close_layout(cv)\\n\" # close the layout\n cmd += \");let\\n\"\n if filename is not None: # export to a file.\n with open(filename, \"w\") as f:\n f.write(cmd)\n return cmd\n"
}
] | 19 |
zpp360/spider | https://github.com/zpp360/spider | 7ea5862123bc4126a1b53f38bc0758fa8b5d7c95 | d4efc3fad81d0c6bc08d5837e256093c93bb3ce0 | 7e52b6f638cabe11be165ab8bdb1a9f6828b71db | refs/heads/master | 2021-05-14T18:18:07.323301 | 2018-01-03T00:06:41 | 2018-01-03T00:06:41 | 116,070,297 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6413043737411499,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 17.399999618530273,
"blob_id": "5cae5de83a0958d9a8fc806f244153fc612745e2",
"content_id": "9dd9628fcc8e3b788f5362b995c96072fb4b3aea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 5,
"path": "/baike_spider/html_downloader.py",
"repo_name": "zpp360/spider",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\nclass HtmlDownloader(object):\n def download(self, new_url):\n pass\n"
}
] | 1 |
dingchunda/osrm-backend | https://github.com/dingchunda/osrm-backend | eaea3d5b32bdc4e625cb8e28abb22581b1dd3626 | 8750749b83bd9193ca3481c630eefda689ecb73c | 10e199fcc3f754c6e8e45d9e123a35b33fe48d20 | refs/heads/master | 2020-08-10T14:02:32.349802 | 2019-12-17T08:05:37 | 2019-12-17T08:05:37 | 214,356,677 | 0 | 0 | BSD-2-Clause | 2019-10-11T06:09:04 | 2019-11-15T03:47:08 | 2019-11-21T10:01:58 | C++ | [
{
"alpha_fraction": 0.6771300435066223,
"alphanum_fraction": 0.7354260087013245,
"avg_line_length": 23.77777862548828,
"blob_id": "d7f260c0b98618758da0d6cabf9c992a677164c6",
"content_id": "7629515d1302cbede752dbc42664fad5eb5e6cef",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 223,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 9,
"path": "/osrm-ch/profiles/turnbot.lua",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "-- Testbot, with turn penalty\n-- Used for testing turn penalties\n\nrequire 'testbot'\n\nfunction turn_function (angle)\n -- multiplying by 10 converts to deci-seconds see issue #1318\n return 10*20*math.abs(angle)/180\nend\n"
},
{
"alpha_fraction": 0.6492589712142944,
"alphanum_fraction": 0.6610932946205139,
"avg_line_length": 27.715511322021484,
"blob_id": "76aaf0f145cec779b1dbd162d4edce97dfabd880",
"content_id": "c3ab1804a3724df9afcb9019cc07ec66a4f01875",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 36844,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 1283,
"path": "/test/mld.lua",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "-- Car profile\n\napi_version = 4\n\n--local Set \nfunction Set(source)\n set = {}\n if source then\n for i,v in ipairs(source) do\n set[v] = true\n end\n end\n return set\nend\n\n--local find_access_tag = require(\"lib/access\").find_access_tag\nfunction find_access_tag(source,access_tags_hierarchy)\n for i,v in ipairs(access_tags_hierarchy) do\n local tag = source:get_value_by_key(v)\n if tag and tag ~= '' then\n return tag\n end\n end\n return \"\"\nend\n\n--local Sequence \nfunction Sequence(source)\n return source\nend\n\n--local Utils \nUtils = {}\n\n-- split string 'a; b; c' to table with values ['a', 'b', 'c']\n-- so it use just one separator ';'\nfunction Utils.string_list_tokens(str)\n result = {}\n local idx = 0\n for s in str.gmatch(str, \"([^;]*)\") do\n if s ~= nil and s ~= '' then\n idx = idx + 1\n result[idx] = s:gsub(\"^%s*(.-)%s*$\", \"%1\")\n end\n end\n\n return result\nend\n\n-- same as Utils.StringListTokens, but with many possible separators:\n-- ',' | ';' | ' '| '(' | ')'\nfunction Utils.tokenize_common(str)\n result = {}\n local idx = 0\n for s in str.gmatch(str, \"%S+\") do\n if s ~= nil and s ~= '' then\n idx = idx + 1\n result[idx] = s:gsub(\"^%s*(.-)%s*$\", \"%1\")\n end\n end\n\n return result\nend\n\n-- returns true, if string contains a number\nfunction Utils.is_number(str)\n return (tonumber(str) ~= nil)\nend\n\n\n--local tags\nlocal Tags = {}\n\n-- return [forward,backward] values for a specific tag.\n-- e.g. for maxspeed search forward:\n-- maxspeed:forward\n-- maxspeed\n-- and backward:\n-- maxspeed:backward\n-- maxspeed\n\nfunction Tags.get_forward_backward_by_key(way,data,key)\n local forward = way:get_value_by_key(key .. ':forward')\n local backward = way:get_value_by_key(key .. ':backward')\n\n if not forward or not backward then\n local common = way:get_value_by_key(key)\n\n if (data.oneway) then\n if data.is_forward_oneway then\n forward = forward or common\n end\n if data.is_reverse_oneway then\n backward = backward or common\n end\n else\n forward = forward or common\n backward = backward or common\n end\n end\n\n return forward, backward\nend\n\n-- return [forward,backward] values, searching a\n-- prioritized sequence of tags\n-- e.g. for the sequence [maxspeed,advisory] search forward:\n-- maxspeed:forward\n-- maxspeed\n-- advisory:forward\n-- advisory\n-- and for backward:\n-- maxspeed:backward\n-- maxspeed\n-- advisory:backward\n-- advisory\n\nfunction Tags.get_forward_backward_by_set(way,data,keys)\n local forward, backward\n for i,key in ipairs(keys) do\n if not forward then\n forward = way:get_value_by_key(key .. ':forward')\n end\n if not backward then\n backward = way:get_value_by_key(key .. ':backward')\n end\n if not forward or not backward then\n local common = way:get_value_by_key(key)\n forward = forward or common\n backward = backward or common\n end\n if forward and backward then\n break\n end\n end\n\n return forward, backward\nend\n\n-- look through a sequence of keys combined with a prefix\n-- e.g. for the sequence [motorcar,motor_vehicle,vehicle] and the prefix 'oneway' search for:\n-- oneway:motorcar\n-- oneway:motor_vehicle\n-- oneway:vehicle\n\nfunction Tags.get_value_by_prefixed_sequence(way,seq,prefix)\n local v\n for i,key in ipairs(seq) do\n v = way:get_value_by_key(prefix .. ':' .. key)\n if v then\n return v\n end\n end\nend\n\n-- check if key-value pairs are set in a way and return a\n-- corresponding constant if it is. e.g. 
for this input:\n--\n-- local speeds = {\n-- highway = {\n-- residential = 19.112178856160000,\n-- primary = 40\n-- },\n-- amenity = {\n-- parking = 10\n-- }\n-- }\n--\n-- we would check whether the following key-value combinations\n-- are set, and return the corresponding constant:\n--\n-- highway = residential => 19.112178856160000\n-- highway = primary => 40\n-- amenity = parking => 10\n\nfunction Tags.get_constant_by_key_value(way,lookup)\n for key,set in pairs(lookup) do\n local way_value = way:get_value_by_key(key)\n for value,t in pairs(set) do\n if way_value == value then\n return key,value,t\n end\n end\n end\nend\n\n--local Relations \nRelations = {}\n\nfunction is_direction(role)\n return (role == 'north' or role == 'south' or role == 'west' or role == 'east')\nend\n\n-- match ref values to relations data\nfunction Relations.match_to_ref(relations, ref)\n\n function calculate_scores(refs, tag_value)\n local tag_tokens = Set(Utils.tokenize_common(tag_value))\n local result = {}\n for i, r in ipairs(refs) do\n local ref_tokens = Utils.tokenize_common(r)\n local score = 0\n\n for _, t in ipairs(ref_tokens) do\n if tag_tokens[t] then\n if Utils.is_number(t) then\n score = score + 2\n else\n score = score + 1\n end\n end\n end\n\n result[r] = score\n end\n\n return result\n end\n\n local references = Utils.string_list_tokens(ref)\n local result_match = {}\n local order = {}\n for i, r in ipairs(references) do\n result_match[r] = { forward = nil, backward = nil }\n order[i] = r\n end\n\n for i, rel in ipairs(relations) do\n local name_scores = nil\n local name_tokens = {}\n local route_name = rel[\"route_name\"]\n if route_name then\n name_scores = calculate_scores(references, route_name)\n end\n\n local ref_scores = nil\n local ref_tokens = {}\n local route_ref = rel[\"route_ref\"]\n if route_ref then\n ref_scores = calculate_scores(references, route_ref)\n end\n\n -- merge scores\n local direction = rel[\"route_direction\"]\n if direction then\n local best_score = -1\n local best_ref = nil\n\n function find_best(scores)\n if scores then\n for k, v in pairs(scores) do\n if v > best_score then\n best_ref = k\n best_score = v\n end\n end\n end\n end\n\n find_best(name_scores)\n find_best(ref_scores)\n\n if best_ref then\n local result_direction = result_match[best_ref]\n\n local is_forward = rel[\"route_forward\"]\n if is_forward == nil then\n result_direction.forward = direction\n result_direction.backward = direction\n elseif is_forward == true then\n result_direction.forward = direction\n else\n result_direction.backward = direction\n end\n\n result_match[best_ref] = result_direction\n end\n end\n\n end\n\n local result = {}\n for i, r in ipairs(order) do\n result[i] = { ref = r, dir = result_match[r] };\n end\n\n return result\nend\n\nfunction get_direction_from_superrel(rel, relations)\n local result = nil\n local result_id = nil\n local rel_id_list = relations:get_relations(rel)\n\n function set_result(direction, current_rel)\n if (result ~= nil) and (direction ~= nil) then\n print('WARNING: relation ' .. rel:id() .. ' is a part of more than one superrelation: ' .. result_id .. ' and ' .. 
current_rel:id())\n result = nil\n else\n result = direction\n result_id = current_rel:id()\n end\n end\n\n for i, rel_id in ipairs(rel_id_list) do\n local parent_rel = relations:relation(rel_id)\n if parent_rel:get_value_by_key('type') == 'route' then\n local role = parent_rel:get_role(rel)\n\n if is_direction(role) then\n set_result(role, parent_rel)\n else\n local dir = parent_rel:get_value_by_key('direction')\n if is_direction(dir) then\n set_result(dir, parent_rel)\n end\n end\n end\n -- TODO: support forward/backward\n end\n\n return result\nend\n\nfunction Relations.parse_route_relation(rel, way, relations)\n local t = rel:get_value_by_key(\"type\")\n local role = rel:get_role(way)\n local result = {}\n\n function add_extra_data(m)\n local name = rel:get_value_by_key(\"name\")\n if name then\n result['route_name'] = name\n end\n\n local ref = rel:get_value_by_key(\"ref\")\n if ref then\n result['route_ref'] = ref\n end\n end\n\n if t == 'route' then\n local role_direction = nil\n local route = rel:get_value_by_key(\"route\")\n if route == 'road' then\n -- process the case where the direction is set as the role\n if is_direction(role) then\n role_direction = role\n end\n end\n\n local tag_direction = nil\n local direction = rel:get_value_by_key('direction')\n if direction then\n direction = string.lower(direction)\n if is_direction(direction) then\n tag_direction = direction\n end\n end\n\n -- determine direction\n local result_direction = role_direction\n if result_direction == nil and tag_direction ~= '' then\n result_direction = tag_direction\n end\n\n if role_direction ~= nil and tag_direction ~= nil and role_direction ~= tag_direction then\n result_direction = nil\n print('WARNING: conflicting direction in role of way ' .. way:id() .. ' and direction tag in relation ' .. rel:id())\n end\n\n\n -- process superrelations\n local super_dir = get_direction_from_superrel(rel, relations)\n\n -- check if there is a data error\n if (result_direction ~= nil) and (super_dir ~= nil) and (result_direction ~= super_dir) then\n print('ERROR: conflicting relation directions found for way ' .. way:id() .. \n ' relation direction is ' .. result_direction .. ' superrelation direction is ' .. super_dir)\n result_direction = nil\n elseif result_direction == nil then\n result_direction = super_dir\n end\n\n result['route_direction'] = result_direction\n\n if role == 'forward' then\n result['route_forward'] = true\n elseif role == 'backward' then\n result['route_forward'] = false\n else\n result['route_forward'] = nil\n end\n\n add_extra_data(m)\n end\n\n return result\nend\n\nfunction Relations.process_way_refs(way, relations, result)\n local parsed_rel_list = {}\n local rel_id_list = relations:get_relations(way)\n for i, rel_id in ipairs(rel_id_list) do\n local rel = relations:relation(rel_id)\n parsed_rel_list[i] = Relations.parse_route_relation(rel, way, relations)\n end\n\n -- now process relations data\n local matched_refs = nil;\n if result.ref then\n local match_res = Relations.match_to_ref(parsed_rel_list, result.ref)\n\n function gen_ref(is_forward)\n local ref = ''\n for _, m in pairs(match_res) do\n if ref ~= '' then\n ref = ref .. '; '\n end\n\n local dir = m.dir.forward\n if is_forward == false then\n dir = m.dir.backward\n end\n\n if dir then\n ref = ref .. m.ref .. ' $' .. dir\n else\n ref = ref .. 
m.ref\n end\n end\n\n return ref\n end\n\n result.forward_ref = gen_ref(true)\n result.backward_ref = gen_ref(false)\n end\nend\n\n--local Guidance\n\n-- Guidance: Default Mapping from roads to types/priorities\nhighway_classes = {\n motorway = road_priority_class.motorway,\n motorway_link = road_priority_class.motorway_link,\n trunk = road_priority_class.trunk,\n trunk_link = road_priority_class.trunk_link,\n primary = road_priority_class.primary,\n primary_link = road_priority_class.primary_link,\n secondary = road_priority_class.secondary,\n secondary_link = road_priority_class.secondary_link,\n tertiary = road_priority_class.tertiary,\n tertiary_link = road_priority_class.tertiary_link,\n unclassified = road_priority_class.unclassified,\n residential = road_priority_class.main_residential,\n service = road_priority_class.alley,\n living_street = road_priority_class.side_residential,\n track = road_priority_class.bike_path,\n path = road_priority_class.bike_path,\n footway = road_priority_class.foot_path,\n pedestrian = road_priority_class.foot_path,\n steps = road_priority_class.foot_path\n}\n\ndefault_highway_class = road_priority_class.connectivity;\n\nmotorway_types = Set {\n 'motorway',\n 'motorway_link',\n 'trunk',\n 'trunk_link'\n}\n\n-- these road types are set with a car in mind. For bicycle/walk we probably need different ones\nroad_types = Set {\n 'motorway',\n 'motorway_link',\n 'trunk',\n 'trunk_link',\n 'primary',\n 'primary_link',\n 'secondary',\n 'secondary_link',\n 'tertiary',\n 'tertiary_link',\n 'unclassified',\n 'residential',\n 'living_street'\n}\n\nlink_types = Set {\n 'motorway_link',\n 'trunk_link',\n 'primary_link',\n 'secondary_link',\n 'tertiary_link'\n}\n\n-- returns forward,backward psv lane count\nlocal function get_psv_counts(way,data)\n local psv_forward, psv_backward = Tags.get_forward_backward_by_key(way,data,'lanes:psv')\n if psv_forward then\n psv_forward = tonumber(psv_forward)\n end\n if psv_backward then\n psv_backward = tonumber(psv_backward)\n end\n return psv_forward or 0,\n psv_backward or 0\nend\n\n-- trims lane string with regard to supported lanes\nlocal function process_lanes(turn_lanes,vehicle_lanes,first_count,second_count)\n if turn_lanes then\n if vehicle_lanes then\n return applyAccessTokens(turn_lanes,vehicle_lanes)\n elseif first_count ~= 0 or second_count ~= 0 then\n return trimLaneString(turn_lanes, first_count, second_count)\n else\n return turn_lanes\n end\n end\nend\n\n-- this is broken for left-sided driving. 
It needs to switch left and right in case of left-sided driving\nfunction get_turn_lanes(way,data)\n local psv_fw, psv_bw = get_psv_counts(way,data)\n local turn_lanes_fw, turn_lanes_bw = Tags.get_forward_backward_by_key(way,data,'turn:lanes')\n local vehicle_lanes_fw, vehicle_lanes_bw = Tags.get_forward_backward_by_key(way,data,'vehicle:lanes')\n\n --note: backward lanes swap psv_bw and psv_fw\n return process_lanes(turn_lanes_fw,vehicle_lanes_fw,psv_bw,psv_fw) or turn_lanes,\n process_lanes(turn_lanes_bw,vehicle_lanes_bw,psv_fw,psv_bw) or turn_lanes\nend\n\nfunction set_classification (highway, result, input_way)\n if motorway_types[highway] then\n result.road_classification.motorway_class = true\n end\n if link_types[highway] then\n result.road_classification.link_class = true\n end\n\n if highway_classes[highway] ~= nil then\n result.road_classification.road_priority_class = highway_classes[highway]\n else\n result.road_classification.road_priority_class = default_highway_class\n end\n \n if road_types[highway] then\n result.road_classification.may_be_ignored = false;\n else\n result.road_classification.may_be_ignored = true;\n end\n\n local lane_count = input_way:get_value_by_key(\"lanes\")\n if lane_count then\n local lc = tonumber(lane_count)\n if lc ~= nil then\n result.road_classification.num_lanes = lc\n end\n else\n local total_count = 0\n local forward_count = input_way:get_value_by_key(\"lanes:forward\")\n if forward_count then\n local fc = tonumber(forward_count)\n if fc ~= nil then\n total_count = fc\n end\n end\n local backward_count = input_way:get_value_by_key(\"lanes:backward\")\n if backward_count then\n local bc = tonumber(backward_count)\n if bc ~= nil then\n total_count = total_count + bc\n end\n end\n if total_count ~= 0 then\n result.road_classification.num_lanes = total_count\n end\n end\nend\n\n--local Destination\nfunction get_directional_tag(way, is_forward, tag)\n local v\n if is_forward then\n v = way:get_value_by_key(tag .. ':forward') or way:get_value_by_key(tag)\n else\n v = way:get_value_by_key(tag .. ':backward') or way:get_value_by_key(tag)\n end\n if v then\n return v.gsub(v, ';', ', ')\n end\nend\n\n-- Assemble destination as: \"A59: Düsseldorf, Köln\"\n-- destination:ref ^ ^ destination\n\nfunction get_destination(way, is_forward)\n ref = get_directional_tag(way, is_forward, 'destination:ref')\n dest = get_directional_tag(way, is_forward, 'destination')\n street = get_directional_tag(way, is_forward, 'destination:street')\n if ref and dest then\n return ref .. ': ' .. 
dest\n else\n return ref or dest or street or ''\n end\nend\n\n--local WayHandlers\nWayHandlers = {}\n\n-- set default mode\nfunction WayHandlers.default_mode(profile,way,result,data)\n result.forward_mode = profile.default_mode\n result.backward_mode = profile.default_mode\nend\n\n-- handles name, including ref and pronunciation\nfunction WayHandlers.names(profile,way,result,data)\n -- parse the remaining tags\n local name = way:get_value_by_key(\"name\")\n local pronunciation = way:get_value_by_key(\"name:pronunciation\")\n local ref = way:get_value_by_key(\"ref\")\n local exits = way:get_value_by_key(\"junction:ref\")\n\n -- Set the name that will be used for instructions\n if name then\n result.name = name\n end\n\n if ref then\n result.ref = canonicalizeStringList(ref, \";\")\n end\n\n if pronunciation then\n result.pronunciation = pronunciation\n end\n\n if exits then\n result.exits = canonicalizeStringList(exits, \";\")\n end\nend\n\n-- junctions\nfunction WayHandlers.roundabouts(profile,way,result,data)\n local junction = way:get_value_by_key(\"junction\");\n\n if junction == \"roundabout\" then\n result.roundabout = true\n end\nend\n\n-- determine if this way can be used as a start/end point for routing\nfunction WayHandlers.startpoint(profile,way,result,data)\n result.is_startpoint = result.forward_mode == profile.default_mode or\n result.backward_mode == profile.default_mode\nend\n\n-- handle turn lanes\nfunction WayHandlers.turn_lanes(profile,way,result,data)\n local forward, backward = get_turn_lanes(way,data)\n\n if forward then\n result.turn_lanes_forward = forward\n end\n\n if backward then\n result.turn_lanes_backward = backward\n end\nend\n\n-- set the road classification based on guidance globals configuration\nfunction WayHandlers.classification(profile,way,result,data)\n set_classification(data.highway,result,way)\nend\n\n-- handle destination tags\nfunction WayHandlers.destinations(profile,way,result,data)\n if data.is_forward_oneway or data.is_reverse_oneway then\n local destination = get_destination(way, data.is_forward_oneway)\n result.destinations = canonicalizeStringList(destination, \",\")\n end\nend\n\n-- handling ferries and piers\nfunction WayHandlers.ferries(profile,way,result,data)\n local route = data.route\n if route then\n local route_speed = profile.route_speeds[route]\n if route_speed and route_speed > 0 then\n local duration = way:get_value_by_key(\"duration\")\n if duration and durationIsValid(duration) then\n result.duration = math.max(parseDuration(duration), 1 )\n end\n result.forward_mode = mode.ferry\n result.backward_mode = mode.ferry\n result.forward_speed = route_speed\n result.backward_speed = route_speed\n end\n end\nend\n\n-- handling movable bridges\nfunction WayHandlers.movables(profile,way,result,data)\n local bridge = data.bridge\n if bridge then\n local bridge_speed = profile.bridge_speeds[bridge]\n if bridge_speed and bridge_speed > 0 then\n local capacity_car = way:get_value_by_key(\"capacity:car\")\n if capacity_car ~= 0 then\n result.forward_mode = profile.default_mode\n result.backward_mode = profile.default_mode\n local duration = way:get_value_by_key(\"duration\")\n if duration and durationIsValid(duration) then\n result.duration = math.max( parseDuration(duration), 1 )\n else\n result.forward_speed = bridge_speed\n result.backward_speed = bridge_speed\n end\n end\n end\n end\nend\n\n-- service roads\nfunction WayHandlers.service(profile,way,result,data)\n local service = way:get_value_by_key(\"service\")\n if service then\n -- 
Set don't allow access to certain service roads\n if profile.service_tag_forbidden[service] then\n result.forward_mode = mode.inaccessible\n result.backward_mode = mode.inaccessible\n return false\n end\n end\nend\n\n-- check accessibility by traversing our access tag hierarchy\nfunction WayHandlers.access(profile,way,result,data)\n data.forward_access, data.backward_access =\n Tags.get_forward_backward_by_set(way,data,profile.access_tags_hierarchy)\n\n -- only allow a subset of roads to be treated as restricted\n if profile.restricted_highway_whitelist[data.highway] then\n if profile.restricted_access_tag_list[data.forward_access] then\n result.forward_restricted = true\n end\n\n if profile.restricted_access_tag_list[data.backward_access] then\n result.backward_restricted = true\n end\n end\n\n -- blacklist access tags that aren't marked as restricted\n if profile.access_tag_blacklist[data.forward_access] and not result.forward_restricted then\n result.forward_mode = mode.inaccessible\n end\n\n if profile.access_tag_blacklist[data.backward_access] and not result.backward_restricted then\n result.backward_mode = mode.inaccessible\n end\n\n if result.forward_mode == mode.inaccessible and result.backward_mode == mode.inaccessible then\n return false\n end\nend\n\n-- handle speed (excluding maxspeed)\nfunction WayHandlers.speed(profile,way,result,data)\n if result.forward_speed ~= -1 then\n return -- abort if already set, eg. by a route\n end\n\n local key,value,speed = Tags.get_constant_by_key_value(way,profile.speeds)\n \n if speed then\n -- set speed by way type\n result.forward_speed = speed\n result.backward_speed = speed\n else\n -- Set the avg speed on ways that are marked accessible\n if profile.access_tag_whitelist[data.forward_access] then\n result.forward_speed = profile.default_speed\n elseif data.forward_access and not profile.access_tag_blacklist[data.forward_access] then\n result.forward_speed = profile.default_speed -- fallback to the avg speed if access tag is not blacklisted\n elseif not data.forward_access and data.backward_access then\n result.forward_mode = mode.inaccessible\n end\n \n if profile.access_tag_whitelist[data.backward_access] then\n result.backward_speed = profile.default_speed\n elseif data.backward_access and not profile.access_tag_blacklist[data.backward_access] then\n result.backward_speed = profile.default_speed -- fallback to the avg speed if access tag is not blacklisted\n elseif not data.backward_access and data.forward_access then\n result.backward_mode = mode.inaccessible\n end\n end\n\n if data.forward_access == \"private\" then\n result.forward_speed = profile.private_speed\n end\n\n if data.backward_access == \"private\" then\n result.backward_speed = profile.private_speed\n end\n\n if result.forward_speed == -1 and result.backward_speed == -1 and result.duration <= 0 then\n return false\n end\nend\n\n-- add class information\nfunction WayHandlers.classes(profile,way,result,data)\n if not profile.classes then\n return\n end\n\n local allowed_classes = Set {}\n for k, v in pairs(profile.classes) do\n allowed_classes[v] = true\n end\n\n local forward_toll, backward_toll = Tags.get_forward_backward_by_key(way, data, \"toll\")\n local forward_route, backward_route = Tags.get_forward_backward_by_key(way, data, \"route\")\n local tunnel = way:get_value_by_key(\"tunnel\")\n\n if allowed_classes[\"tunnel\"] and tunnel and tunnel ~= \"no\" then\n result.forward_classes[\"tunnel\"] = true\n result.backward_classes[\"tunnel\"] = true\n end\n\n if 
allowed_classes[\"toll\"] and forward_toll == \"yes\" then\n result.forward_classes[\"toll\"] = true\n end\n if allowed_classes[\"toll\"] and backward_toll == \"yes\" then\n result.backward_classes[\"toll\"] = true\n end\n\n if allowed_classes[\"ferry\"] and forward_route == \"ferry\" then\n result.forward_classes[\"ferry\"] = true\n end\n if allowed_classes[\"ferry\"] and backward_route == \"ferry\" then\n result.backward_classes[\"ferry\"] = true\n end\n\n if allowed_classes[\"restricted\"] and result.forward_restricted then\n result.forward_classes[\"restricted\"] = true\n end\n if allowed_classes[\"restricted\"] and result.backward_restricted then\n result.backward_classes[\"restricted\"] = true\n end\n\n if allowed_classes[\"motorway\"] and (data.highway == \"motorway\" or data.highway == \"motorway_link\") then\n result.forward_classes[\"motorway\"] = true\n result.backward_classes[\"motorway\"] = true\n end\nend\n\n-- scale speeds to get better average driving times\nfunction WayHandlers.penalties(profile,way,result,data)\n if profile.properties.weight_name == 'routability' then\n if result.forward_speed > 0 then\n result.forward_rate = (result.forward_speed) / 3.6\n end\n if result.backward_speed > 0 then\n result.backward_rate = (result.backward_speed) / 3.6\n end\n if result.duration > 0 then\n result.weight = result.duration \n end\n end\nend\n\n\n-- handle oneways tags\nfunction WayHandlers.oneway(profile,way,result,data)\n if not profile.oneway_handling then\n return\n end\n\n local oneway\n if profile.oneway_handling == true then\n oneway = Tags.get_value_by_prefixed_sequence(way,profile.restrictions,'oneway') or way:get_value_by_key(\"oneway\")\n elseif profile.oneway_handling == 'specific' then\n oneway = Tags.get_value_by_prefixed_sequence(way,profile.restrictions,'oneway')\n elseif profile.oneway_handling == 'conditional' then\n -- Following code assumes that `oneway` and `oneway:conditional` tags have opposite values and takes weakest (always `no`).\n -- So if we will have:\n -- oneway=yes, oneway:conditional=no @ (condition1)\n -- oneway=no, oneway:conditional=yes @ (condition2)\n -- condition1 will be always true and condition2 will be always false.\n if way:get_value_by_key(\"oneway:conditional\") then\n oneway = \"no\"\n else\n oneway = Tags.get_value_by_prefixed_sequence(way,profile.restrictions,'oneway') or way:get_value_by_key(\"oneway\")\n end\n end\n\n data.oneway = oneway\n\n if oneway == \"-1\" then\n data.is_reverse_oneway = true\n result.forward_mode = mode.inaccessible\n elseif oneway == \"yes\" or\n oneway == \"1\" or\n oneway == \"true\" then\n data.is_forward_oneway = true\n result.backward_mode = mode.inaccessible\n elseif profile.oneway_handling == true then\n local junction = way:get_value_by_key(\"junction\")\n if data.highway == \"motorway\" or\n junction == \"roundabout\" or\n junction == \"circular\" then\n if oneway ~= \"no\" then\n -- implied oneway\n data.is_forward_oneway = true\n result.backward_mode = mode.inaccessible\n end\n end\n end\nend\n\nfunction WayHandlers.weights(profile,way,result,data)\n if profile.properties.weight_name == 'distance' then\n result.weight = -1\n -- set weight rates to 1 for the distance weight, edge weights are distance / rate\n if (result.forward_mode ~= mode.inaccessible and result.forward_speed > 0) then\n result.forward_rate = 1\n end\n if (result.backward_mode ~= mode.inaccessible and result.backward_speed > 0) then\n result.backward_rate = 1\n end\n end\nend\n\n-- Call a sequence of handlers, aborting in 
case a handler returns false. Example:\n--\n-- handlers = Sequence {\n-- WayHandlers.tag_prefetch,\n-- WayHandlers.default_mode,\n-- WayHandlers.blocked_ways,\n-- WayHandlers.access,\n-- WayHandlers.speed,\n-- WayHandlers.names\n-- }\n--\n-- WayHandlers.run(handlers,way,result,data,profile)\n--\n-- Each method in the list will be called on the WayHandlers object.\n-- All handlers must accept the parameters (profile, way, result, data, relations) and return false\n-- if the handler chain should be aborted.\n-- To ensure the correct order of method calls, use a Sequence of handler names.\n\nfunction WayHandlers.run(profile, way, result, data, handlers, relations)\n for i,handler in ipairs(handlers) do\n if handler(profile, way, result, data, relations) == false then\n return false\n end\n end\nend\n\nspeed_profile = {\n [\"motorway\"] = 50.210252228560002,\n [\"motorway_link\"] = 30.442746367840002,\n [\"trunk\"] = 38.910443051919998,\n [\"trunk_link\"] = 41.463545034719999,\n [\"primary\"] = 30.121716837280001,\n [\"primary_link\"] = 26.335761231272002,\n [\"secondary\"] = 26.828629389463998,\n [\"secondary_link\"] = 25.553861161440000,\n [\"tertiary\"] = 24.271965765887998,\n [\"tertiary_link\"] = 22.194191398904000,\n [\"unclassified\"] = 26.588079653855999,\n [\"residential\"] = 19.112178856160000,\n [\"living_street\"] = 7.616016031872000,\n [\"service\"] = 20.310053947280000,\n [\"movable\"] = 14.000000000000000,\n [\"shuttle_train\"] = 19.000000000000000,\n [\"default\"] = 11.000000000000000,\n [\"private\"] = 8.000000000000000\n -- Tuningid = lua, date = 2019-09-04\n}\n\nfunction setup()\n return {\n properties = {\n max_speed_for_map_matching = 180/3.6, -- 180kmph -> m/s\n -- For routing based on duration, but weighted for preferring certain roads\n weight_name = 'routability',\n -- For shortest duration without penalties for accessibility\n -- weight_name = 'duration',\n -- For shortest distance without penalties for accessibility\n -- weight_name = 'distance',\n process_call_tagless_node = false,\n u_turn_penalty = 20,\n continue_straight_at_waypoint = true,\n use_turn_restrictions = true,\n left_hand_driving = false,\n traffic_light_penalty = 2,\n },\n\n default_mode = mode.driving,\n default_speed = speed_profile.default,\n private_speed = speed_profile.private,\n oneway_handling = true,\n turn_penalty = 7.5,\n cardinal_directions = false,\n\n -- a list of suffixes to suppress in name change instructions. 
The suffixes also include common substrings of each other\n suffix_list = {\n 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'North', 'South', 'West', 'East', 'Nor', 'Sou', 'We', 'Ea'\n },\n\n barrier_whitelist = Set {\n 'cattle_grid',\n 'border_control',\n 'checkpoint',\n 'toll_booth',\n 'sally_port',\n 'gate',\n 'lift_gate',\n 'no',\n 'entrance'\n },\n\n access_tag_whitelist = Set {\n 'yes',\n 'private',\n 'motorcar',\n 'motor_vehicle',\n 'vehicle',\n 'permissive',\n 'designated',\n 'destination'\n },\n\n access_tag_blacklist = Set {\n 'no',\n 'agricultural',\n 'forestry',\n 'emergency',\n 'psv',\n 'delivery'\n },\n\n restricted_access_tag_list = Set {\n 'delivery',\n 'destination'\n },\n\n access_tags_hierarchy = Sequence {\n 'motorcar',\n 'motor_vehicle',\n 'vehicle',\n 'access'\n },\n\n service_tag_forbidden = Set {\n 'emergency_access'\n },\n\n restrictions = Sequence {\n 'motorcar',\n 'motor_vehicle',\n 'vehicle'\n },\n\n classes = Sequence {\n 'toll', 'motorway', 'ferry', 'restricted', 'tunnel'\n },\n\n speeds = Sequence {\n highway = speed_profile\n },\n\n restricted_highway_whitelist = Set {\n 'motorway',\n 'motorway_link',\n 'trunk',\n 'trunk_link',\n 'primary',\n 'primary_link',\n 'secondary',\n 'secondary_link',\n 'tertiary',\n 'tertiary_link',\n 'unclassified',\n 'residential',\n 'living_street',\n 'service'\n },\n\n route_speeds = {\n ferry = 5,\n shuttle_train = speed_profile.shuttle_train\n },\n\n bridge_speeds = {\n movable = speed_profile.movable\n },\n relation_types = Sequence {\n \"route\"\n },\n }\nend\n\nfunction process_node(profile, node, result, relations)\n -- parse access and barrier tags\n local access = find_access_tag(node, profile.access_tags_hierarchy)\n if access and access ~= \"\" then\n if profile.access_tag_blacklist[access] then\n result.barrier = true\n end\n else\n local barrier = node:get_value_by_key(\"barrier\")\n if barrier and \"\" ~= barrier then\n -- make an exception for rising bollard barriers\n local bollard = node:get_value_by_key(\"bollard\")\n local rising_bollard = bollard and \"rising\" == bollard\n\n if not profile.barrier_whitelist[barrier] and not rising_bollard then\n result.barrier = true\n end\n end\n end\n\n -- check if node is a traffic light\n local tag = node:get_value_by_key(\"highway\")\n if \"traffic_signals\" == tag then\n result.traffic_lights = true\n end\nend\n\nfunction process_way(profile, way, result, relations)\n -- the initial filtering of ways based on presence of tags\n -- affects processing times significantly, because all ways\n -- have to be checked.\n -- to increase performance, prefetching and initial tag check\n -- is done directly instead of via a handler.\n\n -- in general we should try to abort as soon as\n -- possible if the way is not routable, to avoid doing\n -- unnecessary work. 
this implies we should check things that\n -- commonly forbid access early, and handle edge cases later.\n\n -- data table for storing intermediate values during processing\n local data = {\n -- prefetch tags\n impassable = way:get_value_by_key(\"impassable\"),\n status = way:get_value_by_key(\"status\"),\n railway = way:get_value_by_key('railway'),\n highway = way:get_value_by_key('highway'),\n bridge = way:get_value_by_key('bridge'),\n route = way:get_value_by_key('route'),\n oneway = way:get_value_by_key('oneway'),\n area = way:get_value_by_key('area')\n }\n\n -- perform a quick initial check and abort if the way is\n -- obviously not routable.\n -- highway or route tags must be in data table, bridge is optional\n if (not data.highway or data.highway == '') and data.bridge and data.bridge ~= '' then\n return\n end\n\n if (not data.highway or data.highway == '') and\n (not data.route or data.route == '') then\n return\n end\n\n if data.route and data.route == 'ferry' then\n local motor_vehicle = way:get_value_by_key(\"motor_vehicle\")\n local motorcar = way:get_value_by_key(\"motorcar\")\n if not ((motor_vehicle and motor_vehicle == \"yes\") or (motorcar and motorcar == \"yes\")) then\n return\n end\n end\n\n -- \"route\" key whitelist: [\"road\", \"bus\", \"ferry\"]\n if data.route and data.route ~= '' and not ('road' == data.route or 'bus' == data.route or 'ferry' == data.route) then\n return\n end\n\n if data.highway == 'track' or data.highway == 'path' or data.highway == 'footway' \n or data.highway == 'pedestrian' or data.highway == 'steps' or data.highway == 'bus_stop' or data.highway == 'cycleway' then\n return\n end \n\n if data.highway == 'construction' or data.railway == 'construction' then\n return\n end\n\n if data.impassable == 'yes' or data.status == 'impassable' then\n return\n end\n\n if data.area == 'yes' then\n return\n end\n\n handlers = Sequence {\n -- set the default mode for this profile. it can be changed later\n -- in case it turns out we're e.g. on a ferry\n WayHandlers.default_mode,\n\n -- determine access status by checking our hierarchy of\n -- access tags, e.g: motorcar, motor_vehicle, vehicle\n WayHandlers.access,\n\n -- check whether forward/backward directions are routable\n WayHandlers.oneway,\n\n -- check a road's destination\n WayHandlers.destinations,\n\n -- check whether we're using a special transport mode\n WayHandlers.ferries,\n WayHandlers.movables,\n\n -- handle service road restrictions\n WayHandlers.service,\n\n -- compute speed taking into account way type, maxspeed tags, etc.\n WayHandlers.speed,\n WayHandlers.penalties,\n\n -- compute class labels\n WayHandlers.classes,\n\n -- handle turn lanes and road classification, used for guidance\n WayHandlers.turn_lanes,\n WayHandlers.classification,\n\n -- handle various other flags\n WayHandlers.roundabouts,\n WayHandlers.startpoint,\n\n -- set name, ref and pronunciation\n WayHandlers.names,\n\n -- set weight properties of the way\n WayHandlers.weights,\n }\n\n WayHandlers.run(profile, way, result, data, handlers, relations)\n\n if profile.cardinal_directions then\n Relations.process_way_refs(way, relations, result)\n end\nend\n\nfunction process_turn(profile, turn)\n -- Use a sigmoid function to return a penalty that maxes out at turn_penalty\n -- over the space of 0-180 degrees. 
Values here were chosen by fitting\n -- the function to some turn penalty samples from real driving.\n local turn_penalty = 7.5\n local turn_bias = 1.075\n if turn.has_traffic_light then\n turn.weight = profile.properties.traffic_light_penalty\n end\n\n if turn.number_of_roads > 2 or turn.source_mode ~= turn.target_mode or turn.is_u_turn then\n if turn.angle>=0 then\n turn.weight = turn.weight + turn_penalty / (1 + 2.718 ^ - ((13 / turn_bias) * turn.angle/180 - 6.5*turn_bias))\n else\n turn.weight = turn.weight + turn_penalty / (1 + 2.718 ^ - ((13 * turn_bias) * - turn.angle/180 - 6.5/turn_bias))\n end\n if turn.is_u_turn then\n turn.weight = turn.weight + profile.properties.u_turn_penalty\n end\n end\n turn.duration = turn.weight\nend\n\nreturn {\n setup = setup,\n process_way = process_way,\n process_node = process_node,\n process_turn = process_turn\n}\n"
},
{
"alpha_fraction": 0.6827557682991028,
"alphanum_fraction": 0.683993399143219,
"avg_line_length": 35.17910385131836,
"blob_id": "f910306c881a5f5ff7eb4f6bdf5cc5089c572f7e",
"content_id": "2f69044d1c68c8305d42a0ec5eec73b7773ae0b4",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2424,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 67,
"path": "/include/guidance/serialization.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_GUIDANCE_IO_HPP\n#define OSRM_GUIDANCE_IO_HPP\n\n#include \"guidance/turn_data_container.hpp\"\n\n#include \"storage/serialization.hpp\"\n#include \"storage/tar.hpp\"\n\n#include <boost/assert.hpp>\n\n#include \"../../../src/protobuf/edge-based-graph.pb.h\"\n\n\nnamespace osrm\n{\nnamespace guidance\n{\nnamespace serialization\n{\n\n// read/write for turn data file\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n guidance::detail::TurnDataContainerImpl<Ownership> &turn_data_container)\n{\n storage::serialization::read(\n reader, name + \"/turn_instructions\", turn_data_container.turn_instructions);\n storage::serialization::read(\n reader, name + \"/lane_data_ids\", turn_data_container.lane_data_ids);\n storage::serialization::read(\n reader, name + \"/entry_class_ids\", turn_data_container.entry_class_ids);\n storage::serialization::read(\n reader, name + \"/pre_turn_bearings\", turn_data_container.pre_turn_bearings);\n storage::serialization::read(\n reader, name + \"/post_turn_bearings\", turn_data_container.post_turn_bearings);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const guidance::detail::TurnDataContainerImpl<Ownership> &turn_data_container)\n{\n storage::serialization::write(\n writer, name + \"/turn_instructions\", turn_data_container.turn_instructions);\n storage::serialization::write(\n writer, name + \"/lane_data_ids\", turn_data_container.lane_data_ids);\n storage::serialization::write(\n writer, name + \"/entry_class_ids\", turn_data_container.entry_class_ids);\n storage::serialization::write(\n writer, name + \"/pre_turn_bearings\", turn_data_container.pre_turn_bearings);\n storage::serialization::write(\n writer, name + \"/post_turn_bearings\", turn_data_container.post_turn_bearings);\n\n std::cout << \"#### turn instruction: \" << turn_data_container.turn_instructions.size() << std::endl;\n pbebg::TurnInstructions pb_instructions;\n for (auto i : turn_data_container.turn_instructions){\n pb_instructions.add_turn_instruction(i.pack_to_uint32());\n }\n std::fstream pb_out(\"1.ebg.turn.instruction.pb\", std::ios::out | std::ios::binary);\n pb_instructions.SerializeToOstream(&pb_out);\n}\n}\n}\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.6494552493095398,
"alphanum_fraction": 0.6499289274215698,
"avg_line_length": 37.733943939208984,
"blob_id": "7bd8b2c8f11288919d9a45fca59a57430b7c4d12",
"content_id": "50a91d4fd55caa0ba879b411497cfdcf4c0af1e4",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4222,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 109,
"path": "/include/customizer/serialization.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_CUSTOMIZER_SERIALIZATION_HPP\n#define OSRM_CUSTOMIZER_SERIALIZATION_HPP\n\n#include \"customizer/edge_based_graph.hpp\"\n\n#include \"partitioner/cell_storage.hpp\"\n\n#include \"storage/serialization.hpp\"\n#include \"storage/shared_memory_ownership.hpp\"\n#include \"storage/tar.hpp\"\n\n#include \"../../../src/protobuf/mld.pb.h\"\n\n\nnamespace osrm\n{\nnamespace customizer\n{\nnamespace serialization\n{\n\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::CellMetricImpl<Ownership> &metric)\n{\n storage::serialization::read(reader, name + \"/weights\", metric.weights);\n storage::serialization::read(reader, name + \"/durations\", metric.durations);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::CellMetricImpl<Ownership> &metric)\n{\n storage::serialization::write(writer, name + \"/weights\", metric.weights);\n storage::serialization::write(writer, name + \"/durations\", metric.durations);\n}\n\ntemplate <typename EdgeDataT, storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n MultiLevelGraph<EdgeDataT, Ownership> &graph)\n{\n storage::serialization::read(reader, name + \"/node_array\", graph.node_array);\n storage::serialization::read(reader, name + \"/node_weights\", graph.node_weights);\n storage::serialization::read(reader, name + \"/node_durations\", graph.node_durations);\n storage::serialization::read(reader, name + \"/edge_array\", graph.edge_array);\n storage::serialization::read(reader, name + \"/is_forward_edge\", graph.is_forward_edge);\n storage::serialization::read(reader, name + \"/is_backward_edge\", graph.is_backward_edge);\n storage::serialization::read(reader, name + \"/node_to_edge_offset\", graph.node_to_edge_offset);\n}\n\ntemplate <typename EdgeDataT, storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const MultiLevelGraph<EdgeDataT, Ownership> &graph)\n{\n storage::serialization::write(writer, name + \"/node_array\", graph.node_array);\n storage::serialization::write(writer, name + \"/node_weights\", graph.node_weights);\n storage::serialization::write(writer, name + \"/node_durations\", graph.node_durations);\n storage::serialization::write(writer, name + \"/edge_array\", graph.edge_array);\n storage::serialization::write(writer, name + \"/is_forward_edge\", graph.is_forward_edge);\n storage::serialization::write(writer, name + \"/is_backward_edge\", graph.is_backward_edge);\n storage::serialization::write(writer, name + \"/node_to_edge_offset\", graph.node_to_edge_offset);\n\n std::cout << \"#### query graph: node array: \" << graph.node_array.size()\n << \" node_weights: \" << graph.node_weights.size()\n << \" edge_array: \" << graph.edge_array.size()\n << \" is_forward_edge: \" << graph.is_forward_edge.size()\n << \" is_backward_edge: \" << graph.is_backward_edge.size()\n << \" node_to_edge_offset: \" << graph.node_to_edge_offset.size()\n << \" num levels: \" << (int)graph.node_to_edge_offset.back()\n << std::endl;\n pbmld::QueryGraph pb_graph;\n\n for (auto i : graph.node_array){\n pb_graph.add_nodes(i.first_edge);\n }\n\n int index = 0;\n for (auto i : graph.edge_array){\n auto pb_edge = pb_graph.add_edges();\n pb_edge->set_target(i.target);\n pb_edge->set_turn_id(i.data.turn_id);\n pb_edge->set_is_forward(graph.is_forward_edge[index]);\n 
pb_edge->set_is_backward(graph.is_backward_edge[index]);\n index++;\n }\n\n for (auto i : graph.node_to_edge_offset){\n pb_graph.add_node_level_offset((unsigned int)i);\n }\n\n for (auto i : graph.node_durations){\n pb_graph.add_node_distances(i);\n }\n for (auto i : graph.node_weights){\n pb_graph.add_node_weights(i);\n }\n\n std::fstream pb_out(\"1.mld.graph.pb\", std::ios::out | std::ios::binary);\n pb_graph.SerializeToOstream(&pb_out);\n}\n}\n}\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.728863000869751,
"alphanum_fraction": 0.728863000869751,
"avg_line_length": 23.5,
"blob_id": "7937f5fa248a057befc85c25825558c1edac4b85",
"content_id": "0cb7f84d7df8ea989c59892cd8dd784e6e5e709c",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 686,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 28,
"path": "/osrm-ch/include/storage/shared_barriers.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef SHARED_BARRIERS_HPP\n#define SHARED_BARRIERS_HPP\n\n#include <boost/interprocess/sync/named_condition.hpp>\n#include <boost/interprocess/sync/named_mutex.hpp>\n#include <boost/interprocess/sync/named_sharable_mutex.hpp>\n\nnamespace osrm\n{\nnamespace storage\n{\nstruct SharedBarriers\n{\n\n SharedBarriers()\n : pending_update_mutex(boost::interprocess::open_or_create, \"pending_update\"),\n query_mutex(boost::interprocess::open_or_create, \"query\")\n {\n }\n\n // Mutex to protect access to the boolean variable\n boost::interprocess::named_mutex pending_update_mutex;\n boost::interprocess::named_sharable_mutex query_mutex;\n};\n}\n}\n\n#endif // SHARED_BARRIERS_HPP\n"
},
{
"alpha_fraction": 0.7195122241973877,
"alphanum_fraction": 0.7195122241973877,
"avg_line_length": 26.5,
"blob_id": "96bb1281b643474ae7176a13654ca959ebc840cf",
"content_id": "631164fc789214af5f09468d16f65f7a50724e37",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 164,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 6,
"path": "/osrm-ch/src/protobuf/gen.sh",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nprotoc --cpp_out=. node-based-graph.proto\nprotoc --cpp_out=. edge-based-graph.proto\nprotoc --cpp_out=. query-graph.proto\nprotoc --cpp_out=. rtree.proto"
},
{
"alpha_fraction": 0.6568174958229065,
"alphanum_fraction": 0.659510612487793,
"avg_line_length": 36.6695671081543,
"blob_id": "3259089d7f569734844cea5a1418db60326da267",
"content_id": "4a1170b6495daae13eb0fa124aa776ad82f52ee6",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 12996,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 345,
"path": "/include/extractor/serialization.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_EXTRACTOR_IO_HPP\n#define OSRM_EXTRACTOR_IO_HPP\n\n#include \"extractor/conditional_turn_penalty.hpp\"\n#include \"extractor/datasources.hpp\"\n#include \"extractor/edge_based_edge.hpp\"\n#include \"extractor/intersection_bearings_container.hpp\"\n#include \"extractor/maneuver_override.hpp\"\n#include \"extractor/name_table.hpp\"\n#include \"extractor/nbg_to_ebg.hpp\"\n#include \"extractor/node_data_container.hpp\"\n#include \"extractor/profile_properties.hpp\"\n#include \"extractor/restriction.hpp\"\n#include \"extractor/segment_data_container.hpp\"\n\n#include \"storage/io.hpp\"\n#include \"storage/serialization.hpp\"\n\n#include \"util/deallocating_vector.hpp\"\n\n#include \"../../../src/protobuf/node-based-graph.pb.h\"\n#include \"../../../src/protobuf/edge-based-graph.pb.h\"\n#include \"../../../src/protobuf/scc.pb.h\"\n\n\n#include <boost/assert.hpp>\n\nnamespace osrm\n{\nnamespace extractor\n{\nnamespace serialization\n{\n\n// read/write for bearing data\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::IntersectionBearingsContainer<Ownership> &intersection_bearings)\n{\n storage::serialization::read(reader, name + \"/bearing_values\", intersection_bearings.values);\n storage::serialization::read(\n reader, name + \"/node_to_class_id\", intersection_bearings.node_to_class_id);\n util::serialization::read(\n reader, name + \"/class_id_to_ranges\", intersection_bearings.class_id_to_ranges_table);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::IntersectionBearingsContainer<Ownership> &intersection_bearings)\n{\n storage::serialization::write(writer, name + \"/bearing_values\", intersection_bearings.values);\n storage::serialization::write(\n writer, name + \"/node_to_class_id\", intersection_bearings.node_to_class_id);\n util::serialization::write(\n writer, name + \"/class_id_to_ranges\", intersection_bearings.class_id_to_ranges_table);\n}\n\n// read/write for properties file\ninline void\nread(storage::tar::FileReader &reader, const std::string &name, ProfileProperties &properties)\n{\n reader.ReadInto(name, properties);\n}\n\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const ProfileProperties &properties)\n{\n writer.WriteElementCount64(name, 1);\n writer.WriteFrom(name, properties);\n}\n\n// read/write for datasources file\ninline void read(storage::tar::FileReader &reader, const std::string &name, Datasources &sources)\n{\n reader.ReadInto(name, sources);\n}\n\ninline void write(storage::tar::FileWriter &writer, const std::string &name, Datasources &sources)\n{\n writer.WriteElementCount64(name, 1);\n writer.WriteFrom(name, sources);\n}\n\n// read/write for segment data file\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::SegmentDataContainerImpl<Ownership> &segment_data)\n{\n storage::serialization::read(reader, name + \"/index\", segment_data.index);\n storage::serialization::read(reader, name + \"/nodes\", segment_data.nodes);\n util::serialization::read(reader, name + \"/forward_weights\", segment_data.fwd_weights);\n util::serialization::read(reader, name + \"/reverse_weights\", segment_data.rev_weights);\n util::serialization::read(reader, name + \"/forward_durations\", segment_data.fwd_durations);\n util::serialization::read(reader, name + \"/reverse_durations\", 
segment_data.rev_durations);\n storage::serialization::read(\n reader, name + \"/forward_data_sources\", segment_data.fwd_datasources);\n storage::serialization::read(\n reader, name + \"/reverse_data_sources\", segment_data.rev_datasources);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::SegmentDataContainerImpl<Ownership> &segment_data)\n{\n storage::serialization::write(writer, name + \"/index\", segment_data.index);\n storage::serialization::write(writer, name + \"/nodes\", segment_data.nodes);\n util::serialization::write(writer, name + \"/forward_weights\", segment_data.fwd_weights);\n util::serialization::write(writer, name + \"/reverse_weights\", segment_data.rev_weights);\n util::serialization::write(writer, name + \"/forward_durations\", segment_data.fwd_durations);\n util::serialization::write(writer, name + \"/reverse_durations\", segment_data.rev_durations);\n storage::serialization::write(\n writer, name + \"/forward_data_sources\", segment_data.fwd_datasources);\n storage::serialization::write(\n writer, name + \"/reverse_data_sources\", segment_data.rev_datasources);\n\n\n std::cout << \"#### cnbg: \" << segment_data.index.size() << \", \"<< segment_data.nodes.size()\n << \", \"<< segment_data.fwd_weights.size()<< \", \"<< segment_data.rev_weights.size() << std::endl;\n\n pbnbg::CompressedNbg pb_cnbg;\n for (auto i : segment_data.index){\n pb_cnbg.add_index(i);\n }\n for (auto i : segment_data.nodes){\n pb_cnbg.add_nodes(i);\n }\n for (auto i : segment_data.fwd_weights){\n pb_cnbg.add_forward_weights(i);\n }\n for (auto i : segment_data.rev_weights){\n pb_cnbg.add_reverse_weights(i);\n }\n // note: forward durations are exported into the protobuf 'distances' field\n for (auto i : segment_data.fwd_durations){\n pb_cnbg.add_distances(i);\n }\n\n std::fstream pb_out(\"1.nbg.compressed.pb\", std::ios::out | std::ios::binary);\n pb_cnbg.SerializeToOstream(&pb_out);\n\n}\n\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::EdgeBasedNodeDataContainerImpl<Ownership> &node_data_container)\n{\n // read actual data\n storage::serialization::read(reader, name + \"/nodes\", node_data_container.nodes);\n storage::serialization::read(\n reader, name + \"/annotations\", node_data_container.annotation_data);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::EdgeBasedNodeDataContainerImpl<Ownership> &node_data_container)\n{\n storage::serialization::write(writer, name + \"/nodes\", node_data_container.nodes);\n storage::serialization::write(\n writer, name + \"/annotations\", node_data_container.annotation_data);\n\n std::cout << \"#### ebg nodes: \" << node_data_container.nodes.size() << \", \"\n << node_data_container.annotation_data.size() << std::endl;\n pbebg::EdgeBasedNodeContainer pb_nodes;\n for (auto i : node_data_container.nodes){\n auto c = pb_nodes.add_nodes();\n c->set_geometry_id(i.geometry_id.id);\n c->set_component_id(i.component_id.id);\n c->set_annotation_id(i.annotation_id);\n c->set_is_tiny(i.component_id.is_tiny);\n c->set_segregated(i.segregated);\n }\n\n for (auto i : node_data_container.annotation_data){\n auto c = pb_nodes.add_annotation_data();\n c->set_name_id(i.name_id);\n }\n\n std::fstream pb_out(\"1.ebg.nodes.pb\", std::ios::out | std::ios::binary);\n pb_nodes.SerializeToOstream(&pb_out);\n}\n\n\ntemplate <storage::Ownership Ownership>\ninline void writeScc(const detail::EdgeBasedNodeDataContainerImpl<Ownership> &node_data_container,\n util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list)\n{\n\n //std::cout<< \"### scc: node_data_container.node.size: \" << node_data_container.nodes.size()\n // << \" edge_based_edge_list: \"<< edge_based_edge_list.size() << std::endl;\n\n std::map<std::uint32_t, std::uint32_t> node_component_map;\n std::uint32_t max_component_id = 0;\n for(unsigned long i = 0; i < node_data_container.nodes.size(); ++i) {\n node_component_map[i] = node_data_container.nodes[i].component_id.id;\n if(node_data_container.nodes[i].component_id.id > max_component_id ){\n max_component_id = node_data_container.nodes[i].component_id.id;\n }\n }\n\n std::vector< std::vector<std::uint32_t> > scc_info;\n for(std::uint32_t i = 0; i <= max_component_id; ++i){\n std::vector<std::uint32_t> x;\n scc_info.push_back(x);\n }\n\n for(auto i = edge_based_edge_list.begin(); i != edge_based_edge_list.end(); ++i){\n if(i->source >= node_component_map.size() || i->target >= node_component_map.size()){\n std::cout << \"## scc err: \" << node_component_map.size() << \" i->source: \" << i->source\n << \" i->target: \"<< i->target << std::endl;\n continue;\n }\n\n bool found = false;\n for ( auto j : scc_info[node_component_map[i->source]]) {\n if(j == node_component_map[i->target]){\n found = true;\n break;\n }\n }\n if (!found){\n scc_info[node_component_map[i->source]].push_back(node_component_map[i->target]);\n }\n }\n\n int isolated_component_num = 0;\n pbscc::SCCGraph pb_scc;\n pb_scc.set_v(max_component_id+1);\n for (auto i : scc_info){\n auto c = pb_scc.add_adj();\n for(auto j: i){\n c->add_targets(j);\n }\n if (i.size() == 0) {\n isolated_component_num++;\n }\n }\n\n std::cout<< \"### scc: node_component_map: \" << node_component_map.size() << \" scc_info: \"<< scc_info.size()\n << \" isolated component: \" << isolated_component_num << std::endl;\n\n std::fstream pb_out(\"1.ebg.scc.pb\", std::ios::out | std::ios::binary);\n pb_scc.SerializeToOstream(&pb_out);\n}\n\ninline void read(storage::io::BufferReader &reader, ConditionalTurnPenalty &turn_penalty)\n{\n reader.ReadInto(turn_penalty.turn_offset);\n reader.ReadInto(turn_penalty.location.lat);\n reader.ReadInto(turn_penalty.location.lon);\n auto const num_conditions = reader.ReadElementCount64();\n turn_penalty.conditions.resize(num_conditions);\n for (auto &condition : turn_penalty.conditions)\n {\n reader.ReadInto(condition.modifier);\n storage::serialization::read(reader, condition.times);\n storage::serialization::read(reader, condition.weekdays);\n storage::serialization::read(reader, condition.monthdays);\n }\n}\n\ninline void write(storage::io::BufferWriter &writer, const ConditionalTurnPenalty &turn_penalty)\n{\n writer.WriteFrom(turn_penalty.turn_offset);\n writer.WriteFrom(static_cast<util::FixedLatitude::value_type>(turn_penalty.location.lat));\n writer.WriteFrom(static_cast<util::FixedLongitude::value_type>(turn_penalty.location.lon));\n writer.WriteElementCount64(turn_penalty.conditions.size());\n for (const auto &c : turn_penalty.conditions)\n {\n writer.WriteFrom(c.modifier);\n storage::serialization::write(writer, c.times);\n storage::serialization::write(writer, c.weekdays);\n storage::serialization::write(writer, c.monthdays);\n }\n}\n\ninline void write(storage::io::BufferWriter &writer,\n const std::vector<ConditionalTurnPenalty> &conditional_penalties)\n{\n writer.WriteElementCount64(conditional_penalties.size());\n for (const auto &penalty : conditional_penalties)\n {\n write(writer, penalty);\n }\n}\n\ninline void read(storage::io::BufferReader &reader,\n std::vector<ConditionalTurnPenalty> &conditional_penalties)\n{\n auto num_elements = reader.ReadElementCount64();\n conditional_penalties.resize(num_elements);\n for (auto &penalty : conditional_penalties)\n {\n read(reader, penalty);\n }\n}\n\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const std::vector<ConditionalTurnPenalty> &conditional_penalties)\n{\n storage::io::BufferWriter buffer_writer;\n write(buffer_writer, conditional_penalties);\n\n storage::serialization::write(writer, name, buffer_writer.GetBuffer());\n}\n\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n std::vector<ConditionalTurnPenalty> &conditional_penalties)\n{\n std::string buffer;\n storage::serialization::read(reader, name, buffer);\n\n storage::io::BufferReader buffer_reader{buffer};\n read(buffer_reader, conditional_penalties);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::NameTableImpl<Ownership> &name_table)\n{\n util::serialization::write(writer, name, name_table.indexed_data);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::NameTableImpl<Ownership> &name_table)\n{\n util::serialization::read(reader, name, name_table.indexed_data);\n}\n}\n}\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.7459505796432495,
"alphanum_fraction": 0.7459505796432495,
"avg_line_length": 29.86842155456543,
"blob_id": "fff6b30ae74b3888e236c217cf003e9c1e6b35a7",
"content_id": "3a36d3d5750e64c029bca2a9c2470a7218db95e9",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1173,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 38,
"path": "/osrm-ch/include/extractor/guidance/turn_discovery.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_EXTRACTOR_GUIDANCE_TURN_DISCOVERY_HPP_\n#define OSRM_EXTRACTOR_GUIDANCE_TURN_DISCOVERY_HPP_\n\n#include \"extractor/guidance/intersection.hpp\"\n#include \"extractor/guidance/turn_analysis.hpp\"\n#include \"util/typedefs.hpp\"\n\n#include <string>\n\nnamespace osrm\n{\nnamespace extractor\n{\nnamespace guidance\n{\nnamespace lanes\n{\n\n// OSRM processes edges by looking at a via_edge, coming into an intersection. For turn lanes, we\n// might require to actually look back a turn. We do so in the hope that the turn lanes match up at\n// the previous intersection for all incoming lanes.\nbool findPreviousIntersection(\n const NodeID node,\n const EdgeID via_edge,\n const Intersection intersection,\n const TurnAnalysis &turn_analysis, // to generate other intersections\n const util::NodeBasedDynamicGraph &node_based_graph, // query edge data\n // output parameters, will be in an arbitrary state on failure\n NodeID &result_node,\n EdgeID &result_via_edge,\n Intersection &result_intersection);\n\n} // namespace lanes\n} // namespace guidance\n} // namespace extractor\n} // namespace osrm\n\n#endif /*OSRM_EXTRACTOR_GUIDANCE_TURN_DISCOVERY_HPP_*/\n"
},
{
"alpha_fraction": 0.7055016160011292,
"alphanum_fraction": 0.7055016160011292,
"avg_line_length": 14.449999809265137,
"blob_id": "8d8023d05378787f42da8b52a889601446123fc0",
"content_id": "3fc227a6ab64ca5395157a00e97fb99f6597e43d",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 309,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 20,
"path": "/run.sh",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\ncityname=\"SIN\"\ncd build\necho `date`\necho \"start osrm-extract\"\n./osrm-extract -p car.lua ${cityname}.osm.pbf\necho `date`\n\necho \"start osrm-partition\"\n./osrm-partition ${cityname}.osrm\necho `date`\n\necho \"start osrm-customize\"\n./osrm-customize ${cityname}.osrm\n\necho `date`\necho \"pb files\"\nls *.pb\n"
},
{
"alpha_fraction": 0.719393253326416,
"alphanum_fraction": 0.7226435542106628,
"avg_line_length": 24.63888931274414,
"blob_id": "dfc9ca9510b71eaa2d7d6ba87dbd25828c3f9f7a",
"content_id": "c7c45a8ad00053d711ecd33a066b55a2e1cbe285",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 923,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 36,
"path": "/osrm-ch/src/util/guidance/turn_lanes.cpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#include \"util/guidance/turn_lanes.hpp\"\n\n#include <algorithm>\n#include <iostream>\n#include <tuple>\n\n#include <boost/assert.hpp>\n\nnamespace osrm\n{\nnamespace util\n{\nnamespace guidance\n{\nLaneTupel::LaneTupel() : lanes_in_turn(0), first_lane_from_the_right(INVALID_LANEID)\n{\n // basic constructor, set everything to zero\n}\n\nLaneTupel::LaneTupel(const LaneID lanes_in_turn, const LaneID first_lane_from_the_right)\n : lanes_in_turn(lanes_in_turn), first_lane_from_the_right(first_lane_from_the_right)\n{\n}\n\n// comparation based on interpretation as unsigned 32bit integer\nbool LaneTupel::operator==(const LaneTupel other) const\n{\n return std::tie(lanes_in_turn, first_lane_from_the_right) ==\n std::tie(other.lanes_in_turn, other.first_lane_from_the_right);\n}\n\nbool LaneTupel::operator!=(const LaneTupel other) const { return !(*this == other); }\n\n} // namespace guidance\n} // namespace util\n} // namespace osrm\n"
},
{
"alpha_fraction": 0.5966089963912964,
"alphanum_fraction": 0.625220775604248,
"avg_line_length": 43.93650817871094,
"blob_id": "15baa9fdb73e4f352ab3a8a66ddcf74468bfb202",
"content_id": "fd85ff35892857d1a4d138869dbb259b0901380f",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2831,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 63,
"path": "/osrm-ch/scripts/gdb_printers.py",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "import gdb.printing\n\n# https://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html\n# https://sourceware.org/gdb/onlinedocs/gdb/Writing-a-Pretty_002dPrinter.html\n\nCOORDINATE_PRECISION = 1e6\n\nclass CoordinatePrinter:\n \"\"\"Print a CoordinatePrinter object.\"\"\"\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n lon, lat = int(self.val['lon']['__value']), int(self.val['lat']['__value'])\n return '{{{}, {}}}'.format(float(lon) / COORDINATE_PRECISION, float(lat) / COORDINATE_PRECISION)\n\nclass TurnInstructionPrinter:\n \"\"\"Print a TurnInstruction object.\"\"\"\n\n modifiers = {0:'UTurn', 1:'SharpRight', 2:'Right', 3:'SlightRight',\n 4:'Straight', 5:'SlightLeft', 6:'Left', 7:'SharpLeft'}\n types = {0:'Invalid', 1:'NewName', 2:'Continue', 3:'Turn', 4:'Merge', 5:'OnRamp',\n 6:'OffRamp', 7:'Fork', 8:'EndOfRoad', 9:'Notification', 10:'EnterRoundabout',\n 11:'EnterAndExitRoundabout', 12:'EnterRotary', 13:'EnterAndExitRotary',\n 14:'EnterRoundaboutIntersection', 15:'EnterAndExitRoundaboutIntersection',\n 16:'UseLane', 17:'NoTurn', 18:'Suppressed', 19:'EnterRoundaboutAtExit',\n 20:'ExitRoundabout', 21:'EnterRotaryAtExit', 22:'ExitRotary',\n 23:'EnterRoundaboutIntersectionAtExit', 24:'ExitRoundaboutIntersection',\n 25:'StayOnRoundabout', 26:'Sliproad'}\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n t, m = int(self.val['type']), int(self.val['direction_modifier'])\n m = '%s (%d)' % (self.modifiers[m], m) if m in self.modifiers else str(m)\n t = '%s (%d)' % (self.types[t], t) if t in self.types else str(t)\n return '{{type = {}, direction_modifier = {}}}'.format(t, m)\n\nclass TurnLaneDataPrinter:\n \"\"\"Print a TurnLaneData object.\"\"\"\n\n mask = {0:'Empty', 1:'None', 2:'Straight', 4:'SharpLeft', 8:'Left', 16:'SlightLeft',\n 32:'SlightRight', 64:'Right', 128:'SharpRight', 256:'UTurn', 512:'MergeToLeft',\n 1024:'MergeToRight'}\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n tg = int(self.val['tag'])\n fr, to = int(self.val['from']), int(self.val['to'])\n return '{{tag = {}, from = {}, to = {}}}'.format(self.mask[tg] if tg in self.mask else tg, fr, to)\n\ndef build_pretty_printer():\n pp = gdb.printing.RegexpCollectionPrettyPrinter('OSRM')\n pp.add_printer('TurnInstruction', '::TurnInstruction$', TurnInstructionPrinter)\n pp.add_printer('Coordinate', '::Coordinate$', CoordinatePrinter)\n pp.add_printer('TurnLaneData', '::TurnLaneData$', TurnLaneDataPrinter)\n return pp\n\n#gdb.pretty_printers = [filter(lambda x: x.name != 'OSRM', gdb.pretty_printers)]\ngdb.printing.register_pretty_printer(gdb.current_objfile(), build_pretty_printer())\n"
},
{
"alpha_fraction": 0.4883148968219757,
"alphanum_fraction": 0.492004930973053,
"avg_line_length": 29.11111068725586,
"blob_id": "a80f2810ef47d3c2e4cfd6382bb6552d9bd413e3",
"content_id": "4acbd23c1396eb513c87a34b3e51e0754e3fb8ec",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1626,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 54,
"path": "/osrm-ch/features/lib/table_diff.js",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nvar util = require('util');\nvar path = require('path');\nvar fs = require('fs');\nvar chalk = require('chalk');\n\nvar unescapeStr = (str) => str.replace(/\\\\\\|/g, '\\|').replace(/\\\\\\\\/g, '\\\\');\n\nmodule.exports = function (expected, actual) {\n let headers = expected.raw()[0];\n let expected_keys = expected.hashes();\n let diff = [];\n let hasErrors = false;\n\n var good = 0, bad = 0;\n\n expected_keys.forEach((row, i) => {\n var rowError = false;\n\n for (var j in row) {\n if (unescapeStr(row[j]) != actual[i][j]) {\n rowError = true;\n hasErrors = true;\n break;\n }\n }\n\n if (rowError) {\n bad++;\n diff.push(Object.assign({}, row, {c_status: 'undefined'}));\n diff.push(Object.assign({}, actual[i], {c_status: 'comment'}));\n } else {\n good++;\n diff.push(row);\n }\n });\n\n if (!hasErrors) return null;\n\n var s = ['Tables were not identical:'];\n s.push(headers.map(key => ' ' + key).join(' | '));\n diff.forEach((row) => {\n var rowString = '| ';\n headers.forEach((header) => {\n if (!row.c_status) rowString += chalk.green(' ' + row[header] + ' | ');\n else if (row.c_status === 'undefined') rowString += chalk.yellow('(-) ' + row[header] + ' | ');\n else rowString += chalk.red('(+) ' + row[header] + ' | ');\n });\n s.push(rowString);\n });\n\n return s.join('\\n') + '\\nTODO this is a temp workaround waiting for https://github.com/cucumber/cucumber-js/issues/534';\n};\n"
},
{
"alpha_fraction": 0.6944948434829712,
"alphanum_fraction": 0.6944948434829712,
"avg_line_length": 31.41176414489746,
"blob_id": "834761833077df65162ac006d2b3f241b56ff495",
"content_id": "462b35dad6616b806ae72a65fc7c050dc36ddc4d",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1653,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 51,
"path": "/osrm-ch/include/extractor/guidance/sliproad_handler.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_EXTRACTOR_GUIDANCE_SLIPROAD_HANDLER_HPP_\n#define OSRM_EXTRACTOR_GUIDANCE_SLIPROAD_HANDLER_HPP_\n\n#include \"extractor/guidance/intersection.hpp\"\n#include \"extractor/guidance/intersection_generator.hpp\"\n#include \"extractor/guidance/intersection_handler.hpp\"\n#include \"extractor/query_node.hpp\"\n\n#include \"util/name_table.hpp\"\n#include \"util/node_based_graph.hpp\"\n\n#include <cstddef>\n#include <utility>\n#include <vector>\n\nnamespace osrm\n{\nnamespace extractor\n{\nnamespace guidance\n{\n\n// Intersection handlers deal with all issues related to intersections.\n// They assign appropriate turn operations to the TurnOperations.\nclass SliproadHandler : public IntersectionHandler\n{\n public:\n SliproadHandler(const IntersectionGenerator &intersection_generator,\n const util::NodeBasedDynamicGraph &node_based_graph,\n const std::vector<QueryNode> &node_info_list,\n const util::NameTable &name_table,\n const SuffixTable &street_name_suffix_table);\n\n ~SliproadHandler() override final = default;\n\n // check whether the handler can actually handle the intersection\n bool canProcess(const NodeID /*nid*/,\n const EdgeID /*via_eid*/,\n const Intersection & /*intersection*/) const override final;\n\n // process the intersection\n Intersection operator()(const NodeID nid,\n const EdgeID via_eid,\n Intersection intersection) const override final;\n};\n\n} // namespace guidance\n} // namespace extractor\n} // namespace osrm\n\n#endif /*OSRM_EXTRACTOR_GUIDANCE_SLIPROAD_HANDLER_HPP_*/\n"
},
{
"alpha_fraction": 0.7043010592460632,
"alphanum_fraction": 0.7043010592460632,
"avg_line_length": 25.571428298950195,
"blob_id": "b6540b900b03806bf20940b156ca3b7632036ee2",
"content_id": "40ec8fbc65a82bfe148c2ed5422d96523c879c4f",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 186,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/src/protobuf/gen.sh",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nprotoc --cpp_out=. rtree.proto\nprotoc --cpp_out=. node-based-graph.proto\nprotoc --cpp_out=. edge-based-graph.proto\nprotoc --cpp_out=. mld.proto\nprotoc --cpp_out=. scc.proto\n"
},
{
"alpha_fraction": 0.7358208894729614,
"alphanum_fraction": 0.737313449382782,
"avg_line_length": 24.769229888916016,
"blob_id": "07f65d62825f4cf780584a84d649b6a056a1229f",
"content_id": "f160fcd93725cceb5d98dd7fee4918feeaa31569",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 670,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 26,
"path": "/osrm-ch/src/extractor/suffix_table.cpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#include \"extractor/suffix_table.hpp\"\n\n#include \"extractor/scripting_environment.hpp\"\n\n#include <boost/algorithm/string.hpp>\n\nnamespace osrm\n{\nnamespace extractor\n{\n\nSuffixTable::SuffixTable(ScriptingEnvironment &scripting_environment)\n{\n std::vector<std::string> suffixes_vector = scripting_environment.GetNameSuffixList();\n for (auto &suffix : suffixes_vector)\n boost::algorithm::to_lower(suffix);\n suffix_set.insert(std::begin(suffixes_vector), std::end(suffixes_vector));\n}\n\nbool SuffixTable::isSuffix(const std::string &possible_suffix) const\n{\n return suffix_set.count(possible_suffix) > 0;\n}\n\n} /* namespace extractor */\n} /* namespace osrm */\n"
},
{
"alpha_fraction": 0.6165011525154114,
"alphanum_fraction": 0.6210848093032837,
"avg_line_length": 41.225807189941406,
"blob_id": "cc77a88752e03e66d5d7b46d76d9de5510745a3a",
"content_id": "9d322486fe939400db2fe66af4c0e0f4f70c46d5",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1309,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 31,
"path": "/osrm-ch/src/engine/engine_config.cpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#include \"engine/engine_config.hpp\"\n\nnamespace osrm\n{\nnamespace engine\n{\n\nbool EngineConfig::IsValid() const\n{\n const bool all_path_are_empty =\n storage_config.ram_index_path.empty() && storage_config.file_index_path.empty() &&\n storage_config.hsgr_data_path.empty() && storage_config.nodes_data_path.empty() &&\n storage_config.edges_data_path.empty() && storage_config.core_data_path.empty() &&\n storage_config.geometries_path.empty() && storage_config.timestamp_path.empty() &&\n storage_config.datasource_names_path.empty() &&\n storage_config.datasource_indexes_path.empty() && storage_config.names_data_path.empty();\n\n const auto unlimited_or_more_than = [](const int v, const int limit) {\n return v == -1 || v > limit;\n };\n\n const bool limits_valid = unlimited_or_more_than(max_locations_distance_table, 2) &&\n unlimited_or_more_than(max_locations_map_matching, 2) &&\n unlimited_or_more_than(max_locations_trip, 2) &&\n unlimited_or_more_than(max_locations_viaroute, 2) &&\n unlimited_or_more_than(max_results_nearest, 0);\n\n return ((use_shared_memory && all_path_are_empty) || storage_config.IsValid()) && limits_valid;\n}\n}\n}\n"
},
{
"alpha_fraction": 0.6557154059410095,
"alphanum_fraction": 0.6566873788833618,
"avg_line_length": 38.56922912597656,
"blob_id": "62debf436e0e59dd05ff9692bc1b80b125c8e4e1",
"content_id": "f6bc4bc0c374b912aa52c71d332eccaec8e494c1",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5144,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 130,
"path": "/include/partitioner/serialization.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_PARTITIONER_SERIALIZATION_HPP\n#define OSRM_PARTITIONER_SERIALIZATION_HPP\n\n#include \"partitioner/cell_storage.hpp\"\n#include \"partitioner/edge_based_graph.hpp\"\n#include \"partitioner/multi_level_graph.hpp\"\n#include \"partitioner/multi_level_partition.hpp\"\n\n#include \"storage/block.hpp\"\n#include \"storage/io.hpp\"\n#include \"storage/serialization.hpp\"\n#include \"storage/shared_memory_ownership.hpp\"\n#include \"storage/tar.hpp\"\n\n#include \"../../../src/protobuf/mld.pb.h\"\n\n\nnamespace osrm\n{\nnamespace partitioner\n{\nnamespace serialization\n{\n\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::MultiLevelPartitionImpl<Ownership> &mlp)\n{\n reader.ReadInto(name + \"/level_data\", *mlp.level_data);\n storage::serialization::read(reader, name + \"/partition\", mlp.partition);\n storage::serialization::read(reader, name + \"/cell_to_children\", mlp.cell_to_children);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::MultiLevelPartitionImpl<Ownership> &mlp)\n{\n writer.WriteElementCount64(name + \"/level_data\", 1);\n writer.WriteFrom(name + \"/level_data\", *mlp.level_data);\n storage::serialization::write(writer, name + \"/partition\", mlp.partition);\n storage::serialization::write(writer, name + \"/cell_to_children\", mlp.cell_to_children);\n\n std::cout << \"#### partitions: partition.size: \" << mlp.partition.size()\n << \" cell_to_children.size: \" << mlp.cell_to_children.size()\n << \" levelData.num_level: \" << mlp.level_data->num_level\n << \" levelData.lidx_to_offset.size: \" << mlp.level_data->lidx_to_offset.size()\n << \" levelData.lidx_to_mask.size: \" << mlp.level_data->lidx_to_mask.size()\n << \" levelData.bit_to_level.size: \" << mlp.level_data->bit_to_level.size()\n << \" levelData.lidx_to_children_offsets.size: \" << mlp.level_data->lidx_to_children_offsets.size()\n << std::endl;\n\n pbmld::Partitions pb_partitions;\n for (auto i : mlp.partition ){\n pb_partitions.add_partition(i);\n }\n\n auto pb_level_data = new pbmld::LevelData;\n pb_level_data->set_number_level(mlp.level_data->num_level);\n for(auto i : mlp.level_data->lidx_to_offset) {\n pb_level_data->add_lidx_to_offset(i);\n }\n for(auto i : mlp.level_data->lidx_to_mask) {\n pb_level_data->add_lidx_to_mask(i);\n }\n for(auto i : mlp.level_data->bit_to_level) {\n pb_level_data->add_bit_to_level(i);\n }\n pb_partitions.set_allocated_level_data(pb_level_data);\n std::fstream pb_out(\"1.mld.partitions.pb\", std::ios::out | std::ios::binary);\n pb_partitions.SerializeToOstream(&pb_out);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void read(storage::tar::FileReader &reader,\n const std::string &name,\n detail::CellStorageImpl<Ownership> &storage)\n{\n storage::serialization::read(reader, name + \"/source_boundary\", storage.source_boundary);\n storage::serialization::read(\n reader, name + \"/destination_boundary\", storage.destination_boundary);\n storage::serialization::read(reader, name + \"/cells\", storage.cells);\n storage::serialization::read(\n reader, name + \"/level_to_cell_offset\", storage.level_to_cell_offset);\n}\n\ntemplate <storage::Ownership Ownership>\ninline void write(storage::tar::FileWriter &writer,\n const std::string &name,\n const detail::CellStorageImpl<Ownership> &storage)\n{\n storage::serialization::write(writer, name + \"/source_boundary\", storage.source_boundary);\n 
storage::serialization::write(\n writer, name + \"/destination_boundary\", storage.destination_boundary);\n storage::serialization::write(writer, name + \"/cells\", storage.cells);\n storage::serialization::write(\n writer, name + \"/level_to_cell_offset\", storage.level_to_cell_offset);\n\n std::cout << \"#### cells: source_boundary: \" << storage.source_boundary.size()\n << \" destination_boundary: \" << storage.destination_boundary.size()\n << \" cells: \" << storage.cells.size()\n << \" level_to_cell_offset: \" << storage.level_to_cell_offset.size() << std::endl;\n\n pbmld::Cells pb_cells;\n for (auto i : storage.source_boundary ){\n pb_cells.add_source_boundary(i);\n }\n for (auto i : storage.destination_boundary ){\n pb_cells.add_destination_boundary(i);\n }\n for (auto i : storage.level_to_cell_offset ){\n pb_cells.add_level_offset(i);\n }\n for (auto i : storage.cells ){\n auto cell = pb_cells.add_cells();\n cell->set_value_offset(i.value_offset);\n cell->set_source_boundary_offset(i.source_boundary_offset);\n cell->set_destination_boundary_offset(i.destination_boundary_offset);\n cell->set_source_node_number(i.num_source_nodes);\n cell->set_destination_node_number(i.num_destination_nodes);\n }\n std::fstream pb_out(\"1.mld.cells.pb\", std::ios::out | std::ios::binary);\n pb_cells.SerializeToOstream(&pb_out);\n}\n}\n}\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.6368159055709839,
"alphanum_fraction": 0.6394948363304138,
"avg_line_length": 34.551021575927734,
"blob_id": "c82aa4742e4146ebd20ffa5158c99cd29d14b32c",
"content_id": "8a4f1c63c1d01bcadeac772da9205f6cfc5805cb",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5226,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 147,
"path": "/include/customizer/files.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_CUSTOMIZER_FILES_HPP\n#define OSRM_CUSTOMIZER_FILES_HPP\n\n#include \"customizer/serialization.hpp\"\n\n#include \"storage/tar.hpp\"\n\n#include \"util/integer_range.hpp\"\n\n#include <unordered_map>\n\n#include \"../../../src/protobuf/mld.pb.h\"\n\n\nnamespace osrm\n{\nnamespace customizer\n{\nnamespace files\n{\n\n// reads .osrm.cell_metrics file\ntemplate <typename CellMetricT>\ninline void readCellMetrics(const boost::filesystem::path &path,\n std::unordered_map<std::string, std::vector<CellMetricT>> &metrics)\n{\n static_assert(std::is_same<CellMetricView, CellMetricT>::value ||\n std::is_same<CellMetric, CellMetricT>::value,\n \"\");\n\n const auto fingerprint = storage::tar::FileReader::VerifyFingerprint;\n storage::tar::FileReader reader{path, fingerprint};\n\n for (auto &pair : metrics)\n {\n const auto &metric_name = pair.first;\n auto &metric_exclude_classes = pair.second;\n\n auto prefix = \"/mld/metrics/\" + metric_name + \"/exclude\";\n auto num_exclude_classes = reader.ReadElementCount64(prefix);\n metric_exclude_classes.resize(num_exclude_classes);\n\n auto id = 0;\n for (auto &metric : metric_exclude_classes)\n {\n serialization::read(reader, prefix + \"/\" + std::to_string(id++), metric);\n }\n }\n}\n\n// writes .osrm.cell_metrics file\ntemplate <typename CellMetricT>\ninline void\nwriteCellMetrics(const boost::filesystem::path &path,\n const std::unordered_map<std::string, std::vector<CellMetricT>> &metrics)\n{\n static_assert(std::is_same<CellMetricView, CellMetricT>::value ||\n std::is_same<CellMetric, CellMetricT>::value,\n \"\");\n\n const auto fingerprint = storage::tar::FileWriter::GenerateFingerprint;\n storage::tar::FileWriter writer{path, fingerprint};\n\n std::cout << \"#### cell metrics: pair size: \" << metrics.size() << std::endl;\n pbmld::Metrics pb_metrics;\n for (const auto &pair : metrics)\n {\n std::cout << \"#### cell metrics metrics name: \" << pair.first\n << \" metrics number:\"<< pair.second.size() << std::endl;\n\n const auto &metric_name = pair.first;\n const auto &metric_exclude_classes = pair.second;\n\n auto prefix = \"/mld/metrics/\" + metric_name + \"/exclude\";\n writer.WriteElementCount64(prefix, metric_exclude_classes.size());\n\n auto id = 0;\n for (auto &exclude_metric : metric_exclude_classes)\n {\n serialization::write(writer, prefix + \"/\" + std::to_string(id++), exclude_metric);\n\n // osrm weights are measured by duration\n // osrm duration reset by distance\n auto pb_metric = pb_metrics.add_metrics();\n for(auto i : exclude_metric.weights) {\n pb_metric->add_weights(i);\n }\n for(auto i : exclude_metric.durations) {\n pb_metric->add_distances(i);\n }\n }\n }\n std::fstream pb_out(\"1.mld.metrics.pb\", std::ios::out | std::ios::binary);\n pb_metrics.SerializeToOstream(&pb_out);\n}\n\n// reads .osrm.mldgr file\ntemplate <typename MultiLevelGraphT>\ninline void readGraph(const boost::filesystem::path &path,\n MultiLevelGraphT &graph,\n std::uint32_t &connectivity_checksum)\n{\n static_assert(std::is_same<customizer::MultiLevelEdgeBasedGraphView, MultiLevelGraphT>::value ||\n std::is_same<customizer::MultiLevelEdgeBasedGraph, MultiLevelGraphT>::value,\n \"\");\n\n storage::tar::FileReader reader{path, storage::tar::FileReader::VerifyFingerprint};\n\n reader.ReadInto(\"/mld/connectivity_checksum\", connectivity_checksum);\n serialization::read(reader, \"/mld/multilevelgraph\", graph);\n}\n\n// writes .osrm.mldgr file\ntemplate <typename MultiLevelGraphT>\ninline void writeGraph(const boost::filesystem::path 
&path,\n const MultiLevelGraphT &graph,\n const std::uint32_t connectivity_checksum)\n{\n static_assert(std::is_same<customizer::MultiLevelEdgeBasedGraphView, MultiLevelGraphT>::value ||\n std::is_same<customizer::MultiLevelEdgeBasedGraph, MultiLevelGraphT>::value,\n \"\");\n\n storage::tar::FileWriter writer{path, storage::tar::FileWriter::GenerateFingerprint};\n\n writer.WriteElementCount64(\"/mld/connectivity_checksum\", 1);\n writer.WriteFrom(\"/mld/connectivity_checksum\", connectivity_checksum);\n serialization::write(writer, \"/mld/multilevelgraph\", graph);\n}\n\n//override .osrm.enw file\ntemplate <typename NodeWeigtsVectorT, typename NodeDurationsVectorT>\nvoid writeEdgeBasedNodeWeightsDurations(const boost::filesystem::path &path,\n const NodeWeigtsVectorT &weights,\n const NodeDurationsVectorT &durations)\n{\n const auto fingerprint = storage::tar::FileWriter::GenerateFingerprint;\n storage::tar::FileWriter writer{path, fingerprint};\n\n storage::serialization::write(writer, \"/extractor/edge_based_node_weights\", weights);\n storage::serialization::write(writer, \"/extractor/edge_based_node_durations\", durations);\n}\n\n}\n}\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.5207756161689758,
"alphanum_fraction": 0.5235456824302673,
"avg_line_length": 20.235294342041016,
"blob_id": "6d3fc2a8d46a69fbc8517e6d928ef7669618fc0f",
"content_id": "27b63af6cd11b27eff44ff196301b3f86aa64a6f",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 361,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 17,
"path": "/osrm-ch/features/lib/utils.js",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nconst util = require('util');\n\nmodule.exports = {\n\n ensureDecimal: (i) => {\n if (parseInt(i) === i) return i.toFixed(1);\n else return i;\n },\n\n errorReason: (err) => {\n return err.signal ?\n util.format('killed by signal %s', err.signal) :\n util.format('exited with code %d', err.code);\n }\n};\n"
},
{
"alpha_fraction": 0.7415902018547058,
"alphanum_fraction": 0.7415902018547058,
"avg_line_length": 24.153846740722656,
"blob_id": "1a3c423a106aa95a6df3370e12ac0b7c71e608ac",
"content_id": "1865939044d5052dfa45fda45ca4fcef20773f47",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 654,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 26,
"path": "/osrm-ch/include/extractor/guidance/turn_lane_augmentation.hpp",
"repo_name": "dingchunda/osrm-backend",
"src_encoding": "UTF-8",
"text": "#ifndef OSRM_EXTRACTOR_GUIDANCE_TURN_LANE_AUGMENTATION_HPP_\n#define OSRM_EXTRACTOR_GUIDANCE_TURN_LANE_AUGMENTATION_HPP_\n\n#include \"extractor/guidance/intersection.hpp\"\n#include \"extractor/guidance/turn_lane_data.hpp\"\n#include \"util/attributes.hpp\"\n\nnamespace osrm\n{\nnamespace extractor\n{\nnamespace guidance\n{\nnamespace lanes\n{\n\nOSRM_ATTR_WARN_UNUSED\nLaneDataVector handleNoneValueAtSimpleTurn(LaneDataVector lane_data,\n const Intersection &intersection);\n\n} // namespace lanes\n} // namespace guidance\n} // namespace extractor\n} // namespace osrm\n\n#endif /* OSRM_EXTRACTOR_GUIDANCE_TURN_LANE_AUGMENTATION_HPP_ */\n"
}
] | 20 |
tarrott/slack-reminders | https://github.com/tarrott/slack-reminders | c9ba04f68d309ec12eb587903e7c238c12d9850b | 7a619b367f452a3b2c664e980fa0b69bac2d9dbe | 5fd2f63945636eb4430f8bb458ee15b7b84d314b | refs/heads/master | 2023-01-09T16:15:24.221528 | 2020-11-02T21:49:42 | 2020-11-02T21:49:42 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5722891688346863,
"alphanum_fraction": 0.5769745707511902,
"avg_line_length": 23.491804122924805,
"blob_id": "2df7bcaa31a6f60a6775769c3bba64bfe6fd3835",
"content_id": "341563d0292ed5f0b8ecec6ada8785ae113922c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1494,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 61,
"path": "/reminders.py",
"repo_name": "tarrott/slack-reminders",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport requests\nimport tzlocal\n\nwebhook = os.environ[\"SLACK_WEBHOOK\"]\ntimezone = os.environ[\"TIMEZONE\"]\n\ndef parse_reminder(lines):\n reminders = []\n\n for line in lines:\n if line[-1:] == '\\n':\n line = line[:-1]\n line = line.split()\n reminder = {\n \"message\": line[0],\n \"weekday\": line[1],\n \"hour\": line[2],\n \"minute\": line[3]\n }\n reminders.append(reminder)\n\n return reminders\n\n\ndef send_reminder(message):\n data = f'{{\"text\": \"{message}\"}}'\n\n try:\n response = requests.post(webhook, data)\n except Exception as e:\n print(f\"ERROR ({str(response.status_code)}): {response.text}\")\n print(e)\n\n\ndef schedule_reminders(reminders):\n scheduler = BlockingScheduler(timezone=timezone)\n\n for id,reminder in enumerate(reminders, 1):\n scheduler.add_job(\n send_reminder,\n args=[reminder[\"message\"]],\n trigger='cron',\n day_of_week=reminder[\"weekday\"],\n hour=reminder[\"hour\"],\n minute=reminder[\"minute\"],\n id=str(id)\n )\n scheduler.start()\n\n\nif __name__ == \"__main__\":\n local_timezone = tzlocal.get_localzone()\n print(f\"The local timezone is {local_timezone}\")\n\n with open('reminders.txt', 'r') as f:\n lines = f.readlines()\n reminders = parse_reminder(lines)\n schedule_reminders(reminders)\n"
},
{
"alpha_fraction": 0.7760416865348816,
"alphanum_fraction": 0.7864583134651184,
"avg_line_length": 16.545454025268555,
"blob_id": "333a9eab8c8866732924dd56411e1b0bc9e7503a",
"content_id": "c6b289207fa0dec542d3ff468bb8be95df2276b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 11,
"path": "/Dockerfile",
"repo_name": "tarrott/slack-reminders",
"src_encoding": "UTF-8",
"text": "FROM python:3.8-slim-buster\n\nRUN mkdir /app\nCOPY requirements.txt /app\nWORKDIR /app\nRUN pip install -r requirements.txt\n\nCOPY reminders.txt /app\nCOPY reminders.py /app\n\nCMD python reminders.py"
},
{
"alpha_fraction": 0.7384615540504456,
"alphanum_fraction": 0.7564102411270142,
"avg_line_length": 96.625,
"blob_id": "cb18cf7ba7e0e68b4f7a63e1f3a7dc105f60e78b",
"content_id": "a3d036870fd3d449df556e6d68f82f48fb9e7071",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 301,
"num_lines": 8,
"path": "/README.md",
"repo_name": "tarrott/slack-reminders",
"src_encoding": "UTF-8",
"text": "# Slack-Reminders\nSlack-Reminders sends messages defined in `reminders.txt` to a specific slack channel using a registered webhook with the slack api. The project uses the Advanced Python Scheduler 3.0 library to schedule the events similar to a cronjob and can be ran as a continious process inside a Docker container.\n\n### Usage\n1. Rename `sample.reminders.txt` to `reminders.txt`\n2. Each line in `reminders.txt` is a separate reminder. The format is space-delimited **Reminder-message** **Weekday** (0-6 or sun-sat) **Hour** (0-23) **Minute** (0-59). Use `*` to send reminder at every interval.\n3. Build Docker Image `docker build -t slack-reminders:latest`\n4. Run with `docker run -d -e TIMEZONE=\"TIMEZONE\" -e SLACK_WEBHOOK=\"URL\" --name slack-reminders slack-reminders:latest`"
}
] | 3 |
JesseMaitland/rsm | https://github.com/JesseMaitland/rsm | 9f2888931086c6acabe48ab89801fa309f89d355 | 011bd55aeef15343ccb07cbcd49526e7d06c3630 | e52f8a15f9da1b5f94fa8a0e66d7c34962bad105 | refs/heads/master | 2020-07-07T17:00:47.655329 | 2019-09-06T16:18:17 | 2019-09-06T16:18:17 | 203,414,438 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6690403819084167,
"alphanum_fraction": 0.6690403819084167,
"avg_line_length": 32.89655303955078,
"blob_id": "e2c7f79a989c0b4dc44fefcbe2b4cf10c7cc1bc0",
"content_id": "45bb1e0162334f834acb82b5c9a80c6048865585",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2949,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 87,
"path": "/rsm_lib/migration_lib/migration_manager.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\nfrom datetime import datetime\nfrom shutil import rmtree\nfrom rsm_lib.common import logger\nfrom rsm_lib.database.models import Migration\nfrom rsm_lib.database.common import Session\n\n\nclass MigrationManager:\n migrations_path = Path().cwd().absolute() / 'migrations'\n\n def __init__(self):\n pass\n\n def create_migrations_directory(self):\n try:\n self.migrations_path.mkdir(exist_ok=False)\n logger.info('migrations directory not found. Created directory ./migrations successfully. ')\n except FileExistsError:\n pass\n\n @staticmethod\n def get_file_id(name):\n file_id = datetime.utcnow().strftime('%Y-%m-%d-%H%M%s')\n return f'{file_id}-{name}'\n\n @staticmethod\n def get_template_path():\n return Path(__file__).parent / 'migration_template.yml'\n\n def get_migration_path(self, file_name):\n return self.migrations_path / f'{file_name}' / f'{file_name}.yml'\n\n @staticmethod\n def create_migration_directories(migration_path, template_path):\n sub_dir_names = ['up', 'down']\n\n for name in sub_dir_names:\n sub_dir = migration_path.parent / name\n sub_dir.mkdir(parents=True)\n sql_path = sub_dir / 'migration.sql'\n sql_path.touch()\n sql_path.write_text('-- put your migration here')\n\n migration_path.touch()\n migration_path.write_text(template_path.read_text())\n\n @staticmethod\n def add_migration_db(migration_id, db_session):\n migration = Migration(migration_id)\n db_session.add(migration)\n\n @staticmethod\n def remove_migration_directories(migration_path):\n rmtree(migration_path.parent.absolute().as_posix())\n\n\ndef create_new_migration(name):\n\n # first create a migration manager and the parent dir if it does not exist\n migration_manager = MigrationManager()\n migration_manager.create_migrations_directory()\n\n migration_id = migration_manager.get_file_id(name)\n db_session = Session()\n\n try:\n logger.info(f'creating migration with id {migration_id}')\n template_path = migration_manager.get_template_path()\n migrations_path = migration_manager.get_migration_path(migration_id)\n migration_manager.add_migration_db(migration_id, db_session)\n\n logger.info(f'migration added to database. creating migration directories')\n migration_manager.create_migration_directories(migrations_path, template_path)\n\n logger.info(f'committing migration {migration_id} to database.')\n db_session.commit()\n\n logger.info(f'migration {migration_id} created successfully!')\n except Exception:\n logger.exception(f'there was a creating the migration {migration_id}. check logs')\n db_session.rollback()\n migration_manager.remove_migration_directories(migrations_path)\n raise\n\n finally:\n logger.info('migration finished')\n"
},
{
"alpha_fraction": 0.6817042827606201,
"alphanum_fraction": 0.6817042827606201,
"avg_line_length": 29.461538314819336,
"blob_id": "45e08d9709e2af080955577f891ed513eb453887",
"content_id": "6bbba1ac00052b6e89dcea783756b9ff92586243",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 13,
"path": "/rsm_lib/entry_points/db_management.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from rsm_lib.common import logger\nfrom rsm_lib.database.common import create_db\n\n\ndef create_database():\n try:\n logger.info('creating database tables to track migrations')\n create_db()\n print('database tables created successfully')\n except Exception:\n logger.exception('there was a problem creating the database tables. check logs.')\n finally:\n exit()\n\n\n\n"
},
{
"alpha_fraction": 0.6707882285118103,
"alphanum_fraction": 0.6754250526428223,
"avg_line_length": 24.8799991607666,
"blob_id": "dd1c811dff0958311e6745785d45a8110f909009",
"content_id": "c755111a9cbf32cd9e78b406015a3c775445de96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 647,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 25,
"path": "/setup.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from os import linesep\nfrom setuptools import setup, find_packages\n\n\ndef get_requirements():\n with open('requirements.txt') as file:\n return [line.rstrip(linesep) for line in file.readlines()]\n\n\ndef get_long_description():\n with open('README.md') as file:\n return file.read()\n\n\nsetup(\n name='rs-migrate',\n version='1.0.0',\n author='heyjobs',\n discription='database migration tool for aws redshift',\n include_package_data=True,\n long_description=get_long_description(),\n install_requires=get_requirements(),\n packages=find_packages(exclude=('tests', 'venv', 'migrations', 'logs')),\n scripts=['rsm']\n)\n"
},
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.621052622795105,
"avg_line_length": 30.66666603088379,
"blob_id": "3317bf332ff93cc514932f4cebbe4888d0071b43",
"content_id": "bd2bd1d99d92d6bb14699ae6bf6eca8bb7cbd955",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 6,
"path": "/makefile",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "init:\n\tif [[ -d ./venv ]]; then rm -rf venv; fi \\\n\t&& python3.6 -m venv venv \\\n\t&& . venv/bin/activate \\\n\t&& pip install --upgrade pip setuptools wheel \\\n\t&& pip install -r requirements.txt\n"
},
{
"alpha_fraction": 0.5687453150749207,
"alphanum_fraction": 0.5687453150749207,
"avg_line_length": 33.128204345703125,
"blob_id": "2fd4a16950a227a253c825785cab83037155fb66",
"content_id": "62b5a2fbbc3183e1d8e3016e414c5e2d8d73d7ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1331,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 39,
"path": "/rsm_lib/common/project_logger.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "###################################################################################\n#\n# file makes simple convenience method for getting a logger instance\n#\n###################################################################################\nimport logging\n\n\ndef logger_factory(file_name,\n logger_name=__name__,\n format_string='%(levelname)s : %(asctime)s : %(name)s : %(message)s',\n print_stream=True,\n logging_level='INFO'):\n \"\"\"\n returns a configured logger object\n :param file_name:\n :param logger_name:\n :param format_string:\n :param print_stream: set this true to stream logger output to console\n :param logging_level:\n :return: pre configured logger object\n \"\"\"\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(getattr(logging, logging_level))\n\n formatter = logging.Formatter(fmt=format_string, datefmt='%m/%d/%Y %I:%M:%S %p')\n\n file_handler = logging.FileHandler(filename=file_name)\n file_handler.setLevel(getattr(logging, logging_level))\n file_handler.setFormatter(formatter)\n\n logger.addHandler(file_handler)\n if print_stream:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n return logger\n"
},
{
"alpha_fraction": 0.6612903475761414,
"alphanum_fraction": 0.6612903475761414,
"avg_line_length": 11.399999618530273,
"blob_id": "922d0a1396b1006c423b055992f5ac7d5144b516",
"content_id": "6e16cef8e680270d22157a3ceca513d8a0b06c65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/rsm_lib/tests/test_migration_lib/fixtures/good-migration/up/migration3.sql",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "-- put your migration here\nCREATE TABLE beans\n(\n id int\n);\n"
},
{
"alpha_fraction": 0.5872576236724854,
"alphanum_fraction": 0.5879501104354858,
"avg_line_length": 20.55223846435547,
"blob_id": "34422e2c5e7236abafded7ecbc98af8d678a532d",
"content_id": "36b3627053500d8670053d655bf937db9cc0036b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1444,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 67,
"path": "/rsm",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "#!venv/bin/python3\nimport argparse\nfrom rsm_lib.entry_points import \\\n execute_create_new_migration, \\\n create_database, \\\n execute_migration_up, \\\n execute_migration_down, \\\n execute_all_up, execute_all_down\n\ncommands = {\n '--create-migration': {\n 'help': 'optional :: creates a migration file with the specified name'\n },\n\n '--init-db': {\n 'help': 'optional :: creates the db as specified in .env file',\n 'action': 'store_true'\n },\n\n '--migrate-up': {\n 'action': 'store_true'\n },\n\n '--migrate-down': {\n 'action': 'store_true'\n },\n\n '--migrate-allup': {\n 'action': 'store_true'\n },\n\n '--migrate-alldown': {\n 'action': 'store_true'\n },\n\n}\n\n\ndef get_arg_parser():\n parser = argparse.ArgumentParser()\n for command, options in commands.items():\n parser.add_argument(command, **options)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n cmd_args = get_arg_parser()\n\n # create db if flag passed\n if cmd_args.init_db:\n create_database()\n\n # generates a new migration\n if cmd_args.create_migration:\n execute_create_new_migration(cmd_args.create_migration)\n\n if cmd_args.migrate_up:\n execute_migration_up()\n\n if cmd_args.migrate_down:\n execute_migration_down()\n\n if cmd_args.migrate_allup:\n execute_all_up()\n\n if cmd_args.migrate_alldown:\n execute_all_down()\n"
},
{
"alpha_fraction": 0.7551867365837097,
"alphanum_fraction": 0.7551867365837097,
"avg_line_length": 33.42856979370117,
"blob_id": "bd02e2fcdcea714bde1bfd87805da7c45ed41cb9",
"content_id": "82d0fb798a7ceb6a4bb252102a607bc33ee197ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 7,
"path": "/rsm_lib/common/__init__.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from .project_logger import logger_factory\nfrom pathlib import Path\n\nLOGGER_PATH = Path().cwd() / 'logs'\nLOGGER_PATH.mkdir(parents=True, exist_ok=True)\nLOGGER_FILE_PATH = LOGGER_PATH / 'migrate.log'\nlogger = logger_factory(LOGGER_FILE_PATH)\n"
},
{
"alpha_fraction": 0.6831313967704773,
"alphanum_fraction": 0.6831313967704773,
"avg_line_length": 25.825000762939453,
"blob_id": "920e27ecf4c5547bd8df6fb1fc0332d1b6f66042",
"content_id": "49b8db89db55a477d2221568700db0f01703cd07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 40,
"path": "/rsm_lib/entry_points/main_entry.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from rsm_lib.common import logger\nfrom rsm_lib.database.models import Migration\nfrom rsm_lib.migration_lib import create_new_migration, run_migration_up, run_migration_down\n\n\ndef execute_create_new_migration(migration_name):\n migration_name = migration_name.lower()\n migration_name = migration_name.replace(' ', '-')\n create_new_migration(migration_name)\n exit()\n\n\ndef execute_migration_up():\n logger.info('running migrate up')\n migration = Migration.get_next_up_migration()\n try:\n run_migration_up(migration)\n except AttributeError:\n logger.info('no migration to apply. Database up to date!')\n exit()\n\n\ndef execute_migration_down():\n logger.info('running migrate down')\n migration = Migration.get_next_down_migration()\n try:\n run_migration_down(migration)\n except AttributeError:\n logger.info('no migration to apply. Database up to date!')\n exit()\n\n\ndef execute_all_up():\n while True:\n execute_migration_up()\n\n\ndef execute_all_down():\n while True:\n execute_migration_down()\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 14,
"blob_id": "83749869c2713e49329caf4fce997acc5c19cd72",
"content_id": "62e860415c4474a6bce91adad85520df538d98d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 1,
"path": "/rsm_lib/__init__.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "name='rsm_lib'\n"
},
{
"alpha_fraction": 0.72794508934021,
"alphanum_fraction": 0.7332276701927185,
"avg_line_length": 32.1929817199707,
"blob_id": "38497ead5d450cc32d847f208fd3f0739fa2b343",
"content_id": "c119a652277ca7d2923163f82af2b79526743bc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1893,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 57,
"path": "/rsm_lib/tests/test_migration_lib/test_migration_parser.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom schema import SchemaError\nfrom pathlib import Path\nfrom rsm_lib.migration_lib.migration_parser import MigrationParser\nfrom rsm_lib.migration_lib.migration_parser import ParsedMigration\n\n# override parent path class attribute\nMigrationParser.parent_path = Path(__file__).parent / 'fixtures'\n\n\[email protected]()\ndef good_parser():\n return MigrationParser('good-migration')\n\n\ndef test_bad_parse():\n with pytest.raises(SchemaError):\n bad_parse = MigrationParser('bad-migration')\n migration_config = bad_parse.load_migration_yml()\n bad_parse.validate_config_structure(migration_config)\n\n\ndef test_good_parse(good_parser):\n migration_config = good_parser.load_migration_yml()\n good_parser.validate_config_structure(migration_config)\n assert isinstance(migration_config, dict)\n\n\ndef test_gets_migration(good_parser):\n parsed_migration = good_parser.get_parsed_migration_from_file()\n assert isinstance(parsed_migration, ParsedMigration)\n\n\ndef test_correct_number_of_up_steps(good_parser):\n parsed_migration = good_parser.get_parsed_migration_from_file()\n assert len(parsed_migration.up_steps) == 3\n\n\ndef test_correct_number_of_down_steps(good_parser):\n parsed_migration = good_parser.get_parsed_migration_from_file()\n assert len(parsed_migration.down_steps) == 3\n\n\ndef test_correct_up_step_order(good_parser):\n expected_up_order = ['migration1.sql', 'migration2.sql', 'migration3.sql']\n parsed_config = good_parser.get_parsed_migration_from_file()\n\n for i in range(3):\n assert expected_up_order[i] == parsed_config.up_steps[i]\n\n\ndef test_correct_down_step_order(good_parser):\n expected_up_order = ['migration9.sql', 'migration7.sql', 'migration8.sql']\n parsed_config = good_parser.get_parsed_migration_from_file()\n\n for i in range(3):\n assert expected_up_order[i] == parsed_config.down_steps[i]\n\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 11,
"blob_id": "f6d49191da70b85fe89b3096556a0e9ddbfbafbe",
"content_id": "c530d99e8836e57c407c68f218d7856018fca37f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/rsm_lib/tests/test_migration_lib/fixtures/good-migration/up/migration1.sql",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "-- put your migration here\nCREATE TABLE foo\n(\n id int\n);\n"
},
{
"alpha_fraction": 0.7555555701255798,
"alphanum_fraction": 0.7555555701255798,
"avg_line_length": 21.5,
"blob_id": "8b2c5f387b9676612f9876417c0c1877eef812b9",
"content_id": "3639f2192738f76405ff6174c8969d8de3e36678",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/rsm_lib/tests/test_migration_lib/fixtures/good-migration/down/migration8.sql",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "-- put your migration here\nDROP TABLE beans;\n"
},
{
"alpha_fraction": 0.6269739866256714,
"alphanum_fraction": 0.6269739866256714,
"avg_line_length": 30.62162208557129,
"blob_id": "3ff8214ab53ae4711a3d3733dd6ea3d08cf3f84f",
"content_id": "c9aa7615aa129a1aa5de09109d3a3c6510ac0573",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2343,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 74,
"path": "/rsm_lib/migration_lib/migration_parser.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "import yaml\nfrom schema import Schema, And, Use\nfrom rsm_lib.common import logger\nfrom pathlib import Path\n\n\nclass MigrationSteps(list):\n \"\"\"\" simple class to have named list of MigrationSteps \"\"\"\n pass\n\n\nclass ParsedMigration:\n\n \"\"\" simple data class to hold parsed migration up and down steps separately \"\"\"\n\n def __init__(self, up_steps, down_steps):\n self.up_steps = up_steps\n self.down_steps = down_steps\n\n\nclass MigrationParser:\n\n \"\"\" class to load and validate migration config yml file \"\"\"\n\n parent_path = Path().cwd().absolute() / 'migrations'\n\n config_schema = Schema({\n 'migration': {\n 'up': {\n 'steps': And(Use(list))\n },\n 'down': {\n 'steps': And(Use(list))\n }\n }\n })\n\n def __init__(self, migration_key):\n self.migration_key = migration_key\n self.migration_path = MigrationParser.parent_path / migration_key\n self.migration_file_path = self.migration_path / f'{migration_key}.yml'\n\n def load_migration_yml(self):\n\n \"\"\" loads a migration conf yml file for THIS migration \"\"\"\n\n logger.info(f'loading migration at config at {self.migration_file_path.as_posix()}')\n config = yaml.safe_load(self.migration_file_path.open())\n logger.info('migration config loaded successfully. ')\n return config\n\n def validate_config_structure(self, config):\n\n \"\"\" validates that the config yaml is in the expected format. if not we can not continue with the migration \"\"\"\n\n try:\n logger.info('validating migration config')\n MigrationParser.config_schema.validate(config)\n logger.info('migration config valid.')\n except Exception:\n logger.info(f'config structure invalid for {self.migration_file_path.as_posix()}')\n raise\n return config\n\n def get_parsed_migration_from_file(self):\n\n \"\"\" loads and validates the migration config file and parses out the keys into named steps \"\"\"\n\n migration = self.load_migration_yml()\n migration = self.validate_config_structure(migration)\n\n migrate_up = migration['migration']['up']['steps']\n migrate_down = migration['migration']['down']['steps']\n return ParsedMigration(migrate_up, migrate_down)\n\n\n\n"
},
{
"alpha_fraction": 0.6363198161125183,
"alphanum_fraction": 0.6406551003456116,
"avg_line_length": 38.92307662963867,
"blob_id": "84e20c4d440be48893df003de49b6cc851124878",
"content_id": "52de685a13a1cf8fd901e28d1314ec676337ab33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2076,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 52,
"path": "/rsm_lib/database/models.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from enum import Enum\nfrom rsm_lib.database.common import Base\nfrom sqlalchemy import Column, String, Integer, DateTime, Text\nfrom datetime import datetime\nfrom rsm_lib.database.common import Session\n\n\ndb_session = Session()\n\nMIGRATION_STATE_NAMES = ['not_applied',\n 'applied_down_success',\n 'applied_down_fail',\n 'applied_up_success',\n 'applied_up_fail']\n\nMIGRATION_STATES = Enum('MIGRATION_STATES', MIGRATION_STATE_NAMES)\n\n\nclass Migration(Base):\n \"\"\"\n simple model used to hold job title and id\n \"\"\"\n\n __tablename__ = 'migrations'\n\n file_name = Column(String(100), nullable=False, primary_key=True)\n state = Column(Integer, default=MIGRATION_STATES.not_applied.value)\n state_name = Column(String(100), default=MIGRATION_STATES.not_applied.name)\n steps_executed = Column(String(500), nullable=True)\n created_at = Column(DateTime, default=datetime.now())\n last_run_at = Column(DateTime, nullable=True)\n exception = Column(Text)\n\n def __init__(self, file_name):\n self.file_name = file_name\n\n @staticmethod\n def get_next_up_migration():\n \"\"\" gets the next migration which should be run up \"\"\"\n valid_states = [MIGRATION_STATES.not_applied.value,\n MIGRATION_STATES.applied_up_fail.value,\n MIGRATION_STATES.applied_down_success.value,\n MIGRATION_STATES.applied_down_fail.value]\n return db_session.query(Migration).filter(Migration.state.in_(valid_states)).order_by(Migration.file_name).first()\n\n @staticmethod\n def get_next_down_migration():\n \"\"\" gets the next migration which should be run down \"\"\"\n valid_states = [MIGRATION_STATES.applied_up_fail.value,\n MIGRATION_STATES.applied_up_success.value,\n MIGRATION_STATES.applied_down_fail.value]\n return db_session.query(Migration).filter(Migration.state.in_(valid_states)).order_by(Migration.file_name.desc()).first()\n"
},
{
"alpha_fraction": 0.831932783126831,
"alphanum_fraction": 0.831932783126831,
"avg_line_length": 58.5,
"blob_id": "4673abbf6a495949f64c8dede9364ed7f43e5905",
"content_id": "e2996cbe9d174ccf82a354f262c734272b52f167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 2,
"path": "/rsm_lib/migration_lib/__init__.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from .migration_manager import create_new_migration\nfrom .migration_runner import run_migration_down, run_migration_up\n"
},
{
"alpha_fraction": 0.6925025582313538,
"alphanum_fraction": 0.6990881562232971,
"avg_line_length": 32.440677642822266,
"blob_id": "6cb4b58423863ce7ec2467fd07737a0566d2fcd7",
"content_id": "a9f122f04da7e35221202ee004b5ea014cf0f672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1974,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 59,
"path": "/rsm_lib/tests/test_migration_lib/test_migration_manager.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom time import sleep\nfrom shutil import rmtree\nfrom pathlib import Path\nfrom rsm_lib.migration_lib.migration_manager import MigrationManager\n\n# patch the default path\ntemp_migrations_path = Path(__file__).parent.absolute() / 'test_migrations'\nfixtures_path = Path(__file__).parent.absolute() / 'fixtures'\nMigrationManager.migrations_path = temp_migrations_path\n\ndef test_creates_migrations_dir():\n migration_manager = MigrationManager()\n migration_manager.create_migrations_directory()\n\n assert temp_migrations_path.exists()\n temp_migrations_path.rmdir()\n assert not temp_migrations_path.exists()\n\n\ndef test_creates_migration_dirs():\n migration_template = fixtures_path / 'test_template.yml'\n migration_path = temp_migrations_path / 'migrate-moon-spam.yml'\n migration_manager = MigrationManager()\n migration_manager.create_migrations_directory()\n migration_manager.create_migration_directories(migration_path, migration_template)\n\n up_path = temp_migrations_path / 'up'\n up_path_migration = up_path / 'migration.sql'\n down_path = temp_migrations_path / 'down'\n down_path_migration = down_path / 'migration.sql'\n\n try:\n assert migration_path.exists()\n assert up_path.exists()\n assert down_path.exists()\n assert up_path_migration.exists()\n assert down_path_migration.exists()\n except AssertionError:\n raise\n finally:\n rmtree(temp_migrations_path.as_posix())\n\n\ndef test_file_id_increments():\n id1 = MigrationManager.get_file_id('b') # letter a should still be last\n sleep(1)\n id2 = MigrationManager.get_file_id('e') # e should still be newer than e\n sleep(1)\n id3 = MigrationManager.get_file_id('a') # a should still be greather than e\n sleep(1)\n id4 = MigrationManager.get_file_id('z') # z is the best\n\n assert id4 > id3 > id2 > id1\n\n\ndef test_file_id_format():\n file_id = MigrationManager.get_file_id('bar')\n assert 1 == 1\n\n"
},
{
"alpha_fraction": 0.7441860437393188,
"alphanum_fraction": 0.7441860437393188,
"avg_line_length": 20.5,
"blob_id": "22dd5ae3ce43e66ab63502b8255077cd23eeb85e",
"content_id": "a11c3bccfa770c8414c9ffffc724827bf260dd6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/rsm_lib/tests/test_migration_lib/fixtures/bad-migration/down/migration1.sql",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "-- put your migration here\nDROP TABLE foo;\n"
},
{
"alpha_fraction": 0.683555543422699,
"alphanum_fraction": 0.683555543422699,
"avg_line_length": 25.785715103149414,
"blob_id": "71c69481a3215d4163bab64a56c3928bfc60b678",
"content_id": "068711441008d2e88b70a13bcf81b24114adfacc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1125,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 42,
"path": "/rsm_lib/database/common.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFile declares all SQL alchemy / db related objects and models\n\"\"\"\nfrom rsm_lib.settings import SQL_ALCHEMY_MIGRATION_DB_CONN_STRING\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\n\"\"\"\n SQL alchemy configuration and engine creation\n\"\"\"\nengine = create_engine(SQL_ALCHEMY_MIGRATION_DB_CONN_STRING)\n\n\ndef session_factory():\n \"\"\"\n session factory returns a threadsafe scopped session object to access db through sql alchemy\n :return:\n \"\"\"\n return scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n\n\"\"\"\n necessary objects to define models using declarative base\n\"\"\"\nSession = session_factory()\nBase = declarative_base()\n\n\ndef drop_db():\n import rsm_lib.database.models\n Base.metadata.drop_all(engine)\n\n\ndef create_db():\n \"\"\"\n method simply drops and rebuilds the tables each time it's called.\n :return:\n \"\"\"\n import rsm_lib.database.models\n Base.metadata.create_all(engine)\n"
},
{
"alpha_fraction": 0.4855072498321533,
"alphanum_fraction": 0.6932367086410522,
"avg_line_length": 15.5600004196167,
"blob_id": "62e5e670be59988c2f212aad893d4fb372115dbf",
"content_id": "6afba13df09d7e43d0576e0d2cae3fee4477bdb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 25,
"path": "/requirements.txt",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "atomicwrites==1.3.0\nattrs==19.1.0\ncontextlib2==0.5.5\nimportlib-metadata==0.19\nJinja2==2.10.1\nlxml==4.4.1\nMarkupSafe==1.1.1\nmore-itertools==7.2.0\npackaging==19.1\npluggy==0.12.0\nprettytable==0.7.2\npy==1.8.0\npyaml==19.4.1\npyparsing==2.4.2\npytest==5.1.0\npython-dotenv==0.10.3\nPyYAML==5.1.2\nschema==0.7.0\nsix==1.12.0\nSQLAlchemy==1.3.7\nsty==1.0.0b11\nwcwidth==0.1.7\nzipp==0.5.2\npsycopg2==2.7.7\nsqlalchemy-redshift==0.7.3\n"
},
{
"alpha_fraction": 0.6121154427528381,
"alphanum_fraction": 0.6121154427528381,
"avg_line_length": 31.84375,
"blob_id": "e6fdaf41ba488af751f16f8ff02d00c5faf5062c",
"content_id": "1026549d9ae5b80e547b459347d8f2c24c91e4b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3153,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 96,
"path": "/rsm_lib/migration_lib/migration_runner.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nfrom rsm_lib.database.common import Session\nfrom rsm_lib.database.models import MIGRATION_STATES\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\nfrom rsm_lib.common import logger\nfrom rsm_lib.migration_lib.migration_parser import MigrationParser, MigrationSteps\n\n\nclass MigrationRunner:\n modes = ['up', 'down']\n\n def __init__(self, migration):\n self.db_session = Session()\n self.migration = migration\n\n def run(self, mode):\n\n # validate that we have passed a proper mode of operation\n if mode not in self.modes:\n raise ValueError(f'mode must be set to either up or down, not {mode}')\n\n parser = MigrationParser(self.migration.file_name)\n parsed_migration = parser.get_parsed_migration_from_file()\n\n base_path = parser.migration_path / mode\n steps = None\n\n if mode == 'up':\n steps = MigrationSteps(parsed_migration.up_steps)\n\n if mode == 'down':\n steps = MigrationSteps(parsed_migration.down_steps)\n\n steps_executed = []\n exception = ''\n\n for step in steps:\n\n search_path = base_path / step\n sql = search_path.read_text()\n\n try:\n logger.info(f\"running command {sql}\")\n\n self.db_session.execute(sql)\n self.db_session.commit()\n steps_executed.append(step)\n\n except OperationalError as e:\n logger.exception('An Operational Exception has occurred. Check logs.')\n self.db_session.rollback()\n exception = str(e)\n break\n\n except ProgrammingError as e:\n logger.exception('A programming error has occurred. Check logs.')\n self.db_session.rollback()\n exception = str(e)\n break\n\n self.migration.exception = exception\n self.migration.last_run_at = datetime.now()\n self.migration.steps_executed = str(steps_executed)\n\n if exception:\n\n if mode == 'up':\n self.migration.state = MIGRATION_STATES.applied_up_fail.value\n self.migration.state_name = MIGRATION_STATES.applied_up_fail.name\n\n if mode == 'down':\n self.migration.state = MIGRATION_STATES.applied_down_fail.value\n self.migration.state_name = MIGRATION_STATES.applied_down_fail.name\n\n else: # otherwise success\n\n if mode == 'up':\n self.migration.state = MIGRATION_STATES.applied_up_success.value\n self.migration.state_name = MIGRATION_STATES.applied_up_success.name\n\n if mode == 'down':\n self.migration.state = MIGRATION_STATES.applied_down_success.value\n self.migration.state_name = MIGRATION_STATES.applied_down_success.name\n\n self.db_session.add(self.migration)\n self.db_session.commit()\n\n\ndef run_migration_up(migration):\n runner = MigrationRunner(migration)\n runner.run('up')\n\n\ndef run_migration_down(migration):\n runner = MigrationRunner(migration)\n runner.run('down')\n"
},
{
"alpha_fraction": 0.7182539701461792,
"alphanum_fraction": 0.7182539701461792,
"avg_line_length": 26.66666603088379,
"blob_id": "6f853158f45cfcfb03d62b14082e31fffd1f6b65",
"content_id": "b0249036be5a503daea90443578fa74d943eaae1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 9,
"path": "/rsm_lib/settings.py",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "import os\nfrom dotenv import load_dotenv\nfrom pathlib import Path\nenv_path = Path().cwd() / '.env'\n\nif env_path.exists():\n load_dotenv(env_path.as_posix())\n\nSQL_ALCHEMY_MIGRATION_DB_CONN_STRING = os.getenv('SQL_ALCHEMY_MIGRATION_DB_CONN_STRING')\n\n\n\n"
},
{
"alpha_fraction": 0.6557376980781555,
"alphanum_fraction": 0.6557376980781555,
"avg_line_length": 11.199999809265137,
"blob_id": "a1b501d9880dad37f33c84dd620d0c574b96718a",
"content_id": "fa9a9118958703c869e41bc1922e354e510130cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/rsm_lib/tests/test_migration_lib/fixtures/bad-migration/up/migration2.sql",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "-- put your migration here\nCREATE TABLE spam\n(\n id int\n);\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 21,
"blob_id": "64716de90a6ed8b0c1cfa2bad7bc054517462b1c",
"content_id": "669cdf31c103a0d91a44713c6f4f0307ec753e30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/rsm_lib/tests/test_migration_lib/fixtures/good-migration/down/migration7.sql",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "-- put your migration here\nDROP TABLE spam;\n"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.6399999856948853,
"avg_line_length": 24,
"blob_id": "ea4fe30ecebf66517e2a4370eea40e7075d03bf8",
"content_id": "42aed047fe9555d4e40e0960ff2a43111685e279",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 25,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 1,
"path": "/README.md",
"repo_name": "JesseMaitland/rsm",
"src_encoding": "UTF-8",
"text": "**A really great tool!**\n"
}
] | 25 |
xarion/frodo_experiment | https://github.com/xarion/frodo_experiment | 326499f23c29faddd70166a163a9ac47e17f88fb | 91ff9bf49ab749cc0e85b3e76a03a4a960c53886 | c70285da582445b219aee706b0cd2c6331cfb0e3 | refs/heads/master | 2016-08-08T20:53:57.905588 | 2015-10-18T09:31:08 | 2015-10-18T09:31:08 | 43,691,216 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5974025726318359,
"alphanum_fraction": 0.6233766078948975,
"avg_line_length": 13,
"blob_id": "6fe90d727456bb0a3d4b10c865ac65b90f5c95e1",
"content_id": "c85624c2d4d25e962ca7bd3fa685b0a19a81b88c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 11,
"path": "/app.py",
"repo_name": "xarion/frodo_experiment",
"src_encoding": "UTF-8",
"text": "from flask import Flask\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n return \"One ring to rule them all\"\n\n\napp.debug = True\napp.run('0.0.0.0')\n"
},
{
"alpha_fraction": 0.5377426147460938,
"alphanum_fraction": 0.5485262274742126,
"avg_line_length": 32.095237731933594,
"blob_id": "d11c482ff0b791fad457da9f510ff4cbea3872d7",
"content_id": "fd2413e0a70cb80cebd53470fd33b677c81bf897",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1391,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 42,
"path": "/download.py",
"repo_name": "xarion/frodo_experiment",
"src_encoding": "UTF-8",
"text": "import csv\nimport youtube_dl\nfrom subprocess import call\n\nwith open('music.csv', 'rb') as f:\n reader = csv.reader(f)\n music_list = list(reader)\n\nydl_opts = {'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': u\"%(id)s.%(ext)s\"\n }\n\nidentifier_counts = {}\nfiles = []\nwith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n call([\"mkdir\", \"out\"])\n for music in music_list:\n youtube_link = music[4]\n dat = ydl.download([youtube_link])\n start_at = music[5]\n end_at = music[6]\n culture = music[3] == \"Turkish\"\n popularity = music[2] == \"unknown\"\n feeling = music[1] == \"happy\"\n duration = str(int(end_at) - int(start_at))\n file_name = music[7] + \".mp3\"\n identifier = \"%d%d%d\" % (culture, popularity, feeling)\n if identifier not in identifier_counts:\n identifier_counts[identifier] = 1\n out_file_name = \"M%s%d.mp3\" % (identifier, identifier_counts[identifier])\n files.append(out_file_name)\n identifier_counts[identifier] += 1\n call([\"ffmpeg\", \"-y\", \"-ss\", start_at, \"-t\", duration, \"-i\", file_name, \"out/\" + out_file_name])\n call([\"rm\", file_name])\n\nfor each in files:\n print each\n\n"
}
] | 2 |
phani218/pyspark | https://github.com/phani218/pyspark | 199add09888fd4c535ec1949cd46513fb18831d6 | f5b1662682dfe9c42a62c535228b86df9e503a07 | 8f2797a70102032126c94bc830b6c9b72e47c55c | refs/heads/master | 2021-01-24T19:23:56.388229 | 2017-09-11T20:42:32 | 2017-09-11T20:42:32 | 82,969,729 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6728439331054688,
"alphanum_fraction": 0.6928011178970337,
"avg_line_length": 35.921051025390625,
"blob_id": "0056e5872ad3d8005403b0db395cf04fcd1cead2",
"content_id": "88cc6871bc68bd6b72e11cec0072ebe6071a5cf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1403,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 38,
"path": "/kafkaWindow.py",
"repo_name": "phani218/pyspark",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 06 23:34:10 2017\n\n@author: yoga.Phani\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport json\n\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\n#from pyspark.sql import SQLContext\n\ndef createContext():\n print(\"Creating new context\")\n sc = SparkContext(appName=\"PythonStreamingKafkaWordCount\")\n sc.setLogLevel(\"WARN\")\n ssc = StreamingContext(sc, 10)\n #sqlContext = SQLContext(sc)\n brokers, topic = sys.argv[1:]\n kvs = KafkaUtils.createDirectStream(ssc, [topic], {\"metadata.broker.list\": brokers})\n jsonstream=kvs.map(lambda x: json.loads(x[1])).map(lambda y : (y['netid'],y['src_sys']))\n jsonstream.pprint()\n jsonstream.countByValue().transform(lambda rdd:rdd.sortBy(lambda x:-x[1])).map(lambda x:\"counts this batch:\\tValue %s\\tCount %s\" % (x[0],x[1])).pprint()\n jsonstream.countByValueAndWindow(60,30).transform(lambda rdd:rdd.sortBy(lambda x:-x[1])).map(lambda x:\"Window counts this batch:\\tValue %s\\tCount %s\" % (x[0],x[1])).pprint()\n \n return ssc\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Usage: window.py <topic>\", file=sys.stderr)\n checkpoint='/hdfsproc/pyspark_checkpoint'\n ssc = StreamingContext.getOrCreate(checkpoint,lambda: createContext())\n ssc.start()\n ssc.awaitTermination()\n"
}
] | 1 |
kevinteng525/atm | https://github.com/kevinteng525/atm | 1f5080b9408692432f9e567917612570bbe4fbf4 | 06feea5ffae97f3346cac1fdd8b8b9a86f0a3978 | 6d297ede4f3efb9a86fc9698f11a8caf6e0d1582 | refs/heads/master | 2020-08-10T17:43:36.904167 | 2019-10-11T09:00:14 | 2019-10-11T09:00:14 | 214,388,582 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6380952596664429,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 51.5,
"blob_id": "a20a30a2b763c384d7badd6bcde0cde43f6f5ce3",
"content_id": "7ffe4ea6494543777ff52f3872130a35286b3d31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 2,
"path": "/sqls/ATM_Tag.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_Tag` VALUES (1, 'conv2d', NULL);\nINSERT INTO `ATM_Tag` VALUES (2, 'always_pass', NULL);\n"
},
{
"alpha_fraction": 0.8275862336158752,
"alphanum_fraction": 0.8275862336158752,
"avg_line_length": 57,
"blob_id": "c2ed72a155283996f741c0ed36a8fc160b8e5cb3",
"content_id": "cd31299caa2711f6fcd7f129c9a095dd46a546b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 1,
"path": "/ConfigModel/__init__.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "default_app_config = 'ConfigModel.apps.ConfigmodelConfig'\n"
},
{
"alpha_fraction": 0.6974790096282959,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 58.5,
"blob_id": "f144046c6d686147a48d494cde6dd205a5ae6950",
"content_id": "774aaafb5b4b736491d016bbd6555edc6ef0705e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 2,
"path": "/sqls/ATM_DLFramework.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_DLFramework` VALUES (1, 'tensorflow', NULL);\nINSERT INTO `ATM_DLFramework` VALUES (2, 'mxnet', NULL);\n"
},
{
"alpha_fraction": 0.6490166187286377,
"alphanum_fraction": 0.6490166187286377,
"avg_line_length": 22.64285659790039,
"blob_id": "fe3c5e57efa9bf4281bc51e1b1a223fa91e2d37d",
"content_id": "9967b78fc3cac5b7d49db5bf7a85b44df8169475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 661,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 28,
"path": "/migrations/6-rm_pseudo_from_cmd.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\n\ndef main():\n from TestModel.models import TestCmd, TestCase\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import Project\n from TestModel.models import TestType\n\n testCMDs = TestCmd.objects.all()\n for testCMD in testCMDs:\n cmdline = testCMD.cmdline\n print(cmdline)\n if \"-r pseudo\" in cmdline:\n new_cmdline = cmdline.replace(\" -r pseudo\", \"\")\n testCMD.cmdline = new_cmdline\n testCMD.save()\n\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.6297365427017212,
"alphanum_fraction": 0.6438109278678894,
"avg_line_length": 29.461538314819336,
"blob_id": "814d393ea5baab1fe72f823d1a03b8c1ec10ea88",
"content_id": "131f7ab02cf05f40ee59861ea566265a420a61b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2771,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 91,
"path": "/ConfigModel/models.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass Project(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True)\n is_delete = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_Project\"\n\nclass OSType(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=50, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_OSType\"\n\nclass Branch(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=50, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_Branch\"\n\nclass DLFramework(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=50, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_DLFramework\"\n\nclass Block(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=50, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_Block\"\n\nclass ConfigType(models.Model):\n name = models.CharField(max_length=50, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True, verbose_name=\"Description\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_ConfigType\"\n\nclass ConfigDetail(models.Model):\n name = models.CharField(max_length=20, unique=True)\n configtype = models.ForeignKey(to=\"ConfigType\", on_delete=models.PROTECT, verbose_name=\"Config Type\")\n desc = models.CharField(max_length=50, null=True, blank=True, verbose_name=\"Description\")\n content = models.TextField(max_length=1000, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_ConfigDetail\"\n verbose_name = \"Config Option\"\n verbose_name_plural = verbose_name\n\nclass ConfigPlan(models.Model):\n name = models.CharField(max_length=50, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True, verbose_name=\"Description\")\n config = models.ManyToManyField(to=\"ConfigModel.ConfigDetail\", blank=True)\n is_delete = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_ConfigPlan\"\n verbose_name = \"Config Plan\"\n verbose_name_plural = verbose_name"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.7050691246986389,
"avg_line_length": 53.25,
"blob_id": "a73a29284c979058be27936be05d4a887c3c06a2",
"content_id": "3ce28f66c59612b777a49756933acb27ee4ace20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 8,
"path": "/sqls/ATM_ConfigPlan_config.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_ConfigPlan_config` VALUES (1, 1, 1);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (3, 1, 4);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (4, 1, 6);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (2, 1, 10);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (5, 2, 1);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (7, 2, 4);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (8, 2, 6);\nINSERT INTO `ATM_ConfigPlan_config` VALUES (6, 2, 11);\n"
},
{
"alpha_fraction": 0.7424242496490479,
"alphanum_fraction": 0.7424242496490479,
"avg_line_length": 21,
"blob_id": "3e307c126690601b9623d7121fc06b0e4fc9531f",
"content_id": "01e2adaf5d15f6f7f9a4916888906df9cec83f17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 6,
"path": "/EnvModel/apps.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass EnvmodelConfig(AppConfig):\n name = 'EnvModel'\n verbose_name = \"Environment Manager\"\n"
},
{
"alpha_fraction": 0.6571428775787354,
"alphanum_fraction": 0.6761904954910278,
"avg_line_length": 51.5,
"blob_id": "f7d1499c20152e0be5e9791d65c54d8aa4d1a6e0",
"content_id": "c82653d2f8f86d7749ff5d31832b023af9a5194f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 2,
"path": "/sqls/ATM_RunMode.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_RunMode` VALUES (1, 'cmodel', NULL);\nINSERT INTO `ATM_RunMode` VALUES (2, 'npu', NULL);\n"
},
{
"alpha_fraction": 0.6766830682754517,
"alphanum_fraction": 0.6882404088973999,
"avg_line_length": 37.89887619018555,
"blob_id": "ad3f8162617535caa957295a67e5db51a3f9a702",
"content_id": "f42b4c8465b2ebc0c517d3cb94dda1632d612f02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3461,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 89,
"path": "/BuildModel/models.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\n# cmodel / npu\nclass RunMode(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=50, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_RunMode\"\n\n# rel / dbg\nclass BuildMode(models.Model):\n name = models.CharField(max_length=50, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_BuildMode\"\n\nclass BuildProject(models.Model):\n name = models.CharField(max_length=50, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_BuildProject\"\n\nclass BuildDependency(models.Model):\n buildproject = models.ForeignKey(to=\"BuildProject\", on_delete=models.PROTECT, related_name=\"tobuild\", unique=True, verbose_name=\"Project\")\n dir = models.CharField(max_length=50, null=True, blank=True, verbose_name=\"Directory\")\n dependprojects = models.ManyToManyField(to=\"BuildProject\", blank=True, related_name=\"depends\", verbose_name=\"Depends\")\n\n def __str__(self):\n return self.buildproject.name\n\n class Meta:\n db_table = \"ATM_BuildDependency\"\n verbose_name = \"Project Dependency\"\n verbose_name_plural = verbose_name\n\nclass BuildDetail(models.Model):\n BUILDTYPES = (\n ('source', 'source'),\n ('package', 'package')\n )\n name = models.CharField(max_length=50, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True, verbose_name=\"Description\")\n buildproject = models.ForeignKey(to=\"BuildProject\", on_delete=models.PROTECT, verbose_name=\"Project\")\n buildmode = models.ForeignKey(to=\"BuildMode\", on_delete=models.PROTECT, null=True, blank=True, verbose_name=\"Build Mode\")\n buildtype = models.CharField(max_length=20, choices=BUILDTYPES, default=\"source\", verbose_name=\"Build Type\")\n runmode = models.ForeignKey(to=\"RunMode\", on_delete=models.PROTECT, null=True, blank=True, verbose_name=\"Run Mode\")\n precondition = models.TextField(max_length=1000, null=True, blank=True, verbose_name=\"Pre-condition\")\n options = models.TextField(max_length=1000, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_BuildDetail\"\n verbose_name = \"Build Option\"\n verbose_name_plural = verbose_name\n\nclass BuildPlan(models.Model):\n name = models.CharField(max_length=50, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True, verbose_name=\"Description\")\n version = models.CharField(max_length=20, default=\"latest\")\n project = models.ForeignKey(to=\"ConfigModel.Project\", on_delete=models.PROTECT)\n branch = models.ForeignKey(to=\"ConfigModel.Branch\", on_delete=models.PROTECT)\n ostype = models.ForeignKey(to=\"ConfigModel.OSType\", on_delete=models.PROTECT, verbose_name=\"OS Type\")\n gcc_version = models.CharField(max_length=50, null=True, blank=True)\n archived = models.BooleanField(default=False)\n buildconfig = models.ManyToManyField(to=\"BuildDetail\", verbose_name=\"Build Config\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_BuildPlan\"\n verbose_name = \"Build Plan\"\n verbose_name_plural = verbose_name"
},
{
"alpha_fraction": 0.6642335653305054,
"alphanum_fraction": 0.6934306621551514,
"avg_line_length": 67.5,
"blob_id": "a54b5afb85079271bcd4b89ab36d18bde217c408",
"content_id": "d81fb1688d711cf3dcc6c9ad6e0e42b411c08ae9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 2,
"path": "/sqls/ATM_ConfigPlan.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_ConfigPlan` VALUES (1, 'ut_default_tf', NULL, 0);\nINSERT INTO `ATM_ConfigPlan` VALUES (2, 'ut_default_mxnet', NULL, 0);\n"
},
{
"alpha_fraction": 0.5615823864936829,
"alphanum_fraction": 0.5754486322402954,
"avg_line_length": 32.14864730834961,
"blob_id": "f82ddb51cdd3385a7b645e9d0d8e92fc7b3f6c19",
"content_id": "93d69eef398a0c6e14feda387153fd74a512e1b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2452,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 74,
"path": "/migrations/1-testcmd.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestCmd\n from ConfigModel.models import Block\n from ConfigModel.models import Project\n from TestModel.models import TestType\n\n blocks = {\n 'run_block_chub_test': 'chub',\n 'run_block_matmul_test': 'matmul',\n 'run_block_vop_test': 'vop',\n 'run_block_convEw_test': 'convew',\n 'run_block_cp_test': 'cp',\n 'run_block_interp_test': 'interp',\n 'run_block_lm_test': 'lm',\n 'run_block_pbu_test': 'pbu',\n 'run_block_random_test': 'cdu',\n 'run_block_rbu_test': 'rbu',\n 'run_block_roi_test': 'roi',\n 'run_block_se_test': 'se',\n 'run_top_npu_test': 'top',\n 'run_infer_checkin_test': 'infer_checkin',\n 'run_kaleido_network_checkin_test': 'kaleido_network',\n 'run_kaleido_v2_checkin_test': 'kaleido_v2',\n 'run_on_demand_test': 'on_demand',\n 'run_ratelnn_checkin_test': 'ratelnn_checkin'\n }\n\n CmdList = []\n namelist = []\n data = csv.reader(open('TF_CASE_GEN_TABLE.csv', 'r'))\n\n for line in data:\n parts = line\n name = parts[4]\n cmdline = parts[8]\n block_name = blocks[parts[5]]\n branch = parts[18]\n if branch == 'master':\n continue\n if name in namelist:\n continue\n namelist.append(name)\n block = Block.objects.get(name = block_name)\n project = Project.objects.get(id=1)\n testtype = TestType.objects.get(id=1)\n # change priority from P1, P2, P3 to 1, 2, 3, and if N/A, then set to 2\n priority = parts[7] if parts[7]!='N/A' else 2\n if priority != 2:\n priority = int(priority[1:])\n # if is_checkin, then set priority to 1\n is_checkin = parts[15]\n if is_checkin == \"1\":\n priority = 0\n\n comment = parts[28]\n testCMD = TestCmd.objects.filter(name=name)\n if len(testCMD) == 0:\n print(\"name: {0} cmdline: {1} block: {2} branch: {3} priority: {4}\".format(name, cmdline, block.name, branch, priority))\n CmdList.append(TestCmd(name=name, cmdline=cmdline, block=block, project=project, testtype=testtype, priority=priority, comment=comment))\n\n\n TestCmd.objects.bulk_create(CmdList)\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.6918465495109558,
"alphanum_fraction": 0.714628279209137,
"avg_line_length": 58.57143020629883,
"blob_id": "3e845413f6fe75c12ddfdb9de38472a843f50f25",
"content_id": "e42adde38a15b1020ef9147356861ce2b9d91b3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 834,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 14,
"path": "/sqls/ATM_BuildProject.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_BuildProject` VALUES (1, 'cmodel', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (2, 'umd', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (3, 'infer', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (4, 'ratelrt', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (5, 'drm', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (6, 'ratelnn', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (7, 'tensorflow', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (8, 'mxnet', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (9, 'caffe', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (10, 'kaleido', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (11, 'agraph', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (12, 'quantize', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (13, 'deepspeech', NULL);\nINSERT INTO `ATM_BuildProject` VALUES (14, 'npulp', NULL);\n"
},
{
"alpha_fraction": 0.709601879119873,
"alphanum_fraction": 0.709601879119873,
"avg_line_length": 39.64285659790039,
"blob_id": "141d9d0edf865a7145a47e0c52ac052825174152",
"content_id": "5016d96a96b53f5e9a57b2681ba60a7226a8ed05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1708,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 42,
"path": "/TestModel/serializers.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom . import models\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = ('id','name', 'desc', 'is_delete',)\n model = models.Project\n\nclass ProjectRelatedField(serializers.RelatedField):\n def to_representation(self, value):\n return value.name\n\nclass TestStepSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n project = serializers.PrimaryKeyRelatedField(many=False, queryset=models.Project.objects.all())\n # testplan = serializers.PrimaryKeyRelatedField(many=True, queryset=models.TestCase.testplan_set)\n\n class Meta:\n fields = ('id','name', 'cmdline', 'block', 'project', 'testtype',)\n model = models.TestCmd\n\nclass TestPlanSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n project = serializers.PrimaryKeyRelatedField(many=False, queryset=models.Project.objects.all())\n testcase = serializers.PrimaryKeyRelatedField(many=True, queryset=models.TestCase.objects.all())\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = models.TestPlan\n\nclass TestCaseSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n testcmd = serializers.PrimaryKeyRelatedField(many=False, queryset=models.TestCmd.objects.all())\n # testplan = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n\n class Meta:\n # fields = ('id','name', 'testcmd','testplan',)\n fields = '__all__'\n model = models.TestCase\n\n"
},
{
"alpha_fraction": 0.7164179086685181,
"alphanum_fraction": 0.7611940503120422,
"avg_line_length": 66,
"blob_id": "ea360f86bb6c9e977e8e825eb076ab7f73fe6e98",
"content_id": "e2138b664ce1f40dba5c04b01459292e88f60ed1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 469,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 7,
"path": "/sqls/ATM_BuildDependency_dependprojects.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_BuildDependency_dependprojects` VALUES (1, 5, 1);\nINSERT INTO `ATM_BuildDependency_dependprojects` VALUES (2, 5, 2);\nINSERT INTO `ATM_BuildDependency_dependprojects` VALUES (3, 6, 3);\nINSERT INTO `ATM_BuildDependency_dependprojects` VALUES (4, 7, 3);\nINSERT INTO `ATM_BuildDependency_dependprojects` VALUES (5, 7, 4);\nINSERT INTO `ATM_BuildDependency_dependprojects` VALUES (6, 8, 6);\nINSERT INTO `ATM_BuildDependency_dependprojects` VALUES (7, 9, 6);\n"
},
{
"alpha_fraction": 0.6202428936958313,
"alphanum_fraction": 0.6283400654792786,
"avg_line_length": 29.14634132385254,
"blob_id": "f2f94447c4e1018de025bd4a366873533da1b37a",
"content_id": "51628f85d5f25f867b09cdf21fe9fb295103dc57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1235,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 41,
"path": "/migrations/0-configplan.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestCmd\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import ConfigDetail\n from TestModel.models import TestType\n\n ConfigPlanList = []\n namelist = []\n data = csv.reader(open('config_plan.csv', 'r', encoding=\"utf-8-sig\"))\n\n for line in data:\n parts = line\n name = parts[0]\n desc = parts[2]\n configPlans = ConfigPlan.objects.filter(name=name)\n if len(configPlans) == 0:\n configPlan = ConfigPlan.objects.create(name=name, desc=desc, is_delete=False)\n else:\n configPlan = configPlans[0]\n config_options = parts[1]\n config_options_list = config_options.split(' ')\n for config_option in config_options_list:\n configOptions = ConfigDetail.objects.filter(name=config_option)\n if len(configOptions) != 0:\n configPlan.config.add(configOptions[0])\n configPlan.save()\n\n print(\"name: {0} config_options: {1}\".format(name, config_options))\n\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.7384615540504456,
"alphanum_fraction": 0.7384615540504456,
"avg_line_length": 20.66666603088379,
"blob_id": "416fc7e564b4656c4a3f4c134160017ef2493a0b",
"content_id": "bc84fd6117ec78468c28bcb5ac980583acf3287e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 130,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/BuildModel/apps.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass BuildmodelConfig(AppConfig):\n name = 'BuildModel'\n verbose_name = \"Build Manager\"\n"
},
{
"alpha_fraction": 0.6041769981384277,
"alphanum_fraction": 0.615614116191864,
"avg_line_length": 38.45098114013672,
"blob_id": "77473ed48315620f05008393ca28e81401c0b388",
"content_id": "910485676d26bd2d35d2c49fd03caf3099788363",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2011,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 51,
"path": "/migrations/8-update_checkin_test.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestCmd\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import ConfigDetail\n from TestModel.models import TestType\n from TestModel.models import TestCase\n from TestModel.models import TestPlan\n\n CheckinFailureList = []\n namelist = []\n tf_checkin_test_plans = [\"v1_checkin_test_tf_fixdata_graph_opt\", \"v1_checkin_test_tf_random_comp_graph_opt\", \"v1_checkin_test_tf_random_nocomp_graph_opt\"]\n mxnet_checkin_test_plans = [\"v1_checkin_test_mxnet_fixdata_graph_opt\", \"v1_checkin_test_mxnet_random_comp_graph_opt\", \"v1_checkin_test_mxnet_random_nocomp_graph_opt\"]\n\n data = csv.reader(open('checkin_fail.csv', 'r', encoding=\"utf-8-sig\"))\n\n for line in data:\n parts = line\n name = parts[0]\n if \"_tensorflow\" in name:\n case_name = name[:-11]\n testCases = TestCase.objects.filter(name=case_name)\n if(len(testCases)>0):\n for testplan_name in tf_checkin_test_plans:\n testPlans = TestPlan.objects.filter(name=testplan_name)\n testCases[0].testplan_set.remove(testPlans[0])\n print(\"remove testplan {0} from case: {1}\".format(testplan_name, case_name))\n testCases[0].save()\n elif \"_mxnet\" in name:\n case_name = name[:-6]\n testCases = TestCase.objects.filter(name=case_name)\n if (len(testCases) > 0):\n for testplan_name in mxnet_checkin_test_plans:\n testPlans = TestPlan.objects.filter(name=testplan_name)\n testCases[0].testplan_set.remove(testPlans[0])\n print(\"remove testplan {0} from case: {1}\".format(testplan_name, case_name))\n testCases[0].save()\n else:\n pass\n\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.704081654548645,
"alphanum_fraction": 0.704081654548645,
"avg_line_length": 23.58333396911621,
"blob_id": "713b1d8d56c0ce6c8964a5c187662082b4bfd740",
"content_id": "49629be7c0b6f2e6eb0f4a6fcce345e64e150443",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 12,
"path": "/EnvModel/admin.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom EnvModel.models import *\n\n# Register your models here.\nclass DockerImage_Admin(admin.ModelAdmin):\n list_filter = ('name', 'repo', 'tag')\n list_display = ('name', 'repo', 'tag')\n\n save_as = True\n\n\nadmin.site.register(DockerImage, DockerImage_Admin)"
},
{
"alpha_fraction": 0.5849056839942932,
"alphanum_fraction": 0.6415094137191772,
"avg_line_length": 52,
"blob_id": "527c516ea66603e2a81cbf70a1901e46e73e457e",
"content_id": "0e287198f3849658e0c56db16f77c303522a776f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 2,
"path": "/sqls/ATM_Project.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_Project` VALUES (1, 'v1', NULL, 0);\nINSERT INTO `ATM_Project` VALUES (2, 'v2', NULL, 0);\n"
},
{
"alpha_fraction": 0.6727272868156433,
"alphanum_fraction": 0.6909090876579285,
"avg_line_length": 54,
"blob_id": "e3488d07707a91b7cab69420bef8a53ccd9c999a",
"content_id": "9a583ead913b8a5f98e300837b33db91b43c37b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 3,
"path": "/sqls/ATM_BuildMode.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_BuildMode` VALUES (1, 'rel', NULL);\nINSERT INTO `ATM_BuildMode` VALUES (2, 'dbg', NULL);\nINSERT INTO `ATM_BuildMode` VALUES (3, 'fastbuild', NULL);\n"
},
{
"alpha_fraction": 0.5982742309570312,
"alphanum_fraction": 0.6088207364082336,
"avg_line_length": 41.283782958984375,
"blob_id": "4c6a253679fe93998b3413dc7199775eaba7355e",
"content_id": "32e89cd59bcb003fbebc3b0a75b25af4dc2ad427",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3129,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 74,
"path": "/BuildModel/migrations/0002_auto_20190807_0753.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-07 07:53\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('BuildModel', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='builddependency',\n name='buildproject',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='tobuild', to='BuildModel.BuildProject', unique=True, verbose_name='Project'),\n ),\n migrations.AlterField(\n model_name='builddependency',\n name='dependprojects',\n field=models.ManyToManyField(blank=True, related_name='depends', to='BuildModel.BuildProject', verbose_name='Depends'),\n ),\n migrations.AlterField(\n model_name='builddependency',\n name='dir',\n field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Directory'),\n ),\n migrations.AlterField(\n model_name='builddetail',\n name='buildmode',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.BuildMode', verbose_name='Build Mode'),\n ),\n migrations.AlterField(\n model_name='builddetail',\n name='buildproject',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='BuildModel.BuildProject', verbose_name='Project'),\n ),\n migrations.AlterField(\n model_name='builddetail',\n name='buildtype',\n field=models.CharField(choices=[('source', 'source'), ('package', 'package')], default='source', max_length=20, verbose_name='Build Type'),\n ),\n migrations.AlterField(\n model_name='builddetail',\n name='desc',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='builddetail',\n name='precondition',\n field=models.TextField(blank=True, max_length=1000, null=True, verbose_name='Pre-condition'),\n ),\n migrations.AlterField(\n model_name='builddetail',\n name='runmode',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.RunMode', verbose_name='Run Mode'),\n ),\n migrations.AlterField(\n model_name='buildplan',\n name='buildconfig',\n field=models.ManyToManyField(to='BuildModel.BuildDetail', verbose_name='Build Config'),\n ),\n migrations.AlterField(\n model_name='buildplan',\n name='desc',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='buildplan',\n name='ostype',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.OSType', verbose_name='OS Type'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5872092843055725,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 85,
"blob_id": "b39bdd21792c78c99e415f04819662d5b891d2fd",
"content_id": "beb1ba5a5767f5be9038c428e3ca77a286bb8233",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 344,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 4,
"path": "/sqls/ATM_TestPlan.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_TestPlan` VALUES (1, 'tf_daily_graph_opt', NULL, 0, 2, 1, 1, NULL);\nINSERT INTO `ATM_TestPlan` VALUES (2, 'tf_checkin_graph_opt', NULL, 0, 2, 1, 1, 2);\nINSERT INTO `ATM_TestPlan` VALUES (3, 'mxnet_daily_graph_opt', NULL, 0, 2, 2, 1, NULL);\nINSERT INTO `ATM_TestPlan` VALUES (4, 'mxnet_checkin_graph_opt', NULL, 0, 2, 2, 1, 1);\n"
},
{
"alpha_fraction": 0.6024554967880249,
"alphanum_fraction": 0.6078160405158997,
"avg_line_length": 50.64285659790039,
"blob_id": "7d19847d4732a40bd3c08cb74ee15ecd9e3e0057",
"content_id": "76b4eef27fec2dc02c7cf7f18d282f747a906398",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5783,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 112,
"path": "/migrations/3-testplan.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\nimport collections\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestPlan\n from TestModel.models import TestCase\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import ConfigDetail\n from ConfigModel.models import Project\n from ConfigModel.models import DLFramework\n from ConfigModel.models import Branch\n from TestModel.models import TestType\n\n TestPlanTuple = collections.namedtuple('testplan_name', ['project', 'dl_framework', 'branch', 'top_config', 'case_filter'])\n testplan_list = [\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='dump_mode_tf',\n case_filter='daily_test'),\n ]\n\n for testplan in testplan_list:\n project = testplan.project\n dl_framwork = testplan.dl_framework\n branch = testplan.branch\n top_config = testplan.top_config\n case_filter = testplan.case_filter\n testplan_name = \"{0}_{1}_{2}_{3}\".format(project, case_filter, top_config, branch)\n desc = \"Project: {0}; 
Test Purpose: {1}; Framework: {2}; Branch: {3}; Config: {4}\".format(project, case_filter, dl_framwork, branch, top_config)\n projectObject = Project.objects.get(name=project)\n dlframeworkObject = DLFramework.objects.get(name=dl_framwork)\n branchObject = Branch.objects.get(name=branch)\n configObject = ConfigPlan.objects.get(name=top_config)\n\n testPlans = TestPlan.objects.filter(name=testplan_name)\n if (len(testPlans) == 0):\n testPlan = TestPlan(name=testplan_name, desc=desc, project=projectObject, branch=branchObject, dlframework=dlframeworkObject,\n config=configObject)\n testPlan.save()\n else:\n testPlan = testPlans[0]\n\n\n if case_filter == 'daily_test':\n testCaseList = TestCase.objects.filter()\n elif case_filter == 'checkin_test':\n testCaseList = TestCase.objects.filter(priority='0')\n else:\n testCaseList = TestCase.objects.all()\n for testCase in testCaseList:\n testcase_name = testCase.name\n if dl_framwork == 'tensorflow':\n if 'mxnet' in testcase_name:\n pass\n # testPlan.testcase.remove(testCase)\n else:\n testPlan.testcase.add(testCase)\n elif dl_framwork == 'mxnet':\n if 'tf' in testcase_name:\n pass\n # testPlan.testcase.remove(testCase)\n else:\n testPlan.testcase.add(testCase)\n else:\n pass\n\n testPlan.save()\n\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
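The seeding script above guards against duplicate plans with a filter/len/create sequence. Django's built-in get_or_create expresses the same idempotent seeding in one call; a minimal sketch, assuming the same models and the testdj.settings bootstrap used by the script itself:

import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testdj.settings")
django.setup()

from TestModel.models import TestPlan
from ConfigModel.models import Project, DLFramework, Branch, ConfigPlan

# get_or_create respects the unique constraint on TestPlan.name, so
# re-running the seeding script cannot create duplicate rows.
test_plan, created = TestPlan.objects.get_or_create(
    name="v1_daily_test_tf_fixdata_graph_opt",  # "{project}_{case_filter}_{top_config}_{branch}"
    defaults={
        "desc": "Project: v1; Test Purpose: daily_test; Framework: tensorflow; "
                "Branch: graph_opt; Config: tf_fixdata",
        "project": Project.objects.get(name="v1"),
        "branch": Branch.objects.get(name="graph_opt"),
        "dlframework": DLFramework.objects.get(name="tensorflow"),
        "config": ConfigPlan.objects.get(name="tf_fixdata"),
    },
)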
{
"alpha_fraction": 0.5078533887863159,
"alphanum_fraction": 0.5724258422851562,
"avg_line_length": 23.913043975830078,
"blob_id": "e8ef1b8c2604c1e28f400805d21bbc3faf470985",
"content_id": "8427d881b63adb2eb4618643a3051317393f712f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 23,
"path": "/TestModel/migrations/0008_auto_20191009_0203.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-10-09 02:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0007_auto_20190826_0814'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='testcase',\n name='tolerance',\n field=models.CharField(default='0', max_length=20),\n ),\n migrations.AlterField(\n model_name='testplan',\n name='name',\n field=models.CharField(max_length=100, unique=True),\n ),\n ]\n"
},
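For reference, the model state this migration converges on; both field definitions appear verbatim in TestModel/models.py later in this dump:

from django.db import models

class TestCase(models.Model):
    tolerance = models.CharField(max_length=20, default="0")
    # ...remaining fields as in TestModel/models.py

class TestPlan(models.Model):
    name = models.CharField(max_length=100, unique=True)
    # ...remaining fields as in TestModel/models.py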
{
"alpha_fraction": 0.5764706134796143,
"alphanum_fraction": 0.6352941393852234,
"avg_line_length": 84,
"blob_id": "3b88005135ff9f36840e06fb1c5ba06c12c938b9",
"content_id": "c072da3e31a48d0e13af7798b5d4882d64bdbd71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 1,
"path": "/sqls/ATM_BuildPlan.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_BuildPlan` VALUES (1, 'default', NULL, 'latest', NULL, 0, 2, 1, 1);\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 51,
"blob_id": "384508608c5c87d67c13568fa71d7c0117db4474",
"content_id": "156d665b4f5a991215ff09ff69877953ae099b48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/EnvModel/__init__.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "default_app_config = 'EnvModel.apps.EnvmodelConfig'\n"
},
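default_app_config points at an AppConfig subclass in EnvModel/apps.py, a file not included in this dump. A hypothetical reconstruction of the standard layout it implies (the verbose_name is an assumption):

# EnvModel/apps.py (hypothetical sketch)
from django.apps import AppConfig


class EnvmodelConfig(AppConfig):
    name = 'EnvModel'
    verbose_name = 'Env Model'  # assumed display label, not present in the dump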
{
"alpha_fraction": 0.6220666170120239,
"alphanum_fraction": 0.6288084983825684,
"avg_line_length": 53.70922088623047,
"blob_id": "f5e1da2ad40a865830d31c6260df8f2e251e4176",
"content_id": "3f6f479290637da9e2b59a72f70a175abcaa6d6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7713,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 141,
"path": "/migrations/7-llvm_testplan.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\nimport collections\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestPlan\n from TestModel.models import TestCase\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import ConfigDetail\n from ConfigModel.models import Project\n from ConfigModel.models import DLFramework\n from ConfigModel.models import Branch\n from TestModel.models import TestType\n\n TestPlanTuple = collections.namedtuple('testplan_name', ['project', 'dl_framework', 'branch', 'top_config', 'case_filter'])\n testplan_list = [\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='dump_mode_tf',\n case_filter='daily_test'),\n ]\n\n native_llvm_testplan_list = [\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='native_llvm_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='native_llvm_test'),\n 
TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='native_llvm_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='native_llvm_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='native_llvm_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='native_llvm_test'),\n ]\n\n # rm native llvm cases from common test plans\n for testplan in testplan_list:\n project = testplan.project\n dl_framwork = testplan.dl_framework\n branch = testplan.branch\n top_config = testplan.top_config\n case_filter = testplan.case_filter\n testplan_name = \"{0}_{1}_{2}_{3}\".format(project, case_filter, top_config, branch)\n desc = \"Project: {0}; Test Purpose: {1}; Framework: {2}; Branch: {3}; Config: {4}\".format(project, case_filter, dl_framwork, branch, top_config)\n projectObject = Project.objects.get(name=project)\n dlframeworkObject = DLFramework.objects.get(name=dl_framwork)\n branchObject = Branch.objects.get(name=branch)\n configObject = ConfigPlan.objects.get(name=top_config)\n\n testPlans = TestPlan.objects.filter(name=testplan_name)\n if (len(testPlans) == 0):\n testPlan = TestPlan(name=testplan_name, desc=desc, project=projectObject, branch=branchObject, dlframework=dlframeworkObject,\n config=configObject)\n testPlan.save()\n else:\n testPlan = testPlans[0]\n\n llvmCaseList = TestCase.objects.filter(name__icontains=\"llvm\")\n\n for testCase in llvmCaseList:\n print(\"rm {0} from {1}\".format(testCase.name, testPlan.name))\n if len(testPlan.testcase.filter(name=testCase.name)) > 0:\n testPlan.testcase.remove(testCase)\n\n testPlan.save()\n\n # add native llvm cases into native llvm test plan\n for testplan in native_llvm_testplan_list:\n project = testplan.project\n dl_framwork = testplan.dl_framework\n branch = testplan.branch\n top_config = testplan.top_config\n case_filter = testplan.case_filter\n testplan_name = \"{0}_{1}_{2}_{3}\".format(project, case_filter, top_config, branch)\n desc = \"Project: {0}; Test Purpose: {1}; Framework: {2}; Branch: {3}; Config: {4}\".format(project, case_filter, dl_framwork, branch, top_config)\n projectObject = Project.objects.get(name=project)\n dlframeworkObject = DLFramework.objects.get(name=dl_framwork)\n branchObject = Branch.objects.get(name=branch)\n configObject = ConfigPlan.objects.get(name=top_config)\n\n testPlans = TestPlan.objects.filter(name=testplan_name)\n if (len(testPlans) == 0):\n testPlan = TestPlan(name=testplan_name, desc=desc, project=projectObject, branch=branchObject, dlframework=dlframeworkObject,\n config=configObject)\n testPlan.save()\n else:\n testPlan = testPlans[0]\n\n llvmCaseList = TestCase.objects.filter(name__icontains=\"llvm\")\n\n for testCase in llvmCaseList:\n print(\"add {0} into {1}\".format(testCase.name, testPlan.name))\n testPlan.testcase.add(testCase)\n\n testPlan.save()\n\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
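Both loops in the script above move LLVM cases one object at a time, with a membership check before each remove. Django's many-to-many manager accepts multiple objects and already tolerates missing or duplicate links, so each loop collapses to a single call; a sketch using plan names the script itself generates:

from TestModel.models import TestCase, TestPlan

llvm_cases = TestCase.objects.filter(name__icontains="llvm")

# remove() silently ignores cases that are not attached, so no pre-check is needed
common_plan = TestPlan.objects.get(name="v1_daily_test_tf_fixdata_graph_opt")
common_plan.testcase.remove(*llvm_cases)

# add() skips links that already exist
llvm_plan = TestPlan.objects.get(name="v1_native_llvm_test_tf_fixdata_graph_opt")
llvm_plan.testcase.add(*llvm_cases)

Many-to-many changes are written to the through table immediately, so the trailing testPlan.save() in the script only matters for ordinary field changes.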
{
"alpha_fraction": 0.6597234010696411,
"alphanum_fraction": 0.6703882813453674,
"avg_line_length": 44.1278190612793,
"blob_id": "9f7daf0c28bd2b2c8dc5711ab043a1ecbc150298",
"content_id": "564ff2cd1cdc236a2faa58f958b01079bf7d3591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6001,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 133,
"path": "/TestModel/models.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\n# unit test or model test\nclass TestType(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=50, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_TestType\"\n verbose_name = \"Test Type\"\n verbose_name_plural = verbose_name\n\nclass Tag(models.Model):\n name = models.CharField(max_length=20, unique=True)\n desc = models.CharField(max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_Tag\"\n\nclass TestCmd(models.Model):\n name = models.CharField(max_length=50, unique=True)\n cmdline = models.TextField(max_length=1000)\n block = models.ForeignKey(to=\"ConfigModel.Block\", on_delete=models.PROTECT, verbose_name=\"Block\")\n project = models.ForeignKey(to=\"ConfigModel.Project\", on_delete=models.PROTECT, verbose_name=\"Project\")\n testtype = models.ForeignKey(to=\"TestType\", on_delete=models.PROTECT, verbose_name=\"Test Type\")\n priority = models.SmallIntegerField(default=2, verbose_name=\"Priority\")\n tag = models.ManyToManyField(to=\"Tag\", verbose_name=\"Tag\")\n comment = models.CharField(max_length=100, null=True, blank=True)\n is_delete = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_TestCmd\"\n verbose_name = \"Test Command\"\n verbose_name_plural = verbose_name\n\nclass TestCase(models.Model):\n name = models.CharField(max_length=100, unique=True, verbose_name=\"Name\")\n desc = models.CharField(max_length=500, null=True, blank=True, verbose_name=\"Description\")\n testcmd = models.ForeignKey(to=\"TestCmd\", on_delete=models.PROTECT, verbose_name=\"Commandline\")\n precondition = models.TextField(max_length=1000, null=True, blank=True)\n config = models.ForeignKey(to=\"ConfigModel.ConfigPlan\", on_delete=models.PROTECT, null=True, blank=True)\n priority = models.SmallIntegerField(default=2, verbose_name=\"Priority\")\n tolerance = models.CharField(max_length=20, default=\"0\")\n timeout = models.IntegerField(default=0)\n exectime = models.FloatField(default=0, verbose_name=\"Execution Time\")\n created = models.DateField(auto_now_add=True)\n updated = models.DateField(auto_now=True)\n is_delete = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_TestCase\"\n verbose_name = \"Test Case\"\n verbose_name_plural = verbose_name\n\nclass TestPlan(models.Model):\n name = models.CharField(max_length=100, unique=True)\n desc = models.CharField(max_length=500, null=True, blank=True, verbose_name=\"Description\")\n project = models.ForeignKey(to=\"ConfigModel.Project\", on_delete=models.PROTECT, verbose_name=\"Project\")\n branch = models.ForeignKey(to=\"ConfigModel.Branch\", on_delete=models.PROTECT, verbose_name=\"Branch\")\n dlframework = models.ForeignKey(to=\"ConfigModel.DLFramework\", on_delete=models.PROTECT, verbose_name=\"DL Framework\")\n runmode = models.ForeignKey(to=\"BuildModel.RunMode\", on_delete=models.PROTECT, null=True, blank=True, verbose_name=\"Run Mode\")\n testcase = models.ManyToManyField(to=\"TestCase\",blank=True)\n config = models.ForeignKey(to=\"ConfigModel.ConfigPlan\", on_delete=models.PROTECT, null=True, blank=True)\n #testcase = models.ManyToManyField(to=\"TestCase\", through='TestPlan_testcase')\n is_delete = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n class 
Meta:\n db_table = \"ATM_TestPlan\"\n verbose_name = \"Test Plan\"\n verbose_name_plural = verbose_name\n\n# class TestPlan_testcase(models.Model):\n# testplan = models.ForeignKey(to=\"TestPlan\", on_delete=models.PROTECT)\n# testcase = models.ForeignKey(to=\"TestCase\", on_delete=models.PROTECT)\n# is_enabled = models.SmallIntegerField(default=1)\n#\n# def __str__(self):\n# return \"{0}-{1}\".format(self.testplan, self.testcase)\n#\n# class Meta:\n# db_table = \"ATM_TestPlan_testcase\"\n\nclass TestJob(models.Model):\n name = models.CharField(max_length=50, unique=True)\n project = models.ForeignKey(to=\"ConfigModel.Project\", on_delete=models.PROTECT, null=True, blank=True, default=None, verbose_name=\"Project\")\n buildplan = models.ForeignKey(to=\"BuildModel.BuildPlan\", on_delete=models.PROTECT, null=True, blank=True, default=None)\n testplan = models.ManyToManyField(to=\"TestPlan\",blank=True)\n env = models.ForeignKey(to=\"EnvModel.DockerImage\", on_delete=models.PROTECT, null=True, blank=True, default=None, verbose_name=\"Environment\")\n runMode = models.ForeignKey(to=\"BuildModel.RunMode\", on_delete=models.PROTECT, null=True, blank=True, default=None, verbose_name=\"Run Mode\")\n parent = models.ForeignKey(to='self', null=True, blank=True, default=None, on_delete=models.PROTECT, verbose_name=\"Parent Job\")\n created = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_TestJob\"\n verbose_name = \"Test Job\"\n verbose_name_plural = verbose_name\n#\n# class TestReport(models.Model):\n# task = models.CharField(max_length=50)\n# project = models.CharField(max_length=50)\n# testplan = models.CharField(max_length=100)\n# testenv = models.CharField(max_length=50)\n# testcase = models.CharField(max_length=100)\n# result = models.CharField(max_length=20)\n# failuretype = models.CharField(max_length=20)\n# testtime = models.DateField(auto_now_add=True)\n#\n#\n# def __str__(self):\n# return \"{0}_{1}\".format(self.task + self.testtime)\n#\n# class Meta:\n# db_table = \"ATM_TestReport\""
},
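A few representative ORM traversals over these relations (sketch; the plan and block names come from the seed scripts and ATM_Block.sql elsewhere in this dump):

from TestModel.models import TestCase, TestCmd, TestPlan

plan = TestPlan.objects.get(name="v1_daily_test_tf_fixdata_graph_opt")
checkin_cases = plan.testcase.filter(priority=0)            # forward M2M
first_case = plan.testcase.first()
if first_case is not None:
    plans_for_case = first_case.testplan_set.all()          # reverse M2M accessor
matmul_cmds = TestCmd.objects.filter(block__name="matmul")  # FK lookup into ConfigModel.Block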
{
"alpha_fraction": 0.8214285969734192,
"alphanum_fraction": 0.8214285969734192,
"avg_line_length": 55,
"blob_id": "1cd06095b36b80c5dea59a6c497ceaff1278f2d1",
"content_id": "054042e19539b440112216f573ef191179df9d0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 1,
"path": "/BuildModel/__init__.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "default_app_config = 'BuildModel.apps.BuildmodelConfig'\n"
},
{
"alpha_fraction": 0.5041322112083435,
"alphanum_fraction": 0.64462810754776,
"avg_line_length": 79.66666412353516,
"blob_id": "45040031117678b56664c2dceaed70c9b780ebc9",
"content_id": "5e4da537b27f7ad729204a436a05a24728a6560e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 484,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 6,
"path": "/sqls/ATM_TestJob.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_TestJob` VALUES (1, 'tf_daily_test', '2019-07-30', 1, 6, 1);\nINSERT INTO `ATM_TestJob` VALUES (2, 'tf_checkin_test', '2019-07-30', 1, NULL, 1);\nINSERT INTO `ATM_TestJob` VALUES (3, 'mxnet_daily_test', '2019-07-30', 1, 6, 1);\nINSERT INTO `ATM_TestJob` VALUES (4, 'mxnet_checkin_test', '2019-07-30', 1, NULL, 1);\nINSERT INTO `ATM_TestJob` VALUES (5, 'daily_test', '2019-07-30', 1, 6, 1);\nINSERT INTO `ATM_TestJob` VALUES (6, 'whole_test', '2019-07-30', NULL, NULL, 1);\n"
},
{
"alpha_fraction": 0.6030238270759583,
"alphanum_fraction": 0.6059869527816772,
"avg_line_length": 32.23737335205078,
"blob_id": "d303e025cb1f216cdbf7af0c511fa97c17b8b588",
"content_id": "a35cc41616f24b20664c44deae6522693c2e937c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13162,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 396,
"path": "/api/views.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom rest_framework import generics, viewsets, filters\nfrom rest_framework.pagination import PageNumberPagination\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom TestModel.models import *\nfrom BuildModel.models import BuildPlan, BuildDependency, BuildProject, BuildDetail\nfrom ConfigModel.models import Project, Branch, DLFramework, Block, ConfigDetail, ConfigPlan\nfrom .serializers import *\nfrom django_filters.rest_framework import DjangoFilterBackend\n\n# Create your views here.\nclass StandardPageNumberPagination(PageNumberPagination):\n page_size_query_param = 'page_size'\n max_page_size = 10\n\nclass ProjectViewSet(viewsets.ModelViewSet):\n queryset = Project.objects.all()\n serializer_class = ProjectSerializer\n\nclass BranchViewSet(viewsets.ModelViewSet):\n queryset = Branch.objects.all()\n serializer_class = BranchSerializer\n\nclass DLFrameworkViewSet(viewsets.ModelViewSet):\n queryset = DLFramework.objects.all()\n serializer_class = DLFrameworkSerializer\n\nclass BlockViewSet(viewsets.ModelViewSet):\n queryset = Block.objects.all()\n serializer_class = BlockSerializer\n\nclass ConfigDetailViewSet(viewsets.ModelViewSet):\n queryset = ConfigDetail.objects.all()\n serializer_class = ConfigDetailSerializer\n\nclass ConfigPlanViewSet(viewsets.ModelViewSet):\n queryset = ConfigPlan.objects.all()\n serializer_class = ConfigPlanSerializer\n\nclass TestTypeViewSet(viewsets.ModelViewSet):\n queryset = TestType.objects.all()\n serializer_class = TestTypeSerializer\n\nclass TagViewSet(viewsets.ModelViewSet):\n queryset = Tag.objects.all()\n serializer_class = TagSerializer\n\n# with filter, search, pagination\nclass TestCmdViewSet(viewsets.ModelViewSet):\n queryset = TestCmd.objects.all()\n serializer_class = TestCmdSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filter_fields = ('name','block','project','priority','tag')\n search_fields = ['name']\n pagination_class = StandardPageNumberPagination\n\n# with filter, search and ordering\nclass TestCaseViewSet(viewsets.ModelViewSet):\n queryset = TestCase.objects.all()\n serializer_class = TestCaseSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n filter_fields = ('name',)\n search_fields = ['name']\n ordering_fields = ['name']\n pagination_class = StandardPageNumberPagination\n\nclass TestPlanViewSet(viewsets.ModelViewSet):\n queryset = TestPlan.objects.all()\n serializer_class = TestPlanUpdateSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filter_fields = ('name','project__name')\n search_fields = ['name','project__name']\n lookup_field = 'name'\n\n def list(self, request, *args, **kwargs):\n self.serializer_class = TestPlanSerializer\n return viewsets.ModelViewSet.list(self, request, *args, **kwargs)\n\nclass TestJobViewSet(viewsets.ModelViewSet):\n queryset = TestJob.objects.all()\n serializer_class = TestJobSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filter_fields = ('name',)\n search_fields = ['name']\n\nclass BuildDetailViewSet(viewsets.ModelViewSet):\n queryset = BuildDetail.objects.all()\n serializer_class = BuildDetailSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filter_fields = ('name',)\n search_fields = ['name']\n\nclass BuildPlanViewSet(viewsets.ModelViewSet):\n queryset = BuildPlan.objects.all()\n serializer_class = BuildPlanSerializer\n\n\n\ndef parseConfigs(config, 
is_cmd=False):\n if config == '':\n return ''\n items = config.split('\\r\\n')\n dict = {}\n for item in items:\n if is_cmd:\n dict['cmd'] = item.strip()\n else:\n pair = item.split('=')\n dict[pair[0].strip()] = pair[1].strip()\n return dict\n\ndef getTestPlanJson(testplan):\n testcase_set = testplan.testcase.all()\n testplan_name = testplan.name\n project = testplan.project.name\n branch = testplan.branch.name\n dlframework = testplan.dlframework.name\n runmode = testplan.runmode.name if testplan.runmode != None else ''\n testcase_configs = testplan.config.config.all() if testplan.config != None else 0\n config_list = []\n if testcase_configs:\n for config in testcase_configs:\n config_type = config.configtype.name\n is_cmd = False\n if config_type.lower() == \"cmdline\":\n is_cmd = True\n config_json = parseConfigs(config.content, is_cmd)\n config_list.append({config_type: config_json})\n blocks = []\n caselist = []\n\n for testcase in testcase_set:\n testcase_name = testcase.name\n testcase_block = testcase.testcmd.block.name\n blocks.append(testcase_block)\n caselist.append(\n {\n 'name': testcase_name,\n 'block': testcase_block,\n }\n )\n blocks = list(set(blocks))\n json = {\n 'name': testplan_name,\n 'project': project,\n 'branch': branch,\n 'dlframework': dlframework,\n 'runmode': runmode,\n 'blocks': blocks,\n 'topconfigs': config_list,\n 'testcases': caselist,\n }\n return json\n\ndef getBuildPlanJson(buildplan):\n name = buildplan.name\n version = buildplan.version\n project = buildplan.project.name\n branch = buildplan.branch.name\n ostype = buildplan.ostype.name\n gcc_version = buildplan.gcc_version.name if buildplan.gcc_version != None else ''\n archived = buildplan.archived\n buildconfigs = buildplan.buildconfig.all()\n buildinfolist = []\n for buildinfo in buildconfigs:\n options = parseConfigs(buildinfo.options)\n depends = BuildDependency.objects.filter(buildproject=buildinfo.buildproject)\n dependentprojectlist = []\n dir = ''\n if depends:\n dependentprojects = depends[0].dependprojects.all()\n if dependentprojects:\n for dependproject in dependentprojects:\n dependentprojectlist.append(dependproject.name)\n dir = depends[0].dir\n buildinfo_json = {\n 'project': buildinfo.buildproject.name,\n 'dir': dir,\n 'depends': dependentprojectlist,\n 'mode': buildinfo.buildmode.name,\n 'buildtype': buildinfo.buildtype,\n 'options': options\n }\n buildinfolist.append(buildinfo_json)\n\n json = {\n 'name': name,\n 'project': project,\n 'branch': branch,\n 'version': version,\n 'os_type': ostype,\n 'gcc_version': gcc_version,\n 'archived': archived,\n 'buildconfigs': buildinfolist\n }\n return json\n\ndef getEnvJson(env):\n if env == None:\n return {}\n else:\n image_name = env.name\n image_repo = env.repo\n image_tag = env.tag\n json = {\n 'image_name': image_name,\n 'image_repo': image_repo,\n 'image_tag': image_tag\n }\n return json\n\ndef getTestJobJson(testjob):\n resp = \"\"\n jobname = testjob.name\n project = testjob.project.name if testjob.project != None else ''\n testplans = testjob.testplan.all()\n resp_testplans = []\n buildplan = testjob.buildplan\n env = testjob.env\n env_json = getEnvJson(env)\n run_mode = testjob.runMode.name if testjob.runMode != None else ''\n\n if len(testplans) != 0:\n for testplan in testplans:\n resp_testplan = getTestPlanJson(testplan)\n resp_testplans.append(resp_testplan)\n resp_buildplan = getBuildPlanJson(buildplan) if buildplan != None else ''\n sub_jobs = testjob.testjob_set.all()\n sub_job_list = []\n if sub_jobs.count() == 0:\n resp 
= {\n 'name': jobname,\n 'project': project,\n 'env': env_json,\n 'run_mode': run_mode,\n 'buildplan': resp_buildplan,\n 'testplans': resp_testplans\n }\n else:\n for sub_job in sub_jobs:\n resp_testjob = getTestJobJson(sub_job)\n sub_job_list.append(resp_testjob)\n resp = {\n 'name': jobname,\n 'project': project,\n 'env': env_json,\n 'run_mode': run_mode,\n 'buildplan': resp_buildplan,\n 'testplans': resp_testplans,\n 'sub_jobs': sub_job_list\n }\n return resp\n\ndef GenTestPlan(request, plan_name):\n print(plan_name)\n try:\n testplan = TestPlan.objects.get(name=plan_name)\n data = getTestPlanJson(testplan)\n statuscode = \"200\"\n except TestPlan.DoesNotExist:\n data = \"\"\n statuscode = \"404\"\n resp = {\n 'statuscode': statuscode,\n 'data': data\n }\n return JsonResponse(resp)\n\ndef GenBuildPlan(request, plan_name):\n print(plan_name)\n try:\n testplan = BuildPlan.objects.get(name=plan_name)\n data = getBuildPlanJson(testplan)\n statuscode = \"200\"\n except BuildPlan.DoesNotExist:\n data = \"\"\n statuscode = \"404\"\n resp = {\n 'statuscode': statuscode,\n 'data': data\n }\n return JsonResponse(resp)\n\ndef GenTestJob(request, job_name):\n print(job_name)\n try:\n testjob = TestJob.objects.get(name=job_name)\n data = getTestJobJson(testjob)\n statuscode = \"200\"\n except TestJob.DoesNotExist:\n data = \"\"\n statuscode = \"404\"\n resp = {\n 'statuscode': statuscode,\n 'data': data\n }\n return JsonResponse(resp)\n\ndef GenWholeCaseList(request):\n try:\n testcases = TestCase.objects.filter(is_delete=False)\n blocks = []\n testcaselist = []\n for testcase in testcases:\n testcase_name = testcase.name\n testcase_block = testcase.testcmd.block.name\n testcase_cmd = testcase.testcmd.cmdline\n testcase_tolerance = testcase.tolerance\n testcase_timeout = testcase.timeout\n testcase_estimation = testcase.exectime\n testcase_configs = testcase.config.config.all()\n config_list = []\n if testcase_configs:\n for config in testcase_configs:\n config_type = config.configtype.name\n is_cmd = False\n if config_type.lower() == \"cmdline\":\n is_cmd = True\n config_json = parseConfigs(config.content, is_cmd)\n config_list.append({config_type : config_json})\n blocks.append(testcase_block)\n testcaselist.append(\n {\n 'name': testcase_name,\n 'block': testcase_block,\n 'cmdline': testcase_cmd,\n 'tolerance': testcase_tolerance,\n 'timeout': testcase_timeout,\n 'estimation': testcase_estimation,\n 'configs': config_list\n }\n )\n blocks = list(set(blocks))\n data = {\n 'blocks': blocks,\n 'caselist': testcaselist\n }\n statuscode = \"200\"\n except TestCase.DoesNotExist:\n data = \"\"\n statuscode = \"404\"\n resp = {\n 'statuscode': statuscode,\n 'data': data\n }\n return JsonResponse(resp)\n\n#for demo purpose\ndef GenDemoCaseList(request):\n try:\n testcases = TestCase.objects.filter(is_delete=False, name__contains='demo')\n blocks = []\n testcaselist = []\n for testcase in testcases:\n testcase_name = testcase.name\n testcase_block = testcase.testcmd.block.name\n testcase_cmd = testcase.testcmd.cmdline\n testcase_tolerance = testcase.tolerance\n testcase_timeout = testcase.timeout\n testcase_estimation = testcase.exectime\n testcase_configs = testcase.config.config.all()\n config_list = []\n if testcase_configs:\n for config in testcase_configs:\n config_type = config.configtype.name\n is_cmd = False\n if config_type.lower() == \"cmdline\":\n is_cmd = True\n config_json = parseConfigs(config.content, is_cmd)\n config_list.append({config_type : config_json})\n blocks.append(testcase_block)\n 
testcaselist.append(\n {\n 'name': testcase_name,\n 'block': testcase_block,\n 'cmdline': testcase_cmd,\n 'tolerance': testcase_tolerance,\n 'timeout': testcase_timeout,\n 'estimation': testcase_estimation,\n 'configs': config_list\n }\n )\n blocks = list(set(blocks))\n data = {\n 'blocks': blocks,\n 'caselist': testcaselist\n }\n statuscode = \"200\"\n except TestCase.DoesNotExist:\n data = \"\"\n statuscode = \"404\"\n resp = {\n 'statuscode': statuscode,\n 'data': data\n }\n return JsonResponse(resp)\n"
},
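parseConfigs above encodes a small contract: CRLF-separated "key = value" pairs, or raw command lines when is_cmd is set (in which case later lines overwrite earlier ones). A standalone sketch of the same logic, with the builtin-shadowing dict variable renamed and a single split so values may themselves contain '='; the BATCH/SEED keys are illustrative only:

def parse_configs(config, is_cmd=False):
    if config == '':
        return ''                   # the original returns '' (not {}) for empty input
    result = {}
    for item in config.split('\r\n'):
        if is_cmd:
            result['cmd'] = item.strip()      # last command line wins, as in the original
        else:
            key, value = item.split('=', 1)   # split once so '=' may appear in values
            result[key.strip()] = value.strip()
    return result

print(parse_configs('BATCH = 32\r\nSEED = 7'))          # {'BATCH': '32', 'SEED': '7'}
print(parse_configs('./run_test --fast', is_cmd=True))  # {'cmd': './run_test --fast'}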
{
"alpha_fraction": 0.5672727227210999,
"alphanum_fraction": 0.6309090852737427,
"avg_line_length": 26.5,
"blob_id": "a56f5281fe5fdfd3d79c9c2cfc341522cf7d09c6",
"content_id": "d76045f553a61d1bff6531aa6872536fffa252a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 20,
"path": "/TestModel/migrations/0003_testjob_env.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-22 02:40\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('EnvModel', '0001_initial'),\n ('TestModel', '0002_auto_20190807_0753'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='testjob',\n name='env',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='EnvModel.DockerImage'),\n ),\n ]\n"
},
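This migration mirrors the env field on TestJob; the corresponding definition, excerpted from TestModel/models.py in this dump:

env = models.ForeignKey(to="EnvModel.DockerImage", on_delete=models.PROTECT,
                        null=True, blank=True, default=None,
                        verbose_name="Environment")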
{
"alpha_fraction": 0.6164383292198181,
"alphanum_fraction": 0.7123287916183472,
"avg_line_length": 15.333333015441895,
"blob_id": "0527027a5584dbd574c2b05fec05431b156c53e6",
"content_id": "331438d2384062760ef70a4acd3bd5e0995a3399",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 9,
"path": "/uwsgi.ini",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "[uwsgi]\nsocket = :8080\nchdir = /atm\nmodule = testdj.wsgi\nmaster = true\nprocesses = 4\nvacuum = true\nlog-maxsize = 500000000\nlogto = /tmp/uwsgi.log"
},
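The module = testdj.wsgi line tells uWSGI which WSGI callable to load. testdj/wsgi.py is not included in this dump; the standard Django file it points to would look like this (hypothetical sketch):

# testdj/wsgi.py (standard Django layout, reconstructed)
import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testdj.settings')

application = get_wsgi_application()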
{
"alpha_fraction": 0.7971014380455017,
"alphanum_fraction": 0.7971014380455017,
"avg_line_length": 31,
"blob_id": "19c46c19abb376d7f65da2b15a20ece3395f0f38",
"content_id": "947db5b13cb50989ea126c30770d6770fd59d452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1311,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 41,
"path": "/TestModel/views.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom rest_framework import generics\n\nfrom .models import Project, TestCmd, TestCase, TestPlan\nfrom .serializers import ProjectSerializer, TestStepSerializer, TestCaseSerializer, TestPlanSerializer\n\n# Create your views here.\n\nclass ProjectList(generics.ListAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectSerializer\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectSerializer\n\nclass TestCmdList(generics.ListAPIView):\n queryset = TestCmd.objects.all()\n serializer_class = TestStepSerializer\n\nclass TestCmdDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = TestCmd.objects.all()\n serializer_class = TestStepSerializer\n\n\nclass TestCaseList(generics.ListAPIView):\n queryset = TestCase.objects.all()\n serializer_class = TestCaseSerializer\n\nclass TestCaseDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = TestCase.objects.all()\n serializer_class = TestCaseSerializer\n\nclass TestPlanList(generics.ListAPIView):\n queryset = TestPlan.objects.all()\n serializer_class = TestPlanSerializer\n\nclass TestPlanDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = TestPlan.objects.all()\n serializer_class = TestPlanSerializer"
},
{
"alpha_fraction": 0.6347427368164062,
"alphanum_fraction": 0.6464595198631287,
"avg_line_length": 22.66265106201172,
"blob_id": "91e604ed920199a4ae293b191996ad5e76d6848e",
"content_id": "ec5b9d20c576ef7c7aa2fe07a4481343b80d6f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 83,
"path": "/testdj/view.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom ConfigModel.models import Project\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators import csrf\n\ndef hello(request):\n context = {}\n context['hello'] = 'Hello World!'\n return render(request, 'hello.html', context)\n\ndef search_post(request):\n ctx ={}\n if request.POST:\n ctx['rlt'] = request.POST['q']\n return render(request, \"post.html\", ctx)\n\ndef search_form(request):\n return render_to_response('search_form.html')\n\n\n# 接收请求数据\ndef search(request):\n request.encoding = 'utf-8'\n if 'q' in request.GET:\n message = '你搜索的内容为: ' + request.GET['q']\n else:\n message = '你提交了空表单'\n return HttpResponse(message)\n\ndef testdb(request):\n test1 = Project(name='runoob')\n test1.save()\n return HttpResponse(\"<p>数据库添加成功!</p>\")\n\ndef readdb(request):\n response = \"\"\n response1 = \"\"\n\n list = Project.objects.all()\n\n response2 = Project.objects.filter(id=1)\n response3 = Project.objects.get(id=1)\n\n list = Project.objects.order_by('name')\n list = list[0:2]\n\n #Test.objects.order_by('id')\n\n #Test.objects.filter(name=\"runoob\").order_by(\"id\")\n\n for var in list:\n response1 += str(var.id) + \" \"\n response = response1\n return HttpResponse(\"<p>\" + response + \"</p>\")\n\n\ndef updatedb(request):\n # 修改其中一个id=1的name字段,再save,相当于SQL中的UPDATE\n test1 = Project.objects.get(id=1)\n test1.name = 'Google'\n test1.save()\n\n # 另外一种方式\n # Test.objects.filter(id=1).update(name='Google')\n\n # 修改所有的列\n # Test.objects.all().update(name='Google')\n\n return HttpResponse(\"<p>修改成功</p>\")\n\ndef deletedb(request):\n # 修改其中一个id=1的name字段,再save,相当于SQL中的UPDATE\n test1 = Project.objects.get(id=3)\n test1.delete()\n\n # 另外一种方式\n # Test.objects.filter(id=1).update(name='Google')\n\n # 修改所有的列\n # Test.objects.all().update(name='Google')\n\n return HttpResponse(\"<p>删除成功</p>\")"
},
{
"alpha_fraction": 0.6630197167396545,
"alphanum_fraction": 0.6914660930633545,
"avg_line_length": 29.53333282470703,
"blob_id": "9e0a4c54224576463bb54eb8b951f7583f5bca00",
"content_id": "cf44b2a230e8a51bf1a9506fdd8282979c16b9cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 15,
"path": "/EnvModel/models.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\nclass DockerImage(models.Model):\n name = models.CharField(max_length=50, unique=True)\n repo = models.CharField(max_length=100, null=True, blank=True)\n tag = models.CharField(max_length=100, null=True, blank=True)\n dockerfile = models.TextField(max_length=10000, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"ATM_DockerImage\""
},
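Sketch of seeding a DockerImage row in the shape getEnvJson() in api/views.py serializes; the name, repo, and tag values are illustrative, not taken from this dump:

from EnvModel.models import DockerImage

image, created = DockerImage.objects.get_or_create(
    name="atm-runner",  # hypothetical image name
    defaults={"repo": "registry.example.com/atm", "tag": "latest"},
)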
{
"alpha_fraction": 0.6628788113594055,
"alphanum_fraction": 0.6628788113594055,
"avg_line_length": 36.78571319580078,
"blob_id": "90864c12d53e85f290f759287b1d5ee58bb3abb2",
"content_id": "3b362847584542133b35985df2ff38822534885b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 14,
"path": "/TestModel/urls.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('project/', views.ProjectList.as_view()),\n path('project/<int:pk>/', views.ProjectDetail.as_view()),\n path('testcmd/', views.TestCmdList.as_view()),\n path('testcmd/<int:pk>/', views.TestCmdDetail.as_view()),\n path('testcase/', views.TestCaseList.as_view()),\n path('testcase/<int:pk>/', views.TestCaseDetail.as_view()),\n path('testplan/', views.TestPlanList.as_view()),\n path('testplan/<int:pk>/', views.TestPlanDetail.as_view()),\n]"
},
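How these routes are mounted in the project URLconf is not shown in this dump, so the /testmodel/ prefix below is an assumption. A quick client-side check against the list endpoint (these ListAPIViews configure no pagination, so the response body is a plain JSON array):

import requests

resp = requests.get("http://localhost:8000/testmodel/testcase/")
resp.raise_for_status()
for case in resp.json():
    print(case["name"])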
{
"alpha_fraction": 0.5423761010169983,
"alphanum_fraction": 0.5535376667976379,
"avg_line_length": 45.3684196472168,
"blob_id": "81c011e28ce6877ef260de7108d059c3f9af5c43",
"content_id": "d8884bd6871860b1e031862316bf8cec1b31b085",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5286,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 114,
"path": "/BuildModel/migrations/0001_initial.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-02 02:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('ConfigModel', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BuildDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ('buildtype', models.CharField(choices=[('source', 'source'), ('package', 'package')], default='source', max_length=20)),\n ('precondition', models.TextField(blank=True, max_length=1000, null=True)),\n ('options', models.TextField(blank=True, max_length=1000, null=True)),\n ],\n options={\n 'verbose_name': 'Build Option',\n 'verbose_name_plural': 'Build Option',\n 'db_table': 'ATM_BuildDetail',\n },\n ),\n migrations.CreateModel(\n name='BuildMode',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ],\n options={\n 'db_table': 'ATM_BuildMode',\n },\n ),\n migrations.CreateModel(\n name='BuildProject',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ],\n options={\n 'db_table': 'ATM_BuildProject',\n },\n ),\n migrations.CreateModel(\n name='RunMode',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'db_table': 'ATM_RunMode',\n },\n ),\n migrations.CreateModel(\n name='BuildPlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ('version', models.CharField(default='latest', max_length=20)),\n ('gcc_version', models.CharField(blank=True, max_length=50, null=True)),\n ('archived', models.BooleanField(default=False)),\n ('branch', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Branch')),\n ('buildconfig', models.ManyToManyField(to='BuildModel.BuildDetail')),\n ('ostype', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.OSType')),\n ('project', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Project')),\n ],\n options={\n 'verbose_name': 'Build Plan',\n 'verbose_name_plural': 'Build Plan',\n 'db_table': 'ATM_BuildPlan',\n },\n ),\n migrations.AddField(\n model_name='builddetail',\n name='buildmode',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.BuildMode'),\n ),\n migrations.AddField(\n model_name='builddetail',\n name='buildproject',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='BuildModel.BuildProject'),\n ),\n migrations.AddField(\n model_name='builddetail',\n name='runmode',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, 
to='BuildModel.RunMode'),\n ),\n migrations.CreateModel(\n name='BuildDependency',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('dir', models.CharField(blank=True, max_length=50, null=True)),\n ('buildproject', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='tobuild', to='BuildModel.BuildProject', unique=True)),\n ('dependprojects', models.ManyToManyField(blank=True, related_name='depends', to='BuildModel.BuildProject')),\n ],\n options={\n 'verbose_name': 'Project Dependency',\n 'verbose_name_plural': 'Project Dependency',\n 'db_table': 'ATM_BuildDependency',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.6769605875015259,
"alphanum_fraction": 0.7192198038101196,
"avg_line_length": 86.89286041259766,
"blob_id": "21ad479fb7a8bcc7b027b11d0e72726aac8d4b11",
"content_id": "6421c7933a878c145ffb6d399a2ce2f45a35a05e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 9844,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 112,
"path": "/sqls/auth_permission.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `auth_permission` VALUES (1, 'Can add log entry', 1, 'add_logentry');\nINSERT INTO `auth_permission` VALUES (2, 'Can change log entry', 1, 'change_logentry');\nINSERT INTO `auth_permission` VALUES (3, 'Can delete log entry', 1, 'delete_logentry');\nINSERT INTO `auth_permission` VALUES (4, 'Can view log entry', 1, 'view_logentry');\nINSERT INTO `auth_permission` VALUES (5, 'Can add permission', 2, 'add_permission');\nINSERT INTO `auth_permission` VALUES (6, 'Can change permission', 2, 'change_permission');\nINSERT INTO `auth_permission` VALUES (7, 'Can delete permission', 2, 'delete_permission');\nINSERT INTO `auth_permission` VALUES (8, 'Can view permission', 2, 'view_permission');\nINSERT INTO `auth_permission` VALUES (9, 'Can add group', 3, 'add_group');\nINSERT INTO `auth_permission` VALUES (10, 'Can change group', 3, 'change_group');\nINSERT INTO `auth_permission` VALUES (11, 'Can delete group', 3, 'delete_group');\nINSERT INTO `auth_permission` VALUES (12, 'Can view group', 3, 'view_group');\nINSERT INTO `auth_permission` VALUES (13, 'Can add user', 4, 'add_user');\nINSERT INTO `auth_permission` VALUES (14, 'Can change user', 4, 'change_user');\nINSERT INTO `auth_permission` VALUES (15, 'Can delete user', 4, 'delete_user');\nINSERT INTO `auth_permission` VALUES (16, 'Can view user', 4, 'view_user');\nINSERT INTO `auth_permission` VALUES (17, 'Can add content type', 5, 'add_contenttype');\nINSERT INTO `auth_permission` VALUES (18, 'Can change content type', 5, 'change_contenttype');\nINSERT INTO `auth_permission` VALUES (19, 'Can delete content type', 5, 'delete_contenttype');\nINSERT INTO `auth_permission` VALUES (20, 'Can view content type', 5, 'view_contenttype');\nINSERT INTO `auth_permission` VALUES (21, 'Can add session', 6, 'add_session');\nINSERT INTO `auth_permission` VALUES (22, 'Can change session', 6, 'change_session');\nINSERT INTO `auth_permission` VALUES (23, 'Can delete session', 6, 'delete_session');\nINSERT INTO `auth_permission` VALUES (24, 'Can view session', 6, 'view_session');\nINSERT INTO `auth_permission` VALUES (25, 'Can add block', 7, 'add_block');\nINSERT INTO `auth_permission` VALUES (26, 'Can change block', 7, 'change_block');\nINSERT INTO `auth_permission` VALUES (27, 'Can delete block', 7, 'delete_block');\nINSERT INTO `auth_permission` VALUES (28, 'Can view block', 7, 'view_block');\nINSERT INTO `auth_permission` VALUES (29, 'Can add branch', 8, 'add_branch');\nINSERT INTO `auth_permission` VALUES (30, 'Can change branch', 8, 'change_branch');\nINSERT INTO `auth_permission` VALUES (31, 'Can delete branch', 8, 'delete_branch');\nINSERT INTO `auth_permission` VALUES (32, 'Can view branch', 8, 'view_branch');\nINSERT INTO `auth_permission` VALUES (33, 'Can add config', 9, 'add_config');\nINSERT INTO `auth_permission` VALUES (34, 'Can change config', 9, 'change_config');\nINSERT INTO `auth_permission` VALUES (35, 'Can delete config', 9, 'delete_config');\nINSERT INTO `auth_permission` VALUES (36, 'Can view config', 9, 'view_config');\nINSERT INTO `auth_permission` VALUES (37, 'Can add config detail', 10, 'add_configdetail');\nINSERT INTO `auth_permission` VALUES (38, 'Can change config detail', 10, 'change_configdetail');\nINSERT INTO `auth_permission` VALUES (39, 'Can delete config detail', 10, 'delete_configdetail');\nINSERT INTO `auth_permission` VALUES (40, 'Can view config detail', 10, 'view_configdetail');\nINSERT INTO `auth_permission` VALUES (41, 'Can add dl framework', 11, 'add_dlframework');\nINSERT INTO `auth_permission` 
VALUES (42, 'Can change dl framework', 11, 'change_dlframework');\nINSERT INTO `auth_permission` VALUES (43, 'Can delete dl framework', 11, 'delete_dlframework');\nINSERT INTO `auth_permission` VALUES (44, 'Can view dl framework', 11, 'view_dlframework');\nINSERT INTO `auth_permission` VALUES (45, 'Can add project', 12, 'add_project');\nINSERT INTO `auth_permission` VALUES (46, 'Can change project', 12, 'change_project');\nINSERT INTO `auth_permission` VALUES (47, 'Can delete project', 12, 'delete_project');\nINSERT INTO `auth_permission` VALUES (48, 'Can view project', 12, 'view_project');\nINSERT INTO `auth_permission` VALUES (49, 'Can add tag', 13, 'add_tag');\nINSERT INTO `auth_permission` VALUES (50, 'Can change tag', 13, 'change_tag');\nINSERT INTO `auth_permission` VALUES (51, 'Can delete tag', 13, 'delete_tag');\nINSERT INTO `auth_permission` VALUES (52, 'Can view tag', 13, 'view_tag');\nINSERT INTO `auth_permission` VALUES (53, 'Can add test case', 14, 'add_testcase');\nINSERT INTO `auth_permission` VALUES (54, 'Can change test case', 14, 'change_testcase');\nINSERT INTO `auth_permission` VALUES (55, 'Can delete test case', 14, 'delete_testcase');\nINSERT INTO `auth_permission` VALUES (56, 'Can view test case', 14, 'view_testcase');\nINSERT INTO `auth_permission` VALUES (57, 'Can add test type', 15, 'add_testtype');\nINSERT INTO `auth_permission` VALUES (58, 'Can change test type', 15, 'change_testtype');\nINSERT INTO `auth_permission` VALUES (59, 'Can delete test type', 15, 'delete_testtype');\nINSERT INTO `auth_permission` VALUES (60, 'Can view test type', 15, 'view_testtype');\nINSERT INTO `auth_permission` VALUES (61, 'Can add test plan', 16, 'add_testplan');\nINSERT INTO `auth_permission` VALUES (62, 'Can change test plan', 16, 'change_testplan');\nINSERT INTO `auth_permission` VALUES (63, 'Can delete test plan', 16, 'delete_testplan');\nINSERT INTO `auth_permission` VALUES (64, 'Can view test plan', 16, 'view_testplan');\nINSERT INTO `auth_permission` VALUES (65, 'Can add test job', 17, 'add_testjob');\nINSERT INTO `auth_permission` VALUES (66, 'Can change test job', 17, 'change_testjob');\nINSERT INTO `auth_permission` VALUES (67, 'Can delete test job', 17, 'delete_testjob');\nINSERT INTO `auth_permission` VALUES (68, 'Can view test job', 17, 'view_testjob');\nINSERT INTO `auth_permission` VALUES (69, 'Can add test cmd', 18, 'add_testcmd');\nINSERT INTO `auth_permission` VALUES (70, 'Can change test cmd', 18, 'change_testcmd');\nINSERT INTO `auth_permission` VALUES (71, 'Can delete test cmd', 18, 'delete_testcmd');\nINSERT INTO `auth_permission` VALUES (72, 'Can view test cmd', 18, 'view_testcmd');\nINSERT INTO `auth_permission` VALUES (73, 'Can add build combinations', 19, 'add_buildcombinations');\nINSERT INTO `auth_permission` VALUES (74, 'Can change build combinations', 19, 'change_buildcombinations');\nINSERT INTO `auth_permission` VALUES (75, 'Can delete build combinations', 19, 'delete_buildcombinations');\nINSERT INTO `auth_permission` VALUES (76, 'Can view build combinations', 19, 'view_buildcombinations');\nINSERT INTO `auth_permission` VALUES (77, 'Can add build dependency', 20, 'add_builddependency');\nINSERT INTO `auth_permission` VALUES (78, 'Can change build dependency', 20, 'change_builddependency');\nINSERT INTO `auth_permission` VALUES (79, 'Can delete build dependency', 20, 'delete_builddependency');\nINSERT INTO `auth_permission` VALUES (80, 'Can view build dependency', 20, 'view_builddependency');\nINSERT INTO `auth_permission` VALUES (81, 'Can add 
build detail', 21, 'add_builddetail');\nINSERT INTO `auth_permission` VALUES (82, 'Can change build detail', 21, 'change_builddetail');\nINSERT INTO `auth_permission` VALUES (83, 'Can delete build detail', 21, 'delete_builddetail');\nINSERT INTO `auth_permission` VALUES (84, 'Can view build detail', 21, 'view_builddetail');\nINSERT INTO `auth_permission` VALUES (85, 'Can add build mode', 22, 'add_buildmode');\nINSERT INTO `auth_permission` VALUES (86, 'Can change build mode', 22, 'change_buildmode');\nINSERT INTO `auth_permission` VALUES (87, 'Can delete build mode', 22, 'delete_buildmode');\nINSERT INTO `auth_permission` VALUES (88, 'Can view build mode', 22, 'view_buildmode');\nINSERT INTO `auth_permission` VALUES (89, 'Can add build plan', 23, 'add_buildplan');\nINSERT INTO `auth_permission` VALUES (90, 'Can change build plan', 23, 'change_buildplan');\nINSERT INTO `auth_permission` VALUES (91, 'Can delete build plan', 23, 'delete_buildplan');\nINSERT INTO `auth_permission` VALUES (92, 'Can view build plan', 23, 'view_buildplan');\nINSERT INTO `auth_permission` VALUES (93, 'Can add build project', 24, 'add_buildproject');\nINSERT INTO `auth_permission` VALUES (94, 'Can change build project', 24, 'change_buildproject');\nINSERT INTO `auth_permission` VALUES (95, 'Can delete build project', 24, 'delete_buildproject');\nINSERT INTO `auth_permission` VALUES (96, 'Can view build project', 24, 'view_buildproject');\nINSERT INTO `auth_permission` VALUES (97, 'Can add os type', 25, 'add_ostype');\nINSERT INTO `auth_permission` VALUES (98, 'Can change os type', 25, 'change_ostype');\nINSERT INTO `auth_permission` VALUES (99, 'Can delete os type', 25, 'delete_ostype');\nINSERT INTO `auth_permission` VALUES (100, 'Can view os type', 25, 'view_ostype');\nINSERT INTO `auth_permission` VALUES (101, 'Can add run mode', 26, 'add_runmode');\nINSERT INTO `auth_permission` VALUES (102, 'Can change run mode', 26, 'change_runmode');\nINSERT INTO `auth_permission` VALUES (103, 'Can delete run mode', 26, 'delete_runmode');\nINSERT INTO `auth_permission` VALUES (104, 'Can view run mode', 26, 'view_runmode');\nINSERT INTO `auth_permission` VALUES (105, 'Can add build info', 19, 'add_buildinfo');\nINSERT INTO `auth_permission` VALUES (106, 'Can change build info', 19, 'change_buildinfo');\nINSERT INTO `auth_permission` VALUES (107, 'Can delete build info', 19, 'delete_buildinfo');\nINSERT INTO `auth_permission` VALUES (108, 'Can view build info', 19, 'view_buildinfo');\nINSERT INTO `auth_permission` VALUES (109, 'Can add config type', 27, 'add_configtype');\nINSERT INTO `auth_permission` VALUES (110, 'Can change config type', 27, 'change_configtype');\nINSERT INTO `auth_permission` VALUES (111, 'Can delete config type', 27, 'delete_configtype');\nINSERT INTO `auth_permission` VALUES (112, 'Can view config type', 27, 'view_configtype');\n"
},
{
"alpha_fraction": 0.6603773832321167,
"alphanum_fraction": 0.6792452931404114,
"avg_line_length": 52,
"blob_id": "0e2918b2293826c7657543cd3f03e3028509d35f",
"content_id": "dbc867443fe1e24f0b758748496e7f6b5f4f484f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 1,
"path": "/sqls/ATM_OSType.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_OSType` VALUES (1, 'ubuntu', NULL);\n"
},
{
"alpha_fraction": 0.6937122941017151,
"alphanum_fraction": 0.6937122941017151,
"avg_line_length": 32.1485710144043,
"blob_id": "249c2261aef00688d16008ad70b2e755c8931625",
"content_id": "9b1e07bb27f50253d3966851e54d74b88da7290a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5805,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 175,
"path": "/api/serializers.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom TestModel import models as testmodels\nfrom BuildModel import models as buildmodels\nfrom ConfigModel import models as configmodels\nfrom EnvModel import models as envmodels\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = configmodels.Project\n\nclass ProjectRelatedField(serializers.RelatedField):\n def to_representation(self, value):\n return value.name\n\nclass BranchSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = configmodels.Branch\n\nclass DLFrameworkSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = configmodels.DLFramework\n\nclass BlockSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = configmodels.Block\n\nclass OSTypeSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = configmodels.OSType\n\nclass ConfigTypeSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = configmodels.ConfigType\n\nclass ConfigDetailSerializer(serializers.ModelSerializer):\n configtype = serializers.ReadOnlyField(source='configtype.name')\n\n class Meta:\n fields = '__all__'\n model = configmodels.ConfigDetail\n\nclass ConfigPlanSerializer(serializers.ModelSerializer):\n config = ConfigDetailSerializer(many=True)\n\n class Meta:\n fields = '__all__'\n model = configmodels.ConfigPlan\n\nclass TestTypeSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = testmodels.TestType\n\nclass TagSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = testmodels.Tag\n\nclass DockerImageSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = envmodels.DockerImage\n\nclass TestCmdSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n # project = serializers.PrimaryKeyRelatedField(many=False, queryset=testmodels.Project.objects.all())\n # testplan = serializers.PrimaryKeyRelatedField(many=True, queryset=testmodels.TestCase.testplan_set)\n block = serializers.ReadOnlyField(source='block.name')\n project = serializers.ReadOnlyField(source='project.name')\n testtype = serializers.ReadOnlyField(source='testtype.name')\n tag = TagSerializer(many=True)\n class Meta:\n fields = '__all__'\n model = testmodels.TestCmd\n\nclass TestCaseSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n # testcmd = serializers.PrimaryKeyRelatedField(many=False, queryset=testmodels.TestCmd.objects.all())\n # testplan = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n testcmd = serializers.ReadOnlyField(source='testcmd.cmdline')\n block = serializers.ReadOnlyField(source='testcmd.block.name')\n config = ConfigPlanSerializer()\n\n class Meta:\n # fields = ('id','name', 'testcmd','testplan',)\n fields = '__all__'\n model = testmodels.TestCase\n\nclass TestPlanSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n # project = ProjectSerializer()\n project = serializers.ReadOnlyField(source='project.name')\n branch = serializers.ReadOnlyField(source='branch.name')\n dlframework = serializers.ReadOnlyField(source='dlframework.name')\n testcase = TestCaseSerializer(many=True)\n config = ConfigPlanSerializer()\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 
'name', 'project', 'testcase',)\n model = testmodels.TestPlan\n\nclass TestPlanUpdateSerializer(serializers.ModelSerializer):\n # project = serializers.CharField(source='project.name', read_only=True)\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = testmodels.TestPlan\n\nclass BuildProjectSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = buildmodels.BuildProject\n\nclass BuildModeSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = buildmodels.BuildMode\n\nclass RunModeSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = buildmodels.RunMode\n\nclass BuildDetailSerializer(serializers.ModelSerializer):\n buildproject = serializers.ReadOnlyField(source='buildproject.name')\n buildmode = serializers.ReadOnlyField(source='buildmode.name')\n runmode = serializers.ReadOnlyField(source='runmode.name')\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = buildmodels.BuildDetail\n\nclass BuildPlanSerializer(serializers.ModelSerializer):\n project = serializers.ReadOnlyField(source='project.name')\n branch = serializers.ReadOnlyField(source='branch.name')\n ostype = serializers.ReadOnlyField(source='ostype.name')\n buildconfig = BuildDetailSerializer(many=True)\n\n class Meta:\n fields = '__all__'\n # fields = ('id', 'name', 'project', 'testcase',)\n model = buildmodels.BuildPlan\n\nclass TestJobSerializer(serializers.ModelSerializer):\n project = serializers.ReadOnlyField(source='project.name')\n buildplan = BuildPlanSerializer()\n testplan = TestPlanSerializer(many=True)\n class Meta:\n fields = '__all__'\n model = testmodels.TestJob\n\n\n\n\n"
},
{
"alpha_fraction": 0.6435643434524536,
"alphanum_fraction": 0.6633663177490234,
"avg_line_length": 49.5,
"blob_id": "fb7b2c210ebbe91235cb6bd467539c9b1f95e17a",
"content_id": "27b062c4c0f115961dca5a877cd7430d105983da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 2,
"path": "/sqls/ATM_Block.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_Block` VALUES (1, 'matmul', NULL);\nINSERT INTO `ATM_Block` VALUES (2, 'vop', NULL);\n"
},
{
"alpha_fraction": 0.5403422713279724,
"alphanum_fraction": 0.5990220308303833,
"avg_line_length": 21.72222137451172,
"blob_id": "c2d87c82db35e29b9e29614063fdb64a127233db",
"content_id": "d58142957222f6b8dcf852235bb1b55a4ce6b2a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 18,
"path": "/EnvModel/migrations/0002_dockerimage_dockerfile.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-22 03:44\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('EnvModel', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='dockerimage',\n name='dockerfile',\n field=models.TextField(blank=True, max_length=10000, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6754176616668701,
"alphanum_fraction": 0.7136037945747375,
"avg_line_length": 58.85714340209961,
"blob_id": "d73f309196fb7af77c5d06f8f724c0ade5d60bce",
"content_id": "f8e9c3b0f8d6d591a84a9d9e11c21b06472cbfac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 7,
"path": "/sqls/ATM_BuildDependency.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_BuildDependency` VALUES (3, 'cmodel', 1);\nINSERT INTO `ATM_BuildDependency` VALUES (4, 'umd/core', 2);\nINSERT INTO `ATM_BuildDependency` VALUES (5, 'infer', 3);\nINSERT INTO `ATM_BuildDependency` VALUES (6, 'ratelrt', 4);\nINSERT INTO `ATM_BuildDependency` VALUES (7, 'ratelnn', 6);\nINSERT INTO `ATM_BuildDependency` VALUES (8, 'AGraph', 11);\nINSERT INTO `ATM_BuildDependency` VALUES (9, 'Kaleido', 10);\n"
},
{
"alpha_fraction": 0.5609756112098694,
"alphanum_fraction": 0.5873170495033264,
"avg_line_length": 31.03125,
"blob_id": "05dfc1bdf1969136710a8381a38e1d6284154200",
"content_id": "6a2db4571785264a8e01a2a3f879c1308547ea01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1025,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 32,
"path": "/ConfigModel/migrations/0002_auto_20190807_0753.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-07 07:53\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ConfigModel', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='configdetail',\n options={'verbose_name': 'Config Option', 'verbose_name_plural': 'Config Option'},\n ),\n migrations.AlterField(\n model_name='configdetail',\n name='desc',\n field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='configplan',\n name='desc',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='configtype',\n name='desc',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5716845989227295,
"alphanum_fraction": 0.6344085931777954,
"avg_line_length": 26.899999618530273,
"blob_id": "fced9dd33141f683bb69d10d075817fe0c72a4af",
"content_id": "105525bb35ff448a1370b608cb8861178cf633af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 558,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 20,
"path": "/TestModel/migrations/0004_testjob_runmode.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-22 03:44\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('BuildModel', '0002_auto_20190807_0753'),\n ('TestModel', '0003_testjob_env'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='testjob',\n name='runMode',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.RunMode'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7342056035995483,
"alphanum_fraction": 0.7342056035995483,
"avg_line_length": 47.654544830322266,
"blob_id": "851a47b0157d9f95b9a5aafe3a3e64031e5f36b5",
"content_id": "c819131dbee4bb2b40b9fe6c09820d937f3efad3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2675,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 55,
"path": "/api/urls.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom django.conf.urls import include\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework import routers\nfrom . import views\nfrom rest_framework.documentation import include_docs_urls\n\nrouter = DefaultRouter()\nrouter.register('project', views.ProjectViewSet, base_name='projects')\nrouter.register('branch', views.BranchViewSet, base_name='branches')\nrouter.register('dlframework', views.DLFrameworkViewSet, base_name='dlframeworks')\nrouter.register('block', views.BlockViewSet, base_name='blocks')\nrouter.register('configdetail', views.ConfigDetailViewSet, base_name='configdetails')\nrouter.register('configplan', views.ConfigPlanViewSet, base_name='configplans')\nrouter.register('testtype', views.TestTypeViewSet, base_name='testtypes')\nrouter.register('tag', views.TagViewSet, base_name='tags')\nrouter.register('testcmd', views.TestCmdViewSet, base_name='testcmds')\nrouter.register('testcase', views.TestCaseViewSet, base_name='testcases')\nrouter.register('testplan', views.TestPlanViewSet, base_name='testplans')\nrouter.register('testjob', views.TestJobViewSet, base_name='testjobs')\nrouter.register('builddetail', views.BuildDetailViewSet, base_name='builddetails')\nrouter.register('buildplan', views.BuildPlanViewSet, base_name='buildplans')\n\nurlpatterns = [\n path('', include(router.urls)),\n path('docs/', include_docs_urls(title=\"Alinpu Test Management\", description=\"API design for Alinpu Test Management\", authentication_classes=[], permission_classes=[])),\n path('open/testplan/<str:plan_name>/', views.GenTestPlan),\n path('open/testjob/<str:job_name>/', views.GenTestJob),\n path('open/buildplan/<str:plan_name>/', views.GenBuildPlan),\n path('open/wholecaselist/', views.GenWholeCaseList),\n path('open/democaselist/', views.GenDemoCaseList)\n]\n# urlpatterns.append(\n# path('open/testplan/<str:plan_name>/', views.GenTestPlan)\n# )\n# urlpatterns.append(\n# path('open/testjob/<str:job_name>/', views.GenTestJob)\n# )\n# urlpatterns.append(\n# path('open/buildplan/<str:plan_name>/', views.GenBuildPlan)\n# )\n# urlpatterns.append(\n# path('open/wholecaselist/', views.GenWholeCaseList)\n# )\n#\n# urlpatterns = [\n# path('project/', views.ProjectList.as_view()),\n# path('project/<int:pk>/', views.ProjectDetail.as_view()),\n# path('teststep/', views.TestStepList.as_view()),\n# path('teststep/<int:pk>/', views.TestStepDetail.as_view()),\n# path('testcase/', views.TestCaseList.as_view()),\n# path('testcase/<int:pk>/', views.TestCaseDetail.as_view()),\n# path('testplan/', views.TestPlanList.as_view()),\n# path('testplan/<int:pk>/', views.TestPlanDetail.as_view()),\n# ]"
},
{
"alpha_fraction": 0.6752136945724487,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 57.5,
"blob_id": "62bcc3386854b0256f2a2322c443ace47ed5adef",
"content_id": "7c5c3f8f7fdca6a48397c8e174f20d9988df8358",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 2,
"path": "/sqls/ATM_TestType.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_TestType` VALUES (1, 'unit test', NULL);\nINSERT INTO `ATM_TestType` VALUES (2, 'model test', NULL);\n"
},
{
"alpha_fraction": 0.6481481194496155,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 53,
"blob_id": "a65a8a032e0422365b9ac0bf96edd9e03f7a5332",
"content_id": "2e471d281b13406299a24f6d447f25fe82e53ae8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 7,
"path": "/sqls/ATM_TestPlan_testcase.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_TestPlan_testcase` VALUES (1, 1, 1);\nINSERT INTO `ATM_TestPlan_testcase` VALUES (3, 1, 2);\nINSERT INTO `ATM_TestPlan_testcase` VALUES (2, 2, 1);\nINSERT INTO `ATM_TestPlan_testcase` VALUES (4, 3, 1);\nINSERT INTO `ATM_TestPlan_testcase` VALUES (5, 3, 2);\nINSERT INTO `ATM_TestPlan_testcase` VALUES (6, 3, 3);\nINSERT INTO `ATM_TestPlan_testcase` VALUES (7, 4, 1);\n"
},
{
"alpha_fraction": 0.695652186870575,
"alphanum_fraction": 0.7125604152679443,
"avg_line_length": 58.14285659790039,
"blob_id": "d12fafc83a73883b524c2d6ed878ee8566a75579",
"content_id": "a6ca6aa5c7430f4740096065b8d3b038a541dc96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 7,
"path": "/sqls/ATM_ConfigType.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_ConfigType` VALUES (1, 'cmodel', NULL);\nINSERT INTO `ATM_ConfigType` VALUES (2, 'compilation', NULL);\nINSERT INTO `ATM_ConfigType` VALUES (3, 'driver', NULL);\nINSERT INTO `ATM_ConfigType` VALUES (4, 'inference', NULL);\nINSERT INTO `ATM_ConfigType` VALUES (5, 'performance', NULL);\nINSERT INTO `ATM_ConfigType` VALUES (6, 'kaleido', NULL);\nINSERT INTO `ATM_ConfigType` VALUES (7, 'ratelnn', NULL);\n"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 52,
"blob_id": "ddb121501f580cc19a22d1c3b1ef5979775220d7",
"content_id": "27610a5837772b9e294cfae5e17284402afaa12f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 6,
"path": "/sqls/ATM_TestJob_testplan.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_TestJob_testplan` VALUES (1, 1, 1);\nINSERT INTO `ATM_TestJob_testplan` VALUES (2, 2, 2);\nINSERT INTO `ATM_TestJob_testplan` VALUES (3, 3, 3);\nINSERT INTO `ATM_TestJob_testplan` VALUES (4, 4, 4);\nINSERT INTO `ATM_TestJob_testplan` VALUES (5, 5, 1);\nINSERT INTO `ATM_TestJob_testplan` VALUES (6, 5, 3);\n"
},
{
"alpha_fraction": 0.6984505653381348,
"alphanum_fraction": 0.6984505653381348,
"avg_line_length": 29,
"blob_id": "2157a55a517ade7539a6f64507b5afe0aa03f72b",
"content_id": "7accbfefcd2e580cddd137601407ce40c2700ea1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 839,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/ConfigModel/admin.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom ConfigModel.models import *\nfrom TestModel.admin import ExportCsvMixin\n# Register your models here.\n\nclass ConfigDetail_Admin(admin.ModelAdmin):\n search_fields = ('name', 'content')\n list_filter = ('configtype',)\n list_display = ('name', 'desc', 'configtype', 'content')\n\n save_as = True\n\nclass ConfigPlan_Admin(admin.ModelAdmin, ExportCsvMixin):\n actions = ['export_as_csv']\n search_fields = ('name',)\n\n def configs(self, obj):\n return [a.name for a in obj.config.all()]\n\n list_display = ('name', 'desc', 'configs', 'is_delete')\n filter_horizontal = ('config',)\n\n save_as = True\n\nadmin.site.register(ConfigDetail, ConfigDetail_Admin)\nadmin.site.register(ConfigPlan, ConfigPlan_Admin)\n\nadmin.site.register([Project, Branch, DLFramework, OSType, Block, ConfigType])"
},
{
"alpha_fraction": 0.6041666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 47,
"blob_id": "7b6e5144bec4e79e9c5c3672f5ebb7cce484273f",
"content_id": "c1499c6479f4abf5a468c50a4424b36e17dbbedd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 2,
"path": "/sqls/ATM_TestCmd_tag.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_TestCmd_tag` VALUES (1, 1, 2);\nINSERT INTO `ATM_TestCmd_tag` VALUES (2, 2, 2);\n"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 48,
"blob_id": "c4ee7389ac8c5193912f88a5964144055c7da8cc",
"content_id": "0fa6af6a222c10329fd6b0a9b3ab59aeef71db33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 2,
"path": "/sqls/auth_user_groups.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `auth_user_groups` VALUES (1, 2, 1);\nINSERT INTO `auth_user_groups` VALUES (2, 2, 2);\n"
},
{
"alpha_fraction": 0.5430463552474976,
"alphanum_fraction": 0.5844370722770691,
"avg_line_length": 25.2608699798584,
"blob_id": "e4fc8866cb89f695a0116ca116f5fe3e41d4da87",
"content_id": "30bb7772030e4cff1a8537fd333f208203f3378f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 23,
"path": "/TestModel/migrations/0005_auto_20190823_0924.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-23 09:24\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('TestModel', '0004_testjob_runmode'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='testcase',\n name='desc',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='testcase',\n name='name',\n field=models.CharField(max_length=100, unique=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6605504751205444,
"alphanum_fraction": 0.6788991093635559,
"avg_line_length": 53.5,
"blob_id": "58af8f3b255d1b7570d5722103ad799562bfed54",
"content_id": "a2acf5a34436fe3bb5fed4ab36f95f6867b55d7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 2,
"path": "/sqls/ATM_Branch.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_Branch` VALUES (1, 'master', NULL);\nINSERT INTO `ATM_Branch` VALUES (2, 'graph_opt', NULL);\n"
},
{
"alpha_fraction": 0.697175145149231,
"alphanum_fraction": 0.697175145149231,
"avg_line_length": 31.200000762939453,
"blob_id": "ef8feef87a9dc1697f491728d06d0255aba852f0",
"content_id": "f129b5ce0364aded5df4ba134bfcb576b993361e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1770,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 55,
"path": "/BuildModel/admin.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom BuildModel.models import *\nfrom TestModel.admin import ExportCsvMixin\n\n# Register your models here.\n\nclass BuildDependency_Admin(admin.ModelAdmin):\n def dependprojectlist(self, obj):\n return [a.name for a in obj.dependprojects.all()]\n list_display = ('buildproject', 'dir', 'dependprojectlist')\n filter_horizontal = ('dependprojects',)\n\n save_as = True\n\nclass BuildDetail_Admin(admin.ModelAdmin):\n search_fields = ('name', 'options')\n list_filter = ('buildproject',)\n list_display = ('name', 'desc', 'buildproject', 'buildmode', 'buildtype', 'runmode', 'options', 'precondition')\n\n save_as = True\n\nclass BuildInfo_Admin(admin.ModelAdmin):\n def buildproject(self, obj):\n return obj.buildconfig.buildproject\n\n def buildmode(self, obj):\n return obj.buildconfig.buildmode\n\n def buildtype(self, obj):\n return obj.buildconfig.buildtype\n\n def runmode(self, obj):\n return obj.buildconfig.runmode\n\n def buildoptions(self, obj):\n return obj.buildconfig.options\n\n list_display = ('name', 'desc', 'buildproject', 'buildmode', 'buildtype', 'runmode', 'buildoptions')\n\nclass BuildPlan_Admin(admin.ModelAdmin, ExportCsvMixin):\n actions = ['export_as_csv']\n def buildconfigs(self, obj):\n return [a.name for a in obj.buildconfig.all()]\n\n list_filter = ('project', 'branch',)\n list_display = ('name', 'desc', 'project', 'branch', 'version', 'archived', 'buildconfigs')\n filter_horizontal = ('buildconfig',)\n\n save_as = True\n\nadmin.site.register(BuildPlan, BuildPlan_Admin)\n\nadmin.site.register(BuildDependency, BuildDependency_Admin)\nadmin.site.register(BuildDetail, BuildDetail_Admin)\nadmin.site.register([RunMode, BuildMode, BuildProject])"
},
{
"alpha_fraction": 0.6884236335754395,
"alphanum_fraction": 0.6884236335754395,
"avg_line_length": 26.100000381469727,
"blob_id": "6603a753b5d25546f3b73b260454398e52391aef",
"content_id": "7b4b370fb8ee39b03b8b2881464251f5771ac6c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 812,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 30,
"path": "/migrations/4-updatecase.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\nimport collections\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestPlan\n from TestModel.models import TestCase\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import ConfigDetail\n from ConfigModel.models import Project\n from ConfigModel.models import DLFramework\n from ConfigModel.models import Branch\n from TestModel.models import TestType\n\n testCases = TestCase.objects.all()\n for testCase in testCases:\n if '[' in testCase.name:\n testCase.name = testCase.name.replace('[', '_')\n testCase.name = testCase.name.replace(']', '')\n testCase.save()\n\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.5015040636062622,
"alphanum_fraction": 0.5131070017814636,
"avg_line_length": 39.120689392089844,
"blob_id": "d3d74200ea84ff2179dcefce0ef4fe88ee8d8add",
"content_id": "f3add769062b67b7afc8a4ed5f472a7f177cf9b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4654,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 116,
"path": "/ConfigModel/migrations/0001_initial.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-02 02:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Block',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'db_table': 'ATM_Block',\n },\n ),\n migrations.CreateModel(\n name='Branch',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'db_table': 'ATM_Branch',\n },\n ),\n migrations.CreateModel(\n name='ConfigDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ('content', models.TextField(blank=True, max_length=1000, null=True)),\n ],\n options={\n 'verbose_name': 'Configuration',\n 'verbose_name_plural': 'Configuration',\n 'db_table': 'ATM_ConfigDetail',\n },\n ),\n migrations.CreateModel(\n name='ConfigType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ],\n options={\n 'db_table': 'ATM_ConfigType',\n },\n ),\n migrations.CreateModel(\n name='DLFramework',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'db_table': 'ATM_DLFramework',\n },\n ),\n migrations.CreateModel(\n name='OSType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'db_table': 'ATM_OSType',\n },\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ('is_delete', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'ATM_Project',\n },\n ),\n migrations.CreateModel(\n name='ConfigPlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ('is_delete', models.BooleanField(default=False)),\n ('config', models.ManyToManyField(blank=True, to='ConfigModel.ConfigDetail')),\n ],\n options={\n 'verbose_name': 'Config Plan',\n 'verbose_name_plural': 'Config Plan',\n 'db_table': 'ATM_ConfigPlan',\n },\n ),\n migrations.AddField(\n model_name='configdetail',\n name='configtype',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, 
to='ConfigModel.ConfigType'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6347107291221619,
"alphanum_fraction": 0.7179063558578491,
"avg_line_length": 55.71875,
"blob_id": "af266dcb678f02151eef9df6f303c99af3345f88",
"content_id": "439d04665831ef03b1f8e551bace7b9bbdcc2c5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1815,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 32,
"path": "/sqls/auth_group_permissions.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `auth_group_permissions` VALUES (1, 1, 77);\nINSERT INTO `auth_group_permissions` VALUES (2, 1, 78);\nINSERT INTO `auth_group_permissions` VALUES (3, 1, 79);\nINSERT INTO `auth_group_permissions` VALUES (4, 1, 80);\nINSERT INTO `auth_group_permissions` VALUES (5, 1, 81);\nINSERT INTO `auth_group_permissions` VALUES (6, 1, 82);\nINSERT INTO `auth_group_permissions` VALUES (7, 1, 83);\nINSERT INTO `auth_group_permissions` VALUES (8, 1, 84);\nINSERT INTO `auth_group_permissions` VALUES (9, 1, 89);\nINSERT INTO `auth_group_permissions` VALUES (10, 1, 90);\nINSERT INTO `auth_group_permissions` VALUES (11, 1, 91);\nINSERT INTO `auth_group_permissions` VALUES (12, 1, 92);\nINSERT INTO `auth_group_permissions` VALUES (13, 2, 37);\nINSERT INTO `auth_group_permissions` VALUES (14, 2, 38);\nINSERT INTO `auth_group_permissions` VALUES (15, 2, 39);\nINSERT INTO `auth_group_permissions` VALUES (16, 2, 40);\nINSERT INTO `auth_group_permissions` VALUES (17, 2, 53);\nINSERT INTO `auth_group_permissions` VALUES (18, 2, 54);\nINSERT INTO `auth_group_permissions` VALUES (19, 2, 55);\nINSERT INTO `auth_group_permissions` VALUES (20, 2, 56);\nINSERT INTO `auth_group_permissions` VALUES (21, 2, 61);\nINSERT INTO `auth_group_permissions` VALUES (22, 2, 62);\nINSERT INTO `auth_group_permissions` VALUES (23, 2, 63);\nINSERT INTO `auth_group_permissions` VALUES (24, 2, 64);\nINSERT INTO `auth_group_permissions` VALUES (25, 2, 65);\nINSERT INTO `auth_group_permissions` VALUES (26, 2, 66);\nINSERT INTO `auth_group_permissions` VALUES (27, 2, 67);\nINSERT INTO `auth_group_permissions` VALUES (28, 2, 68);\nINSERT INTO `auth_group_permissions` VALUES (29, 2, 69);\nINSERT INTO `auth_group_permissions` VALUES (30, 2, 70);\nINSERT INTO `auth_group_permissions` VALUES (31, 2, 71);\nINSERT INTO `auth_group_permissions` VALUES (32, 2, 72);\n"
},
{
"alpha_fraction": 0.5869565010070801,
"alphanum_fraction": 0.6063829660415649,
"avg_line_length": 38.30908966064453,
"blob_id": "8b51f97b545f987a007e3d03c52cd561e489bd6c",
"content_id": "feec2a899e62504414955c038ea279664cb0cf09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2162,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 55,
"path": "/TestModel/migrations/0002_auto_20190807_0753.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-07 07:53\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ConfigModel', '0002_auto_20190807_0753'),\n ('TestModel', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='testplan',\n name='config',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.ConfigPlan'),\n ),\n migrations.AlterField(\n model_name='testcase',\n name='desc',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='testcase',\n name='exectime',\n field=models.FloatField(default=0, verbose_name='Execution Time'),\n ),\n migrations.AlterField(\n model_name='testcase',\n name='testcmd',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='TestModel.TestCmd', verbose_name='Commandline'),\n ),\n migrations.AlterField(\n model_name='testjob',\n name='parent',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='TestModel.TestJob', verbose_name='Parent Job'),\n ),\n migrations.AlterField(\n model_name='testplan',\n name='desc',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='testplan',\n name='dlframework',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.DLFramework', verbose_name='DL Framework'),\n ),\n migrations.AlterField(\n model_name='testplan',\n name='runmode',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.RunMode', verbose_name='Run Mode'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5517755746841431,
"alphanum_fraction": 0.5813190340995789,
"avg_line_length": 33.20408248901367,
"blob_id": "d43005d3d6e0ef11e06de87ba159450fe071783f",
"content_id": "cb535e2218a9563d28ff1244f894344adfacb7e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3351,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 98,
"path": "/migrations/2-testcase.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef handleParameters(parameters):\n if '15' in parameters:\n return '15'\n elif parameters == '0,1':\n return '0-1'\n elif '0+101,1+101' in parameters:\n return '0-1'\n else:\n return '0'\n\ndef main():\n from TestModel.models import TestCmd, TestCase\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import Project\n from TestModel.models import TestType\n\n case_config_mapping = {}\n mappings = csv.reader(open('case_config_mapping.csv', 'r', encoding=\"utf-8-sig\"))\n for mapping in mappings:\n source = mapping[0]\n coremask = mapping[1]\n lmcopy = mapping[2]\n compression = mapping[3]\n parameters = mapping[4]\n configplan = mapping[5]\n key = \"{0}_{1}_{2}_{3}_{4}\".format(source, coremask.replace(' ', '-'), lmcopy, compression, parameters.replace(' ', '-'))\n case_config_mapping[key] = configplan.replace(' ', '-')\n\n CaseList = []\n namelist = []\n data = csv.reader(open('TF_CASE_GEN_TABLE.csv', 'r'))\n\n for line in data:\n cmdline_name = line[4]\n case_name = line[1]\n source = line[19]\n coremask = line[20]\n tolerance = line[21]\n timeout = line[22]\n lmcopy = line[25]\n compression = line[26]\n comment = line[27]\n parameters = line[28]\n\n if tolerance == 'N/A':\n tolerance = 0\n elif tolerance == 'inf':\n tolerance = 65536\n elif tolerance == '1.98E+37' or tolerance == '9.63E+27':\n tolerance = 65536\n elif tolerance == '2.0f':\n tolerance = 2\n else:\n tolerance = int(tolerance)\n branch = line[18]\n if branch == 'master':\n continue\n\n if coremask == 'N/A' or coremask == '1':\n coremask = 'N/A-1'\n\n parameters = handleParameters(parameters)\n\n key = \"{0}_{1}_{2}_{3}_{4}\".format(source, coremask, lmcopy, compression, parameters)\n configplans = case_config_mapping[key].split('-')\n\n for configplan in configplans:\n print(\"name: {0} config_plan: {1} testcmd: {2}\".format(case_name.ljust(70), configplan.ljust(30), cmdline_name))\n testCmd = TestCmd.objects.get(name=cmdline_name)\n\n configPlan = ConfigPlan.objects.get(name=configplan)\n priority = testCmd.priority\n new_case_name = case_name\n if len(configplans) > 1:\n new_case_name = \"{0}[{1}]\".format(case_name, configplan)\n\n testCase = TestCase.objects.filter(name=new_case_name)\n if len(testCase) == 0:\n testCase = TestCase(name=new_case_name, desc=comment, testcmd=testCmd, config=configPlan, priority=priority, tolerance=tolerance, timeout=int(timeout), exectime=5)\n testCase.save()\n #\n # print(\"name: {0} cmdline: {1} block: {2} branch: {3} priority: {4}\".format(name, cmdline, block.name, branch, priority))\n # CmdList.append(TestCmd(name=name, cmdline=cmdline, block=block, project=project, testtype=testtype, priority=priority, comment=comment))\n\n\n # TestCmd.objects.bulk_create(CmdList)\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.6594001054763794,
"alphanum_fraction": 0.6612030863761902,
"avg_line_length": 31.983783721923828,
"blob_id": "acdb78f9a205b80e6cc3be5f3e7cc1109c065e33",
"content_id": "381a0aca3e8daf207ae79dc08d0b1bd747cbab93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6101,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 185,
"path": "/TestModel/admin.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin, sites\nfrom django.contrib.admin import SimpleListFilter\nfrom TestModel.models import *\nfrom django.contrib.admin.helpers import ActionForm\nfrom django import forms\nfrom django.http import HttpResponse\nimport csv\nimport copy\n\n# Register your models here.\n\nclass ExportCsvMixin(object):\n def export_as_csv(self, request, queryset):\n meta = self.model._meta\n field_names = [field.name for field in meta.fields]\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = f'attachment; filename={meta}.csv'\n response.charset = 'utf-8-sig'\n writer = csv.writer(response)\n writer.writerow(field_names)\n for obj in queryset:\n row = writer.writerow([getattr(obj, field) for field in field_names])\n\n return response\n\n export_as_csv.short_description = 'Export Selected as CSV'\n\nclass TagInline(admin.TabularInline):\n model = Tag\n\nclass TestCaseInline(admin.TabularInline):\n model = TestCase\n\nclass TestCmd_Admin(admin.ModelAdmin, ExportCsvMixin):\n actions = ['export_as_csv']\n search_fields = ('name', 'cmdline')\n inlines = [TestCaseInline, ]\n def tags(self, obj):\n return [a.name for a in obj.tag.all()]\n list_filter = ('block', 'project', 'priority', 'testtype')\n\n list_display = ('name', 'cmdline', 'block', 'project', 'priority', 'testtype', 'tags', 'comment')\n filter_horizontal = ('tag',)\n\n save_as = True\n\n# class TestCaseInline(admin.TabularInline):\n# model = TestPlan_testcase\n\nclass TestPlanInline(admin.TabularInline):\n model = TestPlan.testcase.through\n fields = ['testplan']\n\n def testplan(self, instance):\n return instance.testplan.name\n\n testplan.short_description = 'testplan name'\n\n\ndef copy_testplan(modeladmin, request, queryset):\n for testplan in queryset:\n testplan_copy = copy.copy(testplan) # (2) django copy object\n testplan_copy.id = None # (3) set 'id' to None to create new object\n testplan_copy.name = testplan.name + \"(cloned)\"\n testplan_copy.save() # initial save\n\n # (4) copy M2M relationship: testcase\n for testcase in testplan.testcase.all():\n testplan_copy.testcase.add(testcase)\n\n testplan_copy.save() # (7) save the copy to the database for M2M relations\n\ncopy_testplan.short_description = \"Clone Selected TestPlan\"\n\nclass TestPlan_Admin(admin.ModelAdmin, ExportCsvMixin):\n # def cases(self, obj):\n # return [a.name for a in obj.testcase.all()]\n\n # list_display = ('name', 'desc', 'project', 'branch', 'dlframework', 'runmode', 'cases', 'config')\n actions = [copy_testplan, 'export_as_csv']\n search_fields = ('name',)\n list_filter = ('project', 'branch', 'dlframework', 'runmode')\n list_display = ('name', 'desc', 'project', 'branch', 'dlframework', 'runmode', 'config')\n filter_horizontal = ('testcase',)\n save_as = True\n\nclass BlockFilter(SimpleListFilter):\n title = 'Block'\n parameter_name = 'block'\n\n\nclass AddTestPlanForm(ActionForm):\n testplans = forms.CharField(required=False)\n\ndef add_testplan(modeladmin, request, queryset):\n testplans_name = request.POST['testplans']\n testplans_name = testplans_name.replace(\";\", \",\").replace(\" \", \",\")\n testplans_name_list = testplans_name.split(\",\")\n for testplan_name in testplans_name_list:\n testplans = TestPlan.objects.filter(name=testplan_name)\n if len(testplans) > 0:\n for testcase in queryset:\n testcase.testplan_set.add(testplans[0])\n testcase.save()\n\nadd_testplan.short_description = \"Add Into TestPlans\"\n\ndef remove_testplan(modeladmin, request, queryset):\n testplans_name = 
request.POST['testplans']\n testplans_name = testplans_name.replace(\";\", \",\").replace(\" \", \",\")\n testplans_name_list = testplans_name.split(\",\")\n for testplan_name in testplans_name_list:\n testplans = TestPlan.objects.filter(name=testplan_name)\n if len(testplans) > 0:\n for testcase in queryset:\n testcase.testplan_set.remove(testplans[0])\n testcase.save()\n\nremove_testplan.short_description = \"Remove From TestPlans\"\n\n\nclass TestCase_Admin(admin.ModelAdmin, ExportCsvMixin):\n action_form = AddTestPlanForm\n actions = [add_testplan, remove_testplan, 'export_as_csv']\n\n inlines = [TestPlanInline, ]\n def plans(self, obj):\n return [a.name for a in obj.testplan_set.all()]\n\n def configs(self, obj):\n ret = []\n if obj.config != None:\n for a in obj.config.config.all():\n ret.append(a)\n return ret\n\n def project(self, obj):\n return obj.testcmd.project\n\n def block(self, obj):\n return obj.testcmd.block\n\n def commandline(self, obj):\n return obj.testcmd.cmdline\n\n search_fields = ('name',)\n list_filter = ('testcmd__project', 'testcmd__block', 'priority')\n list_display = ('name', 'project', 'block', 'plans', 'commandline', 'priority', 'configs', 'desc', 'updated')\n\n save_as = True\n\nclass TestJob_Inline(admin.TabularInline):\n model = TestJob\n\nclass TestJob_Admin(admin.ModelAdmin, ExportCsvMixin):\n # inlines = [TestJob_Inline, ]\n # display be foreigned objects\n actions = ['export_as_csv']\n def sub_testjob(self, obj):\n return [a.name for a in obj.testjob_set.all()]\n\n def testplans(self, obj):\n return [a.name for a in obj.testplan.all()]\n\n search_fields = ('name',)\n list_filter = ('project', 'env', 'runMode')\n list_display = ('name', 'project', 'env', 'runMode', 'buildplan', 'testplans', 'sub_testjob')\n filter_horizontal = ('testplan',)\n\n save_as = True\n\nadmin.site.site_header = 'Alinpu Test Management'\nadmin.site.site_title = 'Login Alinpu Test Management Admin Site'\nadmin.site.index_title = 'Alinpu Test Management'\n\nadmin.site.register(TestPlan, TestPlan_Admin)\n\nadmin.site.register(TestCmd, TestCmd_Admin)\n\nadmin.site.register(TestCase, TestCase_Admin)\n\nadmin.site.register(TestJob, TestJob_Admin)\n\nadmin.site.register([TestType, Tag])"
},
{
"alpha_fraction": 0.7443609237670898,
"alphanum_fraction": 0.7443609237670898,
"avg_line_length": 21.16666603088379,
"blob_id": "ec2034eed86df706e35af641ec3e59a9a378df3f",
"content_id": "52157c9c080c38df4adbbcc33c88a962e8328c4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/ConfigModel/apps.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass ConfigmodelConfig(AppConfig):\n name = 'ConfigModel'\n verbose_name = \"Config Manager\"\n"
},
{
"alpha_fraction": 0.5476499199867249,
"alphanum_fraction": 0.5576985478401184,
"avg_line_length": 49.573768615722656,
"blob_id": "2c4294b8eb395bb1e467208194bd8333dcb52357",
"content_id": "2e94912f25314cc143b7ff1c12da86b5e105349f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6170,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 122,
"path": "/TestModel/migrations/0001_initial.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-02 02:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('BuildModel', '0001_initial'),\n ('ConfigModel', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ],\n options={\n 'db_table': 'ATM_Tag',\n },\n ),\n migrations.CreateModel(\n name='TestCase',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ('precondition', models.TextField(blank=True, max_length=1000, null=True)),\n ('priority', models.SmallIntegerField(default=2)),\n ('tolerance', models.IntegerField(default=0)),\n ('timeout', models.IntegerField(default=0)),\n ('exectime', models.FloatField(default=0)),\n ('created', models.DateField(auto_now_add=True)),\n ('updated', models.DateField(auto_now=True)),\n ('is_delete', models.BooleanField(default=False)),\n ('config', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.ConfigPlan')),\n ],\n options={\n 'verbose_name': 'Test Case',\n 'verbose_name_plural': 'Test Case',\n 'db_table': 'ATM_TestCase',\n },\n ),\n migrations.CreateModel(\n name='TestType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, unique=True)),\n ('desc', models.CharField(blank=True, max_length=50, null=True)),\n ],\n options={\n 'db_table': 'ATM_TestType',\n },\n ),\n migrations.CreateModel(\n name='TestPlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('desc', models.CharField(blank=True, max_length=100, null=True)),\n ('is_delete', models.BooleanField(default=False)),\n ('branch', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Branch')),\n ('dlframework', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.DLFramework')),\n ('project', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Project')),\n ('runmode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.RunMode')),\n ('testcase', models.ManyToManyField(blank=True, to='TestModel.TestCase')),\n ],\n options={\n 'verbose_name': 'Test Plan',\n 'verbose_name_plural': 'Test Plan',\n 'db_table': 'ATM_TestPlan',\n },\n ),\n migrations.CreateModel(\n name='TestJob',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('created', models.DateField(auto_now_add=True)),\n ('buildplan', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='BuildModel.BuildPlan')),\n ('parent', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='TestModel.TestJob')),\n ('project', models.ForeignKey(blank=True, default=None, null=True, 
on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Project')),\n ('testplan', models.ManyToManyField(blank=True, to='TestModel.TestPlan')),\n ],\n options={\n 'verbose_name': 'Test Job',\n 'verbose_name_plural': 'Test Job',\n 'db_table': 'ATM_TestJob',\n },\n ),\n migrations.CreateModel(\n name='TestCmd',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ('cmdline', models.TextField(max_length=1000)),\n ('priority', models.SmallIntegerField(default=2)),\n ('comment', models.CharField(blank=True, max_length=100, null=True)),\n ('is_delete', models.BooleanField(default=False)),\n ('block', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Block')),\n ('project', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ConfigModel.Project')),\n ('tag', models.ManyToManyField(to='TestModel.Tag')),\n ('testtype', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='TestModel.TestType')),\n ],\n options={\n 'verbose_name': 'Command Lines',\n 'verbose_name_plural': 'Command Lines',\n 'db_table': 'ATM_TestCmd',\n },\n ),\n migrations.AddField(\n model_name='testcase',\n name='testcmd',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='TestModel.TestCmd'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5743080377578735,
"alphanum_fraction": 0.5870938897132874,
"avg_line_length": 48.61940383911133,
"blob_id": "46177caf013334d635191db00b242cede2f6942e",
"content_id": "74cd1de01cf9a2995ee093113363ea0a246b4a6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6648,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 134,
"path": "/migrations/5-rm_unsupported_case_from_testplan.py",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "import os\nimport django\nimport csv\nimport collections\nfrom django.db.models import Q\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testdj.settings\")\ndjango.setup()\n\ndef main():\n from TestModel.models import TestPlan\n from TestModel.models import TestCase\n from ConfigModel.models import ConfigPlan\n from ConfigModel.models import ConfigDetail\n from ConfigModel.models import Project\n from ConfigModel.models import DLFramework\n from ConfigModel.models import Branch\n from TestModel.models import TestType\n\n TestPlanTuple = collections.namedtuple('testplan_name', ['project', 'dl_framework', 'branch', 'top_config', 'case_filter'])\n testplan_list = [\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='tf_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='mxnet', branch='graph_opt', top_config='mxnet_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_fixdata',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_comp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_nocomp',\n case_filter='daily_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_fixdata',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_comp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='legacy_random_nocomp',\n case_filter='checkin_test'),\n TestPlanTuple(project='v1', dl_framework='tensorflow', branch='graph_opt', top_config='dump_mode_tf',\n case_filter='daily_test'),\n ]\n\n for testplan in testplan_list:\n project = testplan.project\n dl_framwork = testplan.dl_framework\n branch = testplan.branch\n top_config = testplan.top_config\n case_filter = testplan.case_filter\n testplan_name = \"{0}_{1}_{2}_{3}\".format(project, case_filter, top_config, 
branch)\n desc = \"Project: {0}; Test Purpose: {1}; Framework: {2}; Branch: {3}; Config: {4}\".format(project, case_filter, dl_framwork, branch, top_config)\n projectObject = Project.objects.get(name=project)\n dlframeworkObject = DLFramework.objects.get(name=dl_framwork)\n branchObject = Branch.objects.get(name=branch)\n configObject = ConfigPlan.objects.get(name=top_config)\n\n testPlans = TestPlan.objects.filter(name=testplan_name)\n if (len(testPlans) == 0):\n testPlan = TestPlan(name=testplan_name, desc=desc, project=projectObject, branch=branchObject, dlframework=dlframeworkObject,\n config=configObject)\n testPlan.save()\n else:\n testPlan = testPlans[0]\n\n if dl_framwork == \"tensorflow\":\n csv_file = 'TF_CASE_GEN_TABLE.csv'\n else:\n csv_file = 'MXNET_CASE_GEN_TABLE.csv'\n\n data = csv.reader(open(csv_file, 'r'))\n rm_count = 0\n for line in data:\n cmdline_name = line[4]\n case_name = line[1]\n unsupported = line[16]\n case_failure = line[17]\n source = line[19]\n coremask = line[20]\n timeout = line[22]\n lmcopy = line[25]\n compression = line[26]\n comment = line[27]\n parameters = line[28]\n\n # if tolerance == 'N/A':\n # tolerance = 0\n # elif tolerance == 'inf':\n # tolerance = 65536\n # elif tolerance == '1.98E+37' or tolerance == '9.63E+27':\n # tolerance = 65536\n # elif tolerance == '2.0f':\n # tolerance = 2\n # else:\n # tolerance = int(tolerance)\n branch = line[18]\n if branch == 'master':\n continue\n\n if coremask == 'N/A' or coremask == '1':\n coremask = 'N/A-1'\n\n # remove unsupported test cases from test plan\n if unsupported == \"1\" or case_failure == \"1\" or parameters == \"101\":\n # print(case_name)\n testCases = TestCase.objects.filter(Q(name=case_name) | Q(name=case_name + \"_image_default\") | Q(name=case_name + \"_image_vops\"))\n for testCase in testCases:\n # print(testCase.name)\n rm_count = rm_count + 1\n testPlan.testcase.remove(testCase)\n print(rm_count)\n testPlan.save()\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')"
},
{
"alpha_fraction": 0.6822429895401001,
"alphanum_fraction": 0.7009345889091492,
"avg_line_length": 52.5,
"blob_id": "e3efc1ec1407b53f868ae91eb8a5ef106a5a774c",
"content_id": "3aaa7c62c5f149c4cddc03689fa788a64e1c4372",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 2,
"path": "/sqls/auth_group.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `auth_group` VALUES (1, 'Build Manager');\nINSERT INTO `auth_group` VALUES (2, 'Test Manager');\n"
},
{
"alpha_fraction": 0.6724137663841248,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 57,
"blob_id": "4bbd1de6821b50f3c52929e2493a8c9474e39990",
"content_id": "b911a46bccf03546a18ccd3734e6b5ed23c57700",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 7,
"path": "/sqls/ATM_BuildPlan_buildconfig.sql",
"repo_name": "kevinteng525/atm",
"src_encoding": "UTF-8",
"text": "INSERT INTO `ATM_BuildPlan_buildconfig` VALUES (1, 1, 1);\nINSERT INTO `ATM_BuildPlan_buildconfig` VALUES (2, 1, 2);\nINSERT INTO `ATM_BuildPlan_buildconfig` VALUES (3, 1, 3);\nINSERT INTO `ATM_BuildPlan_buildconfig` VALUES (4, 1, 4);\nINSERT INTO `ATM_BuildPlan_buildconfig` VALUES (5, 1, 5);\nINSERT INTO `ATM_BuildPlan_buildconfig` VALUES (6, 1, 6);\nINSERT INTO `ATM_BuildPlan_buildconfig` VALUES (7, 1, 7);\n"
}
] | 68 |
kouroshHakha/bag_testbenches_kh | https://github.com/kouroshHakha/bag_testbenches_kh | 34c94c9d860e8ef02e10786bddcd50db97825b1d | 31baee40861e149fb05ba9e044b7275dacd406eb | 6f16c24c58be4743365a252ea319240a8c669c4b | refs/heads/master | 2020-04-15T08:27:57.342169 | 2019-10-03T00:46:19 | 2019-10-03T00:46:19 | 164,521,565 | 0 | 3 | null | null | null | null | null | [
{
"alpha_fraction": 0.7066532373428345,
"alphanum_fraction": 0.7066532373428345,
"avg_line_length": 32.099998474121094,
"blob_id": "38eb18351b155dedd74c7934397dc78893fd4f66",
"content_id": "7178b6ed81bd8025ec7bfa4edd01c3234fb5e17e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 30,
"path": "/verification_kh/CTLEMeasurementUnit.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "from typing import TYPE_CHECKING, List, Tuple, Dict, Any, Sequence, Optional\n\nfrom bag.simulation.core import MeasurementManager, TestbenchManager\nimport numpy as np\nfrom scipy import interpolate\nimport IPython\nimport pdb\nimport matplotlib.pyplot as plt\nimport itertools\nimport os\nfrom bag.io.sim_data import save_sim_results\n\nif TYPE_CHECKING:\n from verification_ec.ac.core import ACTB\n\nfrom verification_kh.GenericACMM import GenericACMM\n\nclass CTLEMeasurementManager(GenericACMM):\n\n def run_ac_forward_post_process(self, data, tb_manager):\n output_dict = tb_manager.get_dc_gain_max_gain_first_pole(data, ['outdiff'])\n results = dict(\n dc_gain=output_dict['dc_gain_outdiff'],\n max_gain_ratio=output_dict['max_gain_outdiff']/output_dict['dc_gain_outdiff'],\n first_pole=output_dict['first_pole_outdiff'],\n ibias=np.abs(data['ibias']),\n corners=data['corner'],\n )\n\n self.overall_results.update(**results)"
},
{
"alpha_fraction": 0.5096238255500793,
"alphanum_fraction": 0.5154563784599304,
"avg_line_length": 34.53886032104492,
"blob_id": "62c53eae119bee353ec2b5c036e314f2c1fc04ee",
"content_id": "5ae1fe455e99bfd900a07acd1cfa3f09a2ead473",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6858,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 193,
"path": "/verification_kh/ComparatorTB.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "from bag.simulation.core import TestbenchManager\nimport scipy.interpolate as interp\nimport scipy.optimize as sciopt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.special import erf\nimport os\n\nfrom typing import TYPE_CHECKING, Dict, List, Union, Any\nif TYPE_CHECKING:\n from bag.core import Testbench\n\n\nclass NoiseWithCDFFitting(TestbenchManager):\n def setup_testbench(self, tb):\n # type: (Testbench) -> None\n sim_vars = self.specs['sim_vars']\n sim_outputs = self.specs.get('sim_outputs', None)\n\n for key, val in sim_vars.items():\n if isinstance(val, int) or isinstance(val, float):\n tb.set_parameter(key, val)\n else:\n tb.set_sweep_parameter(key,\n start=float(val[0]),\n stop=float(val[1]),\n step=float(val[2]))\n\n if sim_outputs:\n for key, val in sim_outputs.items():\n tb.add_output(key, val)\n\n def get_noise_offset(self, data, Tper, tdelay, fig_loc=None):\n # type : (Dict[str, Any], float, float) -> Dict[str, List[float]]\n\n sigma_list, offset_list = list(), list()\n axis_names = ['corner', 'vin', 'time']\n sweep_vars = data['sweep_params']['VO']\n swp_corner = ('corner' in sweep_vars)\n time = data['time']\n vin = data['vin']\n vo_corner = data['VO'].copy()\n\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n vo_corner = vo_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n vo_corner = np.transpose(vo_corner, axes=order)\n for corner, vo_arr in zip(corner_list, vo_corner):\n prob_one_list = list()\n for vo in vo_arr:\n fval = interp.interp1d(time, vo, kind='cubic')\n tsample = tdelay + Tper/2\n zero_counter = 0\n one_counter = 0\n while tsample < time[-1]:\n vsample = fval(tsample)\n\n if vsample > 1e-6:\n one_counter += 1\n elif vsample < 1e-6:\n zero_counter += 1\n else:\n raise ValueError(\"encountered a zero during sampling\")\n\n tsample += Tper\n\n prob_one = one_counter / (one_counter+zero_counter)\n prob_one_list.append(prob_one)\n\n prob_one_array = np.array(prob_one_list)\n\n def cdf(x, sigma, mu):\n return 0.5 * (erf((x - mu) / (2 ** 0.5 * sigma)) + 1)\n\n (sigma, mu), fit_cov = sciopt.curve_fit(cdf, vin, prob_one_array)\n\n x_val = np.linspace(vin[0], vin[-1], 50)\n fitted_val = cdf(x_val, sigma, mu)\n # print(\"sigma_{}={}\".format(corner, sigma))\n # print(\"offset_{}={}\".format(corner, mu))\n\n if fig_loc:\n plt.plot(x_val, fitted_val, 'r--')\n plt.plot(vin, prob_one_array, \"*-\")\n plt.savefig(os.path.join(fig_loc, \"Noise_CDF_{}.png\".format(corner)), dpi=200)\n plt.close()\n sigma_list.append(float(sigma))\n offset_list.append(float(mu))\n\n noise_offset_params = dict(\n corner=corner_list,\n sigma=sigma_list,\n offset=offset_list,\n )\n\n return noise_offset_params\n\n\nclass OverDriveTB(TestbenchManager):\n def setup_testbench(self, tb):\n # not done properly, safer to make them equal\n sim_vars = self.specs.get('sim_vars', None)\n if sim_vars is not None:\n sim_vars['td'] = sim_vars['Tper']/4\n sim_outputs = self.specs.get('sim_outputs', None)\n\n for key, value in sim_vars.items():\n tb.set_parameter(key, value)\n if sim_outputs is not None:\n for key, val in sim_outputs.items():\n tb.add_output(key, val)\n\n def add_plot(self, data, yaxis_key=None, xaxis_key='time'):\n if yaxis_key is None:\n raise ValueError('yaxis_key should be specified')\n if yaxis_key not in data:\n raise ValueError('yaxis_key = {} not found in data keywords'.format(yaxis_key))\n plt.plot(data[xaxis_key], data[yaxis_key])\n\n def 
save_plot(self, fname):\n plt.grid()\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n\n def get_overdrive_params(self, data, Tper, tsetup, c_wait, fig_loc=None):\n v_charge_list, v_reset_list, v_out_list, ibias_list = list(), list(), list(), list()\n axis_names = ['corner', 'time']\n sweep_vars = data['sweep_params']['outdiff']\n swp_corner = ('corner' in sweep_vars)\n time = data['time']\n vout_corner = data['outdiff'].copy()\n\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n vout_corner = vout_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n vout_corner = np.transpose(vout_corner, axes=order)\n\n for corner, vout in zip(corner_list, vout_corner):\n\n fvout = interp.interp1d(time, vout, kind='cubic')\n t_charge = c_wait * Tper - tsetup\n t_reset = (c_wait+0.5) * Tper - tsetup\n t_out = (c_wait + 1) * Tper - tsetup\n\n v_charge = fvout(t_charge)\n v_reset = fvout(t_reset)\n v_out = fvout(t_out)\n\n index = corner_list.index(corner)\n\n if swp_corner:\n ibias = np.abs(data['ibias'][index])\n else:\n ibias = np.abs(data['ibias'])\n\n if fig_loc:\n if swp_corner:\n plt.plot(time, data['inclk'][index])\n plt.plot(time, data['outdiff'][index])\n plt.plot(time, data['indiff'][index])\n else:\n plt.plot(time, data['inclk'])\n plt.plot(time, data['outdiff'])\n plt.plot(time, data['indiff'])\n plt.savefig(os.path.join(fig_loc, 'overdrive_{}.png'.format(corner)), dpi=200)\n plt.close()\n\n v_charge_list.append(float(v_charge))\n v_reset_list.append(float(v_reset))\n v_out_list.append(float(v_out))\n ibias_list.append(float(ibias))\n\n output = dict(\n v_charge=v_charge_list,\n v_reset=v_reset_list,\n v_out=v_out_list,\n ibias=ibias_list,\n corner=corner_list,\n )\n\n return output"
},
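The NoiseWithCDFFitting testbench above turns a swept-input transient into an input-referred noise sigma and offset mu by fitting the measured probability of resolving a '1' to a Gaussian CDF. Below is a minimal, self-contained sketch of just that fitting step; the synthetic prob_one curve and the values sigma_true and mu_true are invented stand-ins for simulation results, not taken from the source.

import numpy as np
import scipy.optimize as sciopt
from scipy.special import erf

def cdf(x, sigma, mu):
    # Gaussian CDF, the same closed form used inside get_noise_offset
    return 0.5 * (erf((x - mu) / (2 ** 0.5 * sigma)) + 1)

# Invented stand-in for the measured probability-of-one curve.
sigma_true, mu_true = 2e-3, 0.5e-3          # 2 mV rms noise, 0.5 mV offset
vin = np.linspace(-10e-3, 10e-3, 21)        # swept differential input (V)
rng = np.random.default_rng(0)
prob_one = np.clip(cdf(vin, sigma_true, mu_true)
                   + rng.normal(0.0, 0.01, vin.size), 0.0, 1.0)

# Fit sigma (input-referred noise) and mu (offset) to the sampled CDF.
(sigma_fit, mu_fit), _ = sciopt.curve_fit(cdf, vin, prob_one, p0=(1e-3, 0.0))
print('sigma = {:.3e} V, offset = {:.3e} V'.format(sigma_fit, mu_fit))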
{
"alpha_fraction": 0.5825870633125305,
"alphanum_fraction": 0.5872305035591125,
"avg_line_length": 35.319278717041016,
"blob_id": "483e0e2ad4f4ec27184193790ab14abf93f9dcd2",
"content_id": "f4667d228cee363555b8293e277ed762ec82a16f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6030,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 166,
"path": "/verification_kh/GenericACMM.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "\n\"\"\"\nThis module defines an generic Measurment manager for any amplifier architecture, with voltage input and\nvoltage output. For more complicated mangement of testbench results one should use subclasses of this module\nin which they override the basic functionalities.\n\"\"\"\n\nfrom bag.simulation.core import MeasurementManager, TestbenchManager\nimport numpy as np\nimport IPython\nimport pdb\nimport matplotlib.pyplot as plt\nimport itertools\nimport os\nfrom bag.io.sim_data import save_sim_results\n\nclass GenericACMM(MeasurementManager):\n\n def __init__(self, *args, **kwargs):\n MeasurementManager.__init__(self, *args, **kwargs)\n self.overall_results = {}\n\n def get_initial_state(self):\n # type: () -> str\n \"\"\"Returns the initial FSM state.\"\"\"\n return 'ac_forward'\n\n def process_output(self, state, data, tb_manager):\n # type: (str, Dict[str, Any], TestbenchManager) -> Tuple[bool, str, Dict[str, Any]]\n \"\"\"Process simulation output data.\n\n Parameters\n ----------\n state : str\n the current FSM state\n data : Dict[str, Any]\n simulation data dictionary.\n tb_manager : GenericACTB\n the testbench manager object.\n\n Returns\n -------\n done : bool\n True if this measurement is finished.\n next_state : str\n the next FSM state.\n output : Dict[str, Any]\n a dictionary containing post-processed data.\n \"\"\"\n if state == 'ac_forward':\n self.add_plot(state, data, 'outdiff', title='ac_forward')\n self.run_ac_forward_post_process(data, tb_manager)\n\n elif state == 'ac_common_mode':\n self.add_plot(state, data, 'outcm', title=state)\n self.run_common_mode_post_process(data, tb_manager)\n\n elif state == 'ac_power_supply':\n self.add_plot(state, data, 'outcm', title=state)\n self.run_power_supply_post_process(data, tb_manager)\n\n next_state = self.get_next_state(state)\n done = self.is_done(next_state)\n\n return done, next_state, self.overall_results\n\n def is_done(self, state):\n return state == ''\n\n def get_next_state(self, state):\n if state == 'ac_forward':\n next_state = 'ac_common_mode' if 'ac_common_mode' in self.specs['testbenches'] else ''\n elif state == 'ac_common_mode':\n next_state = 'ac_power_supply' if 'ac_power_supply' in self.specs['testbenches'] else ''\n elif state == 'ac_power_supply':\n next_state = ''\n else:\n raise ValueError('Unknown state: %s' % state)\n return next_state\n\n def run_ac_forward_post_process(self, data, tb_manager):\n\n output_dict = tb_manager.get_gain_and_w3db(data, ['outdiff'])\n results = dict(\n dc_gain=output_dict['gain_outdiff'],\n f3db=1/2/np.pi*output_dict['w3db_outdiff'],\n ibias=np.abs(data['ibias']),\n corners=data['corner'],\n )\n self.overall_results.update(**results)\n\n return results\n\n def run_common_mode_post_process(self, data, tb_manager):\n\n output_dict = tb_manager.get_gain_and_w3db(data, ['outcm'])\n # preprocess cm gain before computing cmrr, clip it to some epsilon if it's too small\n output_dict['gain_outcm'] = np.clip(output_dict['gain_outcm'], a_min=1.0e-10, a_max=None)\n # compute cmrr\n dc_gain = self.overall_results['dc_gain']\n cmrr_db = 20 * np.log10(dc_gain / output_dict['gain_outcm'])\n results = dict(\n gain_cm=output_dict['gain_outcm'],\n cmrr_db=cmrr_db,\n corners=data['corner'],\n )\n self.overall_results.update(results)\n\n def run_power_supply_post_process(self, data, tb_manager):\n\n output_dict = tb_manager.get_gain_and_w3db(data, ['outcm'])\n # preprocess power supply gain before computing psrr, clip it to some epsilon if it's too small\n output_dict['gain_outcm'] = 
np.clip(output_dict['gain_outcm'], a_min=1.0e-10, a_max=None)\n # compute psrr\n dc_gain = self.overall_results['dc_gain']\n psrr_db = 20 * np.log10(dc_gain / output_dict['gain_outcm'])\n results = dict(\n gain_ps=output_dict['gain_outcm'],\n psrr_db=psrr_db,\n corners=data['corner'],\n )\n self.overall_results.update(**results)\n\n def add_plot(self, state, data, y_axis, x_axis='freq', ax=None, title=None, log_axis= 'x', save=True, show=False):\n \"\"\"\n this function should plot the data and maybe save it if needed. It depends on the MeasurementManager subclass.\n For more comlpex ploting function it should be overwritten and use state variable for conditioning\n :param state:\n :param data:\n :param y_axis:\n :param x_axis:\n :param ax:\n :param title:\n :param save:\n :param show:\n :param log_axis: 'x'|'y'|'both'|'none'\n :return:\n \"\"\"\n\n functions_dict = {'x': plt.semilogx, 'y': plt.semilogx, 'both': plt.loglog, 'none': plt.plot}\n #TODO: Unfinished\n\n if ax is None:\n fig = plt.figure()\n ax = plt.gca()\n\n if title is None:\n title = y_axis\n\n sweep_kwrds = data['sweep_params'][y_axis]\n sweep_kwrds = [kwrd for kwrd in sweep_kwrds if kwrd != x_axis]\n combos = itertools.product(*(data[swp_kwrd] for swp_kwrd in sweep_kwrds))\n\n # for values in zip(combos):\n plt.grid()\n functions_dict[log_axis](data[x_axis], np.abs(data[y_axis][0]), label='ff')\n functions_dict[log_axis](data[x_axis], np.abs(data[y_axis][1]), label='tt')\n plt.ylabel(y_axis)\n plt.xlabel(x_axis)\n if save:\n fname = os.path.join(self.data_dir, title + \".png\")\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n if show:\n plt.show()\n"
},
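The common-mode and power-supply post-processing above guard the CMRR/PSRR logarithm by clipping a vanishing common-mode gain to an epsilon before dividing. A tiny sketch of just that arithmetic, with made-up gain numbers standing in for simulation output:

import numpy as np

dc_gain = np.array([120.0, 95.0])   # differential DC gain per corner (invented)
gain_cm = np.array([3.1e-3, 0.0])   # common-mode gain; one corner underflows to zero

# Clip tiny/zero CM gain to an epsilon before dividing, as in the manager above.
gain_cm = np.clip(gain_cm, a_min=1.0e-10, a_max=None)
cmrr_db = 20 * np.log10(dc_gain / gain_cm)
print(cmrr_db)   # second entry is huge but finite instead of inf/NaN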
{
"alpha_fraction": 0.5803366303443909,
"alphanum_fraction": 0.5937260985374451,
"avg_line_length": 39.53488540649414,
"blob_id": "75b8c8770e3b2f74b2bc80d8d2c9a413e8d89ba2",
"content_id": "a991758299954e3865278bcd6f40396d76a7eedb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5228,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 129,
"path": "/verification_kh/GenericACTB.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines an extension of AC testbench class found in bag_testbenches_ec.\"\"\"\n\nimport numpy as np\nfrom scipy import interpolate\nimport IPython\nimport pdb\nimport matplotlib.pyplot as plt\nimport itertools\nimport os\nfrom bag.io.sim_data import save_sim_results\n\nfrom verification_ec.ac.core import ACTB\n\n\n\nclass GenericACTB(ACTB):\n @classmethod\n def get_dc_gain_max_gain_first_pole(cls, data, output_list):\n \"\"\"\n Returns a dictionary of dc gain, maximum gain, and first pole\n This is makes sense if we have a CTLE where there is a LHP zero at low frequencies\n But if the frequency behaviour is monotonically decreasing it should be equivalent\n to get_gain_and_w3db(...)\n\n Parameters\n ----------\n data : Dict[str, Any]\n the simulation data dictionary.\n output_list : Sequence[str]\n list of output names to compute gain/bandwidth for.\n\n Returns\n -------\n output_dict : Dict[str, Any]\n A BAG data dictionary containing the gain/bandwidth information.\n \"\"\"\n output_dict = {}\n swp_info = data['sweep_params']\n f_vec = data['freq']\n for out_name in output_list:\n out_arr = data[out_name]\n swp_params = swp_info[out_name]\n freq_idx = swp_params.index('freq')\n new_swp_params = [par for par in swp_params if par != 'freq']\n dc_gain, max_gain, first_pole = cls._compute_dc_gain_max_gain_first_pole(f_vec,np.abs(out_arr), freq_idx)\n\n\n cls.record_array(output_dict, data, dc_gain, 'dc_gain_' + out_name, new_swp_params)\n cls.record_array(output_dict, data, max_gain, 'max_gain_' + out_name, new_swp_params)\n cls.record_array(output_dict, data, first_pole, 'first_pole_' + out_name, new_swp_params)\n\n return output_dict\n\n @classmethod\n def _compute_dc_gain_max_gain_first_pole(cls, f_vec, out_arr, freq_idx):\n \"\"\"\n General Idea, we have 3 cases:\n 1. normal CTLE operation with a bump\n 2. no zero at low frequency, just like an amplifier\n 3. wierd behavior of going down and then comming back up\n We find the intersections with c and 0.99*dc_gain\n in case 1, 1.01*dc_gain is gonna happen before 0.99*dc_gain\n in case 2, 1.01*dc_gain has no crossings and 0.99*dc_gain has one\n in case 3, 0.99*dc_gain is gonna happen before 1.01*dc_gain\n In case 2,3 first pole is going to be w3db, but in case 3 it is \n computed assuming 2nd pole onwards are far.\n \n Simpler solution is adopted for now\n \n Parmeters\n ---------\n f_vec : np.ndarray\n the frequency vector. Must be sorted.\n out_arr : np.ndarray\n the block's output transfer function. 
Could be multidimensional.\n freq_idx : int\n frequency axis index.\n\n Returns\n -------\n dc_gain : np.ndarray\n the DC gain array.\n max_gain : np.ndarray\n the maximum gain array.\n first_pole: np.ndarray\n the first pole array, it could be w3db if max_gain is dc_gain.\n If that's not the case the first pole is drived from the intersection\n of the theoretical line with transfer function if the remaining poles are\n assumed to be far\n \"\"\"\n\n first_pole_list = []\n # move frequency axis to last axis\n out_arr = np.moveaxis(out_arr, freq_idx, -1)\n gain_arr = out_arr[..., 0]\n max_gain_arr = np.max(out_arr, axis=-1)\n\n output_shape = gain_arr.shape\n _, w3db_arr = cls._compute_gain_and_w3db(f_vec, out_arr, freq_idx)\n\n # gain_flat = gain_arr.flatten()\n # max_gain_flat = max_gain_arr.flatten()\n w3db_flat = w3db_arr.flatten()\n out_arr_flat = np.reshape(out_arr, newshape=(-1, out_arr.shape[-1]))\n\n for w3db, vout in zip(w3db_flat, out_arr_flat):\n dc_gain = vout[0]\n max_gain = np.max(vout)\n upper_bound_idx = np.argmax(vout)\n \n # fun_upper = interpolate.interp1d(f_vec, vout-1.01*dc_gain, kind='cubic')\n # fun_lower = interpolate.interp1d(f_vec, vout-0.99*dc_gain, kind='cubic')\n # upper_intersect = cls._get_intersect(fun_upper, f_vec[0], f_vec[upper_bound_idx])\n # lower_intersect = cls._get_intersect(fun_lower, f_vec[0], f_vec[-1])\n \n # rule of thumb: if there is at list 1% bump the behaviour is like CTLE otherwise it's just w3db\n if (max_gain / dc_gain) <= 1.01:\n first_pole_list.append(w3db)\n else:\n # compute the intersection with the theoretical line\n intersect = dc_gain / np.sqrt(2) * np.sqrt(1 + (max_gain / dc_gain) ** 2)\n fzero = interpolate.interp1d(f_vec, vout - intersect, kind='cubic')\n first_pole = cls._get_intersect(fzero, f_vec[0], f_vec[upper_bound_idx])\n first_pole_list.append(first_pole)\n\n first_pole_arr = np.reshape(first_pole_list, newshape=output_shape)\n return gain_arr, max_gain_arr, first_pole_arr"
},
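For the CTLE-like case, the helper above locates the first pole by intersecting the magnitude response with dc_gain/sqrt(2) * sqrt(1 + (max_gain/dc_gain)**2), the level a zero-plus-pole response crosses at its first pole when the higher-order poles are far away. A runnable sketch of that recipe on a synthetic response; fz, fp1, and fp2 are illustrative values, not from the source:

import numpy as np
import scipy.interpolate as interp
import scipy.optimize as sciopt

# Synthetic CTLE-like magnitude response: one zero, two poles.
fz, fp1, fp2 = 1e8, 1e9, 2e10
f_vec = np.logspace(6, 11, 501)
s = 2j * np.pi * f_vec
h = np.abs((1 + s / (2 * np.pi * fz)) /
           ((1 + s / (2 * np.pi * fp1)) * (1 + s / (2 * np.pi * fp2))))

dc_gain, max_gain = h[0], h.max()
if max_gain / dc_gain > 1.01:  # at least a 1% bump: treat as CTLE
    # level the response crosses at its first pole if later poles are far away
    level = dc_gain / np.sqrt(2) * np.sqrt(1 + (max_gain / dc_gain) ** 2)
    fzero = interp.interp1d(f_vec, h - level, kind='cubic')
    first_pole = sciopt.brentq(fzero, f_vec[0], f_vec[np.argmax(h)])
    print('first pole ~ {:.3e} Hz (fp1 = {:.1e} Hz)'.format(first_pole, fp1))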
{
"alpha_fraction": 0.49779006838798523,
"alphanum_fraction": 0.4983425438404083,
"avg_line_length": 43.14634323120117,
"blob_id": "3f46a0d56b59ab34ab38fb0a25e6611f79b7b309",
"content_id": "b71a17c7b2611ef10a1b31a5d5e129d78007e558",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1810,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 41,
"path": "/verification_kh/MyComparatorMM.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "from bag.simulation.core import MeasurementManager\n\nfrom typing import TYPE_CHECKING, Tuple, Dict, Any, Union\nif TYPE_CHECKING:\n from .ComparatorTB import NoiseWithCDFFitting, OverDriveTB\n\n\nclass MyComparatorMM (MeasurementManager):\n def __init__(self, *args, **kwargs):\n MeasurementManager.__init__(self, *args, **kwargs)\n self.overall_results = dict()\n\n def get_initial_state(self):\n # type: () -> str\n \"\"\"Returns the initial FSM state.\"\"\"\n return 'od'\n\n def process_output(self, state, data, tb_manager):\n # type: (str, Dict[str, Any], Union[NoiseWithCDFFitting, OverDriveTB]) -> Tuple[bool, str, Dict[str, Any]]\n done = False\n next_state = ''\n\n if state == 'od':\n results = tb_manager.get_overdrive_params(data,\n Tper=self.specs['testbenches']['od']['sim_vars']['Tper'],\n tsetup=self.specs['tsetup'],\n c_wait=self.specs['testbenches']['od']['sim_vars']['c_wait'],\n fig_loc=self.data_dir)\n next_state = 'noise'\n elif state == 'noise':\n results = tb_manager.get_noise_offset(data,\n Tper=self.specs['testbenches']['noise']['sim_vars']['Tper'],\n tdelay=self.specs['testbenches']['noise']['sim_vars']['tdelay']/2,\n fig_loc=self.data_dir)\n done = True\n\n else:\n raise ValueError('Unknown state: %s' % state)\n\n self.overall_results.update(**results)\n return done, next_state, self.overall_results\n"
},
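MyComparatorMM chains its two testbenches through the measurement FSM: the 'od' state runs first, its post-processing names 'noise' as the next state, and done is flagged after the noise fit. A framework-free sketch of the same control flow; run_fsm and the lambda testbench runner are stand-ins for illustration, not the BAG API:

def run_fsm(process_output, run_testbench, initial_state):
    # Minimal driver mirroring the done/next_state contract used above.
    state, overall = initial_state, {}
    while state:
        data = run_testbench(state)              # simulate this state's testbench
        done, state, results = process_output(state, data)
        overall.update(results)                  # accumulate like overall_results
        if done:
            break
    return overall

def process_output(state, data):
    # Hypothetical two-state flow: 'od' first, then 'noise', then done.
    if state == 'od':
        return False, 'noise', {'od': data}
    elif state == 'noise':
        return True, '', {'noise': data}
    raise ValueError('Unknown state: %s' % state)

print(run_fsm(process_output, lambda s: s + '_data', 'od'))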
{
"alpha_fraction": 0.5583293437957764,
"alphanum_fraction": 0.5625540018081665,
"avg_line_length": 39.81960678100586,
"blob_id": "de625b029e0b4c89ca5fee6c9006ebd194b495dd",
"content_id": "cdbbeb5ecebeb24f4736bda60cb1e9be38aff97c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10415,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 255,
"path": "/verification_kh/PhotonicLinkAFERX.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "from typing import TYPE_CHECKING, List, Tuple, Dict, Any, Sequence, Optional\n\nif TYPE_CHECKING:\n from bag.core import Testbench\n\nfrom bag.simulation.core import TestbenchManager, MeasurementManager\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport numpy as np\nfrom verification_kh.GenericACMM import GenericACMM\nfrom verification_kh.TIA import TIATBM\nimport itertools\nimport os\nimport scipy.interpolate as interp\nimport scipy.optimize as sciopt\nimport scipy.integrate as integ\nimport pdb\n\n\nclass PhotonicLinkAFERXMM(MeasurementManager):\n\n def __init__(self, *args, **kwargs):\n MeasurementManager.__init__(self, *args, **kwargs)\n self.overall_results = {}\n\n def get_initial_state(self):\n # type: () -> str\n \"\"\"Returns the initial FSM state.\"\"\"\n return 'tia_diff'\n\n def get_testbench_info(self, # type: MeasurementManager\n state, # type: str\n prev_output, # type: Optional[Dict[str, Any]]\n ):\n tb_type = state\n tb_name = self.get_testbench_name(tb_type)\n tb_specs = self.get_testbench_specs(tb_type).copy()\n tb_specs['sim_vars']['ibias_in'] = self.specs['ibias_in']\n tb_params = self.get_default_tb_sch_params(tb_type)\n\n return tb_name, tb_type, tb_specs, tb_params\n\n def process_output(self, state, data, tb_manager):\n # type: (str, Dict[str, Any], TIAMM) -> Tuple[bool, str, Dict[str, Any]]\n\n done = True\n next_state = ''\n\n if state == 'tia_diff':\n self.post_process_diff(state, data, tb_manager)\n next_state = 'tia_cm'\n done = False\n\n elif state == 'tia_cm':\n self.post_process_cm(state, data, tb_manager)\n next_state = ''\n done = True\n\n return done, next_state, self.overall_results\n\n def post_process_diff(self, state, data, tb_manager):\n # ax = plt.gca()\n # self.add_plot(state, data, 'outpTIA', ax=ax, title='R_tia', function=lambda x: 20*np.log10(2*np.abs(x)), save=False)\n # self.add_plot(state, data, 'outdiff', ax=ax, title='R_tia_ctle', function=lambda x: 20 * np.log10(np.abs(x)),\n # save=True)\n # self.add_plot(state, data, 'input_noise', title='input_noise', function=lambda x: np.abs(x))\n # self.add_plot(state, data, 'out_tran', x_axis='time', log_axis='none', title='out_tran',\n # function=lambda x: x)\n\n ac_res = tb_manager.get_R_and_f3db_new(data, 'outdiff', fig_loc=self.data_dir)\n # if you want to run settling time simulation (i.e. inject a small signal step and measure the settling time to\n # the total desired percentage error value uncomment the tb_manager.get_test command, comment the get_eye_height\n # command and also in the yaml file make sure the pulse width is 500ms. 
Also don't\n\n tran_res = None\n # tran_res = tb_manager.get_tset_new(data, output_name='out_tran', input_name='in_tran',\n # tot_err=self.specs['tset_tol'], gain_list=ac_res['gain'], plot_loc='./tset_debug.png')\n\n eye_char = None\n # if you want the eye_height make sure the settling time is commented and pulse width in the yaml file is Tbit.\n eye_char = self.get_eye_height_approximation(data, 'out_tran', tstop=tb_manager.specs['sim_vars']['tstop'],\n Tbit=self.specs['Tbit'], tmargin=self.specs['tmargin'], fig_loc=self.data_dir)\n f3db_list = ac_res['f3db']\n input_noise, output_noise = tb_manager.get_integrated_noise_new(data, ['input_noise', 'output_noise'], f3db_list, fig_loc=self.data_dir)\n\n if data['ibias'].shape:\n ibias= np.abs(data['ibias']).tolist()\n else:\n ibias = [np.abs(data['ibias']).tolist()]\n\n output = dict(\n corner=ac_res['corner'],\n r_afe=ac_res['gain'],\n f3db=f3db_list,\n rms_input_noise=input_noise['integ_noise'],\n rms_output_noise=output_noise['integ_noise'],\n ibias=ibias,\n )\n\n if tran_res:\n output['tset'] = tran_res['tset']\n\n if eye_char:\n eye_level_thickness_ratio = []\n for height in eye_char['height']:\n if height == 0:\n eye_level_thickness_ratio.append(1)\n else:\n index = eye_char['height'].index(height)\n eye_level_thickness_ratio.append(eye_char['level_thickness'][index]/eye_char['height'][index])\n\n output.update(dict(eye_height=eye_char['height'],\n eye_span_height=eye_char['span_height'],\n eye_level_thickness = eye_char['level_thickness'],\n eye_level_thickness_ratio = eye_level_thickness_ratio,\n ))\n\n self.overall_results.update(**output)\n\n def get_eye_height_approximation(self, data, output_name, tstop, Tbit, tmargin, thresh=2e-5, fig_loc=None):\n axis_names = ['corner', 'time']\n height_list, span_height_list, level_thickness_list = list(), list(), list()\n\n sweep_vars = data['sweep_params'][output_name]\n swp_corner = ('corner' in sweep_vars)\n output_corner = data[output_name]\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n output_corner = output_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n output_corner = np.transpose(output_corner, axes=order)\n\n for corner, v_output in zip(corner_list, output_corner):\n if fig_loc:\n plt.plot(data['time'], v_output)\n plt.savefig(os.path.join(fig_loc, 'tran_out_{}'.format(corner)), dpi=200)\n plt.close()\n\n out = np.abs(v_output)\n time_max_out = data['time'][np.argmax(out)]\n time_max_out = min(time_max_out, Tbit)\n\n f_out = interp.interp1d(data['time'], out, kind='cubic')\n\n # compute the main eye height\n sample_time = time_max_out + Tbit\n eye_level_thickness=0\n while sample_time < tstop:\n v = f_out(sample_time)\n eye_level_thickness+=abs(v)\n sample_time += Tbit\n\n eye_height = f_out(time_max_out) - eye_level_thickness\n if eye_height < 0 :\n eye_height = 0\n\n # compute the left marginal eye height\n left_eye_height = f_out(time_max_out-tmargin)\n sample_time = time_max_out - tmargin + Tbit\n while sample_time < tstop:\n v = f_out(sample_time)\n left_eye_height-=abs(v)\n sample_time += Tbit\n\n # compute the right marginal eye height\n right_eye_height = f_out(time_max_out+tmargin)\n sample_time = time_max_out + tmargin + Tbit\n while sample_time < tstop:\n v = f_out(sample_time)\n right_eye_height-=abs(v)\n sample_time += Tbit\n\n # the overall eye_span_height is going to be the minimum of the right and left one\n eye_span_height = 
min(left_eye_height, right_eye_height)\n if eye_span_height < 0:\n eye_span_height = 0\n\n height_list.append(float(eye_height))\n span_height_list.append(float(eye_span_height))\n level_thickness_list.append(float(eye_level_thickness))\n\n eye_char = dict(\n corner=corner_list,\n height=height_list,\n span_height=span_height_list,\n level_thickness=level_thickness_list,\n )\n\n return eye_char\n\n\n def post_process_cm(self, state, data, tb_manager):\n # self.add_plot(state, data, 'outcm', title='cm_cm', function=lambda x: 20 * np.log10(np.abs(x)))\n # self.add_plot(state, data, 'outdiff', title='cm_diff', function=lambda x: 20 * np.log10(np.abs(x)))\n # outcm_res, outdiff_res = tb_manager.get_R_and_f3db_new(data, ['outcm', 'outdiff'], fig_loc=self.data_dir)\n outcm_res, outdiff_res = tb_manager.get_gain_new(data, ['outcm', 'outdiff'], fig_loc=self.data_dir)\n output = dict(\n cmcm_gain=outcm_res['gain'],\n cmdm_gain=outdiff_res['gain'],\n cmrr=(np.array(self.overall_results['r_afe'])/np.array(outcm_res['gain'])).tolist(),\n )\n self.overall_results.update(**output)\n\n def add_plot(self, state, data, y_axis, function=lambda x: x, x_axis='freq', ax=None, title=None, log_axis='x', save=True, show=False):\n \"\"\"\n Plot the data and optionally save it; the details depend on the MeasurementManager subclass.\n For more complex plotting this method should be overridden, using the state variable for conditioning.\n :param state:\n :param data:\n :param y_axis:\n :param x_axis:\n :param ax:\n :param title:\n :param save:\n :param show:\n :param log_axis: 'x'|'y'|'both'|'none'\n :return:\n \"\"\"\n\n functions_dict = {'x': plt.semilogx, 'y': plt.semilogy, 'both': plt.loglog, 'none': plt.plot}\n #TODO: Unfinished\n\n if ax is None:\n fig = plt.figure()\n ax = plt.gca()\n\n if title is None:\n title = y_axis\n\n # import IPython\n sweep_kwrds = data['sweep_params'][y_axis]\n sweep_kwrds = [kwrd for kwrd in sweep_kwrds if kwrd != x_axis]\n # combos = itertools.product(*(list(range(len(data[swp_kwrd]))) for swp_kwrd in sweep_kwrds))\n\n # IPython.embed()\n # if combos:\n # for index in combos:\n # functions_dict[log_axis](data[x_axis], np.abs(data[y_axis][index, :]))\n # else:\n functions_dict[log_axis](data[x_axis], function(data[y_axis]))\n plt.ylabel(y_axis)\n plt.xlabel(x_axis)\n ax.grid()\n if save:\n fname = os.path.join(self.data_dir, title + \".png\")\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n if show:\n plt.show()\n"
},
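get_eye_height_approximation above estimates the eye opening from a single simulated pulse response: the main cursor minus the summed magnitudes of the ISI cursors sampled one bit apart. A self-contained sketch of that estimate on a synthetic first-order pulse response; Tbit, tau, and tstop are illustrative numbers, not from the source:

import numpy as np
import scipy.interpolate as interp

Tbit, tau, tstop = 50e-12, 15e-12, 500e-12   # illustrative timing values
time = np.linspace(0, tstop, 2001)
# Synthetic pulse response: RC rise for one bit period, then decay.
out = np.where(time < Tbit,
               1 - np.exp(-time / tau),
               (1 - np.exp(-Tbit / tau)) * np.exp(-(time - Tbit) / tau))

f_out = interp.interp1d(time, np.abs(out), kind='cubic')
t_main = min(time[np.argmax(np.abs(out))], Tbit)   # main-cursor sample time

# Sum the |ISI| cursors one Tbit apart, as in the measurement manager above.
isi, t = 0.0, t_main + Tbit
while t < tstop:
    isi += abs(float(f_out(t)))
    t += Tbit
eye_height = max(float(f_out(t_main)) - isi, 0.0)
print('eye height ~ {:.3f} (main cursor {:.3f}, ISI {:.3f})'
      .format(eye_height, float(f_out(t_main)), isi))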
{
"alpha_fraction": 0.5935065150260925,
"alphanum_fraction": 0.5948051810264587,
"avg_line_length": 31.10416603088379,
"blob_id": "421302c4ce5e16b7c66ae84e5346504a65f7d4c2",
"content_id": "6428b524a8fb34ac7b727ec42035a1f5115efca0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1540,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 48,
"path": "/BagModules/bag_testbenches_kh/DTSA_dsn_wrapper.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design import Module\n\n\nyaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'DTSA_dsn_wrapper.yaml'))\n\n\n# noinspection PyPep8Naming\nclass bag_testbenches_kh__DTSA_dsn_wrapper(Module):\n \"\"\"Module for library bag_testbenches_kh cell DTSA_dsn_wrapper.\n \"\"\"\n\n def __init__(self, bag_config, parent=None, prj=None, **kwargs):\n Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n \"\"\"Returns a dictionary from parameter names to descriptions.\n\n Returns\n -------\n param_info : Optional[Dict[str, str]]\n dictionary from parameter names to descriptions.\n \"\"\"\n return dict(\n dut_lib='DUT library name.',\n dut_cell='DUT cell name.',\n dut_conns='DUT connection dictionary.',\n )\n\n def design(self, # type: bag_testbenches_kh__DTSA_dsn_wrapper\n dut_lib='', # type: str\n dut_cell='', # type: str\n dut_conns=None, # type: Dict[str, str]\n ):\n # type: (...) -> None\n self.replace_instance_master('XDUT', dut_lib, dut_cell, static=True)\n\n # if dut_conns are different from the default, reconnect the terminals\n for dut_pin, net_name in dut_conns.items():\n self.reconnect_instance_terminal('XDUT', dut_pin, net_name)"
},
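Both design wrappers in this repo follow the same pattern: swap XDUT for the actual design and rewire its pins from a dut_conns dictionary. A hypothetical parameter set showing how such a wrapper's design() would be driven; the library, cell, and net names here are invented for illustration:

# Hypothetical parameter dict a DesignManager-style flow might pass to
# DTSA_dsn_wrapper.design(); all names below are invented.
dsn_params = dict(
    dut_lib='my_comparator_lib',
    dut_cell='dtsa_core',
    dut_conns={
        'inp': 'inp', 'inn': 'inn',       # differential inputs
        'outp': 'outp', 'outn': 'outn',   # differential outputs
        'clk': 'clk', 'VDD': 'VDD', 'VSS': 'VSS',
    },
)
# wrapper.design(**dsn_params) would replace XDUT with
# my_comparator_lib/dtsa_core and reconnect each listed terminal.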
{
"alpha_fraction": 0.5757372379302979,
"alphanum_fraction": 0.5790884494781494,
"avg_line_length": 32.8863639831543,
"blob_id": "d4bb1f7132c3e44cf272873099948487e4b41d0c",
"content_id": "e60a8447482ecd3b92e4ea41e7031fa000883f2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1492,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 44,
"path": "/BagModules/bag_testbenches_kh/diff2SingleEnded_wrapper_ac_forward.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design.module import Module\n\n\n# noinspection PyPep8Naming\nclass bag_testbenches_kh__diff2SingleEnded_wrapper_ac_forward(Module):\n \"\"\"Module for library bag_testbenches_kh cell diff2SingleEnded_wrapper_ac_forward.\n\n Fill in high level description here.\n \"\"\"\n yaml_file = pkg_resources.resource_filename(__name__,\n os.path.join('netlist_info',\n 'diff2SingleEnded_wrapper_ac_forward.yaml'))\n\n\n def __init__(self, database, parent=None, prj=None, **kwargs):\n Module.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n return dict(\n dut_lib='Device-under-test library name.',\n dut_cell='Device-under-test cell name.',\n dut_conns='DUT connection dictionary.',\n )\n\n def design(self,\n dut_lib='', # type: str\n dut_cell='', # type: str\n dut_conns=None, # type: Dict[str, str]\n ):\n\n self.replace_instance_master('XDUT', dut_lib, dut_cell, static=True)\n\n # if dut_conns are different from the default, reconnect the terminals\n for dut_pin, net_name in dut_conns.items():\n self.reconnect_instance_terminal('XDUT', dut_pin, net_name)\n\n"
},
{
"alpha_fraction": 0.5350221395492554,
"alphanum_fraction": 0.5440182685852051,
"avg_line_length": 36.73141860961914,
"blob_id": "597f32c77855d35c980047707355fd78f6508a40",
"content_id": "77f761fbc3cd8fbaabc84738d992f0942b1dcd40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22343,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 592,
"path": "/verification_kh/TIA.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "from typing import TYPE_CHECKING, List, Tuple, Dict, Any, Sequence, Optional\n\nif TYPE_CHECKING:\n from bag.core import Testbench\n\nfrom bag.simulation.core import TestbenchManager, MeasurementManager\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport numpy as np\nfrom verification_kh.GenericACMM import GenericACMM\nimport itertools\nimport os\nimport scipy.interpolate as interp\nimport scipy.optimize as sciopt\nimport scipy.integrate as integ\nimport pdb\n\n\nclass TIATBM(TestbenchManager):\n def __init__(self,\n data_fname, # type: str\n tb_name, # type: str\n impl_lib, # type: str\n specs, # type: Dict[str, Any]\n sim_view_list, # type: Sequence[Tuple[str, str]]\n env_list, # type: Sequence[str]\n ):\n # type: (...) -> None\n TestbenchManager.__init__(self, data_fname, tb_name, impl_lib, specs,\n sim_view_list, env_list)\n def setup_testbench(self, tb):\n # not done properly, safer to make them equal\n sim_vars = self.specs.get('sim_vars', None)\n sim_outputs = self.specs.get('sim_outputs', None)\n\n # print(sim_vars)\n if sim_vars is not None:\n for key, value in sim_vars.items():\n tb.set_parameter(key, value)\n if sim_outputs is not None:\n if sim_outputs is not None:\n for key, val in sim_outputs.items():\n tb.add_output(key, val)\n\n def get_gain_new(self, data, output_name, fig_loc=None):\n if not isinstance(output_name, List):\n output_names = [output_name]\n else:\n output_names = output_name\n\n result_list = list()\n axis_names = ['corner', 'freq']\n\n for name in output_names:\n gain_list = list()\n sweep_vars = data['sweep_params'][name]\n swp_corner = ('corner' in sweep_vars)\n output_corner = data[name]\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n output_corner = output_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n output_corner = np.transpose(output_corner, axes=order)\n\n for corner, output in zip(corner_list, output_corner):\n gain_list.append(float(np.abs(output)[0]))\n if fig_loc:\n freq = data['freq']\n plt.semilogx(freq, 20*np.log10(np.abs(output)))\n plt.savefig(os.path.join(fig_loc, 'gain_only_{}_{}.png'.format(name, corner)), dpi=200)\n plt.grid()\n plt.close()\n\n result_list.append(dict(\n corner=corner_list,\n gain=gain_list,\n ))\n\n if not isinstance(output_name, List):\n return result_list[0]\n else:\n return result_list\n\n def get_R_and_f3db_new(self, data, output_name, fig_loc=None):\n\n if not isinstance(output_name, List):\n output_names = [output_name]\n else:\n output_names = output_name\n\n result_list = list()\n axis_names = ['corner', 'freq']\n\n for name in output_names:\n gain_list, f3db_list = list(), list()\n sweep_vars = data['sweep_params'][name]\n swp_corner = ('corner' in sweep_vars)\n output_corner = data[name]\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n output_corner = output_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n output_corner = np.transpose(output_corner, axes=order)\n\n for corner, output in zip(corner_list, output_corner):\n freq = data['freq']\n index = corner_list.index(corner)\n gain, f3db = self._compute_gain_and_f3db_new(freq, output)\n gain_list.append(gain)\n f3db_list.append(f3db)\n if fig_loc:\n plt.semilogx(freq, 20*np.log10(np.abs(output)))\n plt.savefig(os.path.join(fig_loc, 'gain_{}_{}.png'.format(name, corner)), dpi=200)\n 
plt.grid()\n plt.close()\n\n result_list.append(dict(\n corner=corner_list,\n gain=gain_list,\n f3db=f3db_list,\n ))\n\n if not isinstance(output_name, List):\n return result_list[0]\n else:\n return result_list\n\n\n def _compute_gain_and_f3db_new(self, f_vec, output_vec):\n gain_vec = np.abs(output_vec)\n gain_dc = gain_vec[0]\n\n gain_log = 20 * np.log10(gain_vec)\n gain_dc_log_3db = 20 * np.log10(gain_dc) - 3\n\n # find first index at which gain goes below gain_log 3db\n diff_arr = gain_log - gain_dc_log_3db\n idx_arr = np.argmax(diff_arr < 0)\n freq_log = np.log10(f_vec)\n freq_log_max = freq_log[idx_arr]\n\n fun = interp.interp1d(freq_log, diff_arr, kind='cubic', copy=False, assume_sorted=True)\n f3db = 10.0 ** (self._get_intersect(fun, freq_log[0], freq_log_max))\n\n return float(gain_dc), float(f3db)\n\n def get_tset_new(self, data, output_name, input_name, tot_err, gain_list, fig_loc=None):\n axis_names = ['corner', 'time']\n\n tset_list = list()\n sweep_vars = data['sweep_params'][output_name]\n swp_corner = ('corner' in sweep_vars)\n output_corner = data[output_name]\n input_corner = data[input_name]\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n output_corner = output_corner[None, :]\n input_corner = input_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n output_corner = np.transpose(output_corner, axes=order)\n input_corner = np.transpose(input_corner, axes=order)\n\n for corner, v_output, v_input, gain in \\\n zip(corner_list, output_corner, input_corner, gain_list):\n time = data['time']\n tset = self._compute_tset_new(time, v_output, v_input, tot_err, gain, fig_loc)\n tset_list.append(tset)\n\n if fig_loc:\n plt.semilogx(time, v_output)\n plt.savefig(os.path.join(fig_loc, 'transient_output_{}.png'.format(corner)), dpi=200)\n plt.grid()\n plt.close()\n\n result = dict(\n corner = corner_list,\n tset = tset_list,\n )\n\n return result\n\n def _compute_tset_new(self, t, vout, vin, tot_err, gain, fig_loc=None):\n if fig_loc:\n plt.figure()\n plt.plot(t, [1+tot_err]*len(t), 'r--')\n plt.plot(t, [1-tot_err]*len(t), 'r--')\n\n y = np.abs((vout - vout[0]) / gain / 2 / (vin[-1] - vin[0]))\n\n if fig_loc:\n plt.plot(t, y)\n plt.savefig(os.path.join(fig_loc, 'tran_debug.png'), dpi=200)\n plt.close()\n\n last_idx = np.where(y < 1.0 - tot_err)[0][-1]\n last_max_vec = np.where(y > 1.0 + tot_err)[0]\n if last_max_vec.size > 0 and last_max_vec[-1] > last_idx:\n last_idx = last_max_vec[-1]\n last_val = 1.0 + tot_err\n else:\n last_val = 1.0 - tot_err\n\n if last_idx == t.size - 1:\n return t[-1]\n f = interp.InterpolatedUnivariateSpline(t, y - last_val)\n t0 = t[last_idx]\n t1 = t[last_idx + 1]\n tset = self._get_intersect(f, t0, t1)\n return tset\n\n\n def get_integrated_noise_new(self, data, output_name, bandwidth_list, fig_loc):\n if not isinstance(output_name, list):\n output_names = [output_name]\n else:\n output_names = output_name\n\n result_list = list()\n axis_names = ['corner', 'freq']\n\n for name in output_names:\n integ_noise_list = list()\n sweep_vars = data['sweep_params'][name]\n swp_corner = ('corner' in sweep_vars)\n output_corner = data[name]\n if not swp_corner:\n corner_list = [self.env_list[0]]\n sweep_vars = ['corner'] + sweep_vars\n output_corner = output_corner[None, :]\n else:\n corner_list = data['corner'].tolist()\n\n order = [sweep_vars.index(swp) for swp in axis_names]\n output_corner = np.transpose(output_corner, axes=order)\n\n for 
corner, output, bw in zip(corner_list, output_corner, bandwidth_list):\n freq = data['freq']\n integ_noise = self._compute_integrated_noise_new(freq, output, bw)\n integ_noise_list.append(integ_noise)\n\n if fig_loc:\n plt.semilogx(freq, np.abs(output))\n plt.savefig(os.path.join(fig_loc, 'noise_{}_{}.png'.format(name, corner)), dpi=200)\n plt.grid()\n plt.close()\n\n result_list.append(dict(\n corner=corner_list,\n integ_noise=integ_noise_list,\n ))\n\n if not isinstance(output_name, list):\n return result_list[0]\n else:\n return result_list\n\n def _compute_integrated_noise_new(self, f_vec, density, bw):\n noise_fun = interp.interp1d(f_vec, density**2, kind='cubic')\n integ_noise = integ.quad(noise_fun, f_vec[0], bw)[0]\n return integ_noise**0.5\n\n\n @classmethod\n def get_R_and_f3db(cls, data, output_list, output_dict=None):\n # type: (Dict[str, Any], List[str], Optional[Dict[str, Any]]) -> Dict[str, Any]\n \"\"\"Returns a dictionary of gain and 3db bandwidth information.\n\n Parameters\n ----------\n data : Dict[str, Any]\n the simulation data dictionary.\n output_list : Sequence[str]\n list of output names to compute gain/bandwidth for.\n output_dict : Optional[Dict[str, Any]]\n If not None, append to the given output dictionary instead.\n\n Returns\n -------\n output_dict : Dict[str, Any]\n A BAG data dictionary containing the gain/bandwidth information.\n \"\"\"\n if output_dict is None:\n output_dict = {}\n swp_info = data['sweep_params']\n f_vec = data['freq']\n for out_name in output_list:\n out_arr = data[out_name]\n swp_params = swp_info[out_name]\n freq_idx = swp_params.index('freq')\n new_swp_params = [par for par in swp_params if par != 'freq']\n gain_arr, f3db_arr = cls._compute_R_and_f3db(f_vec, np.abs(out_arr), freq_idx)\n cls.record_array(output_dict, data, gain_arr, 'R_TIA_' + out_name, new_swp_params)\n cls.record_array(output_dict, data, f3db_arr, 'f3db_' + out_name, new_swp_params)\n return output_dict\n\n @classmethod\n def get_gain(cls, data, output_list, output_dict=None):\n # type: (Dict[str, Any], List[str], Optional[Dict[str, Any]]) -> Dict[str, Any]\n \"\"\"Returns a dictionary of gain bandwidth information.\n\n Parameters\n ----------\n data : Dict[str, Any]\n the simulation data dictionary.\n output_list : Sequence[str]\n list of output names to compute gain/bandwidth for.\n output_dict : Optional[Dict[str, Any]]\n If not None, append to the given output dictionary instead.\n\n Returns\n -------\n output_dict : Dict[str, Any]\n A BAG data dictionary containing the gain/bandwidth information.\n \"\"\"\n if output_dict is None:\n output_dict = {}\n swp_info = data['sweep_params']\n f_vec = data['freq']\n for out_name in output_list:\n out_arr = data[out_name]\n swp_params = swp_info[out_name]\n freq_idx = swp_params.index('freq')\n new_swp_params = [par for par in swp_params if par != 'freq']\n out_arr = np.abs(out_arr)\n out_arr = np.moveaxis(out_arr, freq_idx, -1)\n gain_arr = out_arr[..., 0]\n cls.record_array(output_dict, data, gain_arr, 'gain_' + out_name, new_swp_params)\n\n return output_dict\n\n @classmethod\n def get_integrated_noise(cls, data, output_list, bandwidth_arr, output_dict=None):\n # type: (Dict[str, Any], List[str], np.ndarray, Optional[Dict[str, Any]]) -> Dict[str, Any]\n \"\"\"Returns a dictionary of gain and 3db bandwidth information.\n\n Parameters\n ----------\n data : Dict[str, Any]\n the simulation data dictionary.\n output_list : Sequence[str]\n list of output names to compute gain/bandwidth for.\n bandwidth_arr: np.ndarray\n 
list of bandwidths for every swept parameter (in units of Hz)\n output_dict : Optional[Dict[str, Any]]\n If not None, append to the given output dictionary instead.\n\n Returns\n -------\n output_dict : Dict[str, Any]\n A BAG data dictionary containing the gain/bandwidth information.\n \"\"\"\n\n if output_dict is None:\n output_dict = {}\n swp_info = data['sweep_params']\n f_vec = data['freq']\n for out_name in output_list:\n out_density = data[out_name]\n swp_params = swp_info[out_name]\n freq_idx = swp_params.index('freq')\n new_swp_params = [par for par in swp_params if par != 'freq']\n\n bandwidth_arr = np.expand_dims(bandwidth_arr, axis=0)\n rms_arr = cls._compute_integrated_noise(f_vec, np.abs(out_density), bandwidth_arr, freq_idx)\n cls.record_array(output_dict, data, rms_arr, 'rms_' + out_name, new_swp_params)\n\n return output_dict\n\n @classmethod\n def get_tset(cls, data, out_name, input_name, tot_err, gain, output_dict=None, plot_flag=False):\n\n if output_dict is None:\n output_dict = {}\n swp_info = data['sweep_params']\n time = data['time']\n\n swp_params = swp_info[out_name]\n time_idx = swp_params.index('time')\n new_swp_params = [par for par in swp_params if par != 'time']\n tset_arr = cls._compute_tset(time, data[out_name], data[input_name], tot_err, gain, time_idx, plot_flag)\n cls.record_array(output_dict, data, tset_arr, 'tset_' + out_name, new_swp_params)\n\n return output_dict\n\n\n @classmethod\n def _compute_R_and_f3db(cls, f_vec, out_arr, freq_idx):\n # type: (np.ndarray, np.ndarray, int) -> Tuple[np.ndarray, np.ndarray]\n \"\"\"Compute the DC R_TIA and bandwidth of the amplifier given output array.\n\n Parmeters\n ---------\n f_vec : np.ndarray\n the frequency vector. Must be sorted.\n out_arr : np.ndarray\n the amplifier output transfer function. Could be multidimensional.\n freq_idx : int\n frequency axis index.\n\n Returns\n -------\n gain_arr : np.ndarray\n the DC gain array.\n f3db_arr : np.ndarray\n the 3db bandwidth array. 
Contains NAN if the transfer function never\n intersect the gain.\n \"\"\"\n # move frequency axis to last axis\n out_arr = np.moveaxis(out_arr, freq_idx, -1)\n gain_arr = out_arr[..., 0]\n\n # convert\n orig_shape = out_arr.shape\n num_pts = orig_shape[-1]\n out_log = 20 * np.log10(out_arr.reshape(-1, num_pts))\n gain_log_3db = 20 * np.log10(gain_arr.reshape(-1)) - 3\n\n # find first index at which gain goes below gain_log 3db\n diff_arr = out_log - gain_log_3db[:, np.newaxis]\n idx_arr = np.argmax(diff_arr < 0, axis=1)\n freq_log = np.log10(f_vec)\n freq_log_max = freq_log[idx_arr]\n\n num_swp = out_log.shape[0]\n f3db_list = []\n for idx in range(num_swp):\n fun = interp.interp1d(freq_log, diff_arr[idx, :], kind='cubic', copy=False,\n assume_sorted=True)\n f3db_list.append(10.0 ** (cls._get_intersect(fun, freq_log[0], freq_log_max[idx])))\n\n return gain_arr, np.array(f3db_list).reshape(gain_arr.shape)\n\n @classmethod\n def _compute_integrated_noise(cls, f_vec, out_density_arr, bw_arr, freq_idx):\n # move frequency axis to last axis\n out_density_arr = np.moveaxis(out_density_arr, freq_idx, -1)\n\n integ_noise_list = []\n for density, bw in zip(out_density_arr.reshape([-1, out_density_arr.shape[-1]]),\n bw_arr.reshape([-1, bw_arr.shape[-1]])):\n noise_fun = interp.interp1d(f_vec, density**2, kind='cubic')\n integ_noise_list.append(integ.quad(noise_fun, f_vec[0], bw[0])[0])\n return np.sqrt(integ_noise_list)\n\n @classmethod\n def _compute_tset(cls, t, vout_arr, vin_arr, tot_err, gain_arr, time_idx, plot_flag=False):\n # move time axis to last axis\n vout_arr = np.moveaxis(vout_arr, time_idx, -1)\n vin_arr = np.moveaxis(vin_arr, time_idx, -1)*2\n tset_list = []\n if plot_flag:\n plt.figure()\n plt.plot(t, [1+tot_err]*len(t), 'r--')\n plt.plot(t, [1-tot_err]*len(t), 'r--')\n\n for vout, vin, gain in zip(vout_arr.reshape([-1, t.size]), vin_arr.reshape([-1, t.size]), gain_arr.flatten()):\n # since the evaluation of the raw data needs some of the constraints we need to do tset calculation here\n y = np.abs((vout - vout[0]) / gain / (vin[-1] - vin[0]))\n\n if plot_flag:\n plt.plot(t, y)\n plt.savefig('tset_debug.png', dpi=200)\n plt.close()\n\n last_idx = np.where(y < 1.0 - tot_err)[0][-1]\n last_max_vec = np.where(y > 1.0 + tot_err)[0]\n if last_max_vec.size > 0 and last_max_vec[-1] > last_idx:\n last_idx = last_max_vec[-1]\n last_val = 1.0 + tot_err\n else:\n last_val = 1.0 - tot_err\n\n if last_idx == t.size - 1:\n return t[-1]\n f = interp.InterpolatedUnivariateSpline(t, y - last_val)\n t0 = t[last_idx]\n t1 = t[last_idx + 1]\n tset_list.append(cls._get_intersect(f, t0, t1))\n\n\n tset_arr = np.reshape(tset_list, newshape=gain_arr.shape)\n\n return tset_arr\n\n @classmethod\n def _get_intersect(cls, fun, xmin, xmax):\n try:\n return sciopt.brentq(fun, xmin, xmax)\n except ValueError:\n return np.NAN\n\nclass TIAMM(MeasurementManager):\n\n def __init__(self, *args, **kwargs):\n MeasurementManager.__init__(self, *args, **kwargs)\n\n def get_initial_state(self):\n # type: () -> str\n \"\"\"Returns the initial FSM state.\"\"\"\n return 'tia'\n\n def process_output(self, state, data, tb_manager):\n # type: (str, Dict[str, Any], TIAMM) -> Tuple[bool, str, Dict[str, Any]]\n\n done = True\n next_state = ''\n\n ax = plt.gca()\n self.add_plot(state, data, 'outpTIA', ax=ax, title='R_tia', function=lambda x: 20*np.log10(2*np.abs(x)), save=False)\n self.add_plot(state, data, 'outdiff', ax=ax, title='R_tia_ctle', function=lambda x: 20*np.log10(np.abs(x)), save=True)\n self.add_plot(state, data, 
'input_noise', title='input_noise', function=lambda x: np.abs(x))\n self.add_plot(state, data, 'out_tran', x_axis='time', log_axis='none', title='out_tran', function=lambda x: np.abs(x))\n # self.add_plot(state, data, 'in_tran', x_axis='time', log_axis='none', title='in_tran')\n\n\n ac_res = tb_manager.get_R_and_f3db(data, ['outdiff'])\n tran_res = tb_manager.get_tset(data, out_name='out_tran', input_name='in_tran',\n tot_err=self.specs['tset_tol'], gain=ac_res['R_TIA_outdiff'])\n\n f3db_list = ac_res['f3db_outdiff']\n noise_res = tb_manager.get_integrated_noise(data, ['input_noise'], f3db_list)\n\n output = dict(\n r_tia=ac_res['R_TIA_outdiff'],\n f3db=f3db_list,\n rms_input_noise=noise_res['rms_input_noise'],\n ibias=np.abs(data['ibias']),\n tset=tran_res['tset_out_tran'],\n )\n\n return done, next_state, output\n\n def add_plot(self, state, data, y_axis, function=lambda x: x, x_axis='freq', ax=None, title=None, log_axis='x', save=True, show=False):\n \"\"\"\n Plot the data and optionally save it; the details depend on the MeasurementManager subclass.\n For more complex plotting this method should be overridden, using the state variable for conditioning.\n :param state:\n :param data:\n :param y_axis:\n :param x_axis:\n :param ax:\n :param title:\n :param save:\n :param show:\n :param log_axis: 'x'|'y'|'both'|'none'\n :return:\n \"\"\"\n\n functions_dict = {'x': plt.semilogx, 'y': plt.semilogy, 'both': plt.loglog, 'none': plt.plot}\n #TODO: Unfinished\n\n if ax is None:\n fig = plt.figure()\n ax = plt.gca()\n\n if title is None:\n title = y_axis\n\n # import IPython\n sweep_kwrds = data['sweep_params'][y_axis]\n sweep_kwrds = [kwrd for kwrd in sweep_kwrds if kwrd != x_axis]\n combos = itertools.product(*(list(range(len(data[swp_kwrd]))) for swp_kwrd in sweep_kwrds))\n\n # IPython.embed()\n # if combos:\n # for index in combos:\n # functions_dict[log_axis](data[x_axis], np.abs(data[y_axis][index, :]))\n # else:\n functions_dict[log_axis](data[x_axis], function(data[y_axis]))\n plt.ylabel(y_axis)\n plt.xlabel(x_axis)\n ax.grid()\n if save:\n fname = os.path.join(self.data_dir, title + \".png\")\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n if show:\n plt.show()\n"
},
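_compute_gain_and_f3db_new above finds the -3 dB point by interpolating the gain error in log-frequency and root-finding with brentq between the first sample and the first crossing. The same recipe in isolation, applied to a synthetic one-pole response; f3db_true is an illustrative value:

import numpy as np
import scipy.interpolate as interp
import scipy.optimize as sciopt

f3db_true = 2e9                                  # illustrative pole frequency
f_vec = np.logspace(6, 11, 301)
gain_vec = 1.0 / np.abs(1 + 1j * f_vec / f3db_true)

gain_log = 20 * np.log10(gain_vec)
target = 20 * np.log10(gain_vec[0]) - 3          # DC gain minus 3 dB
diff = gain_log - target

# First sample below the -3 dB line bounds the crossing from above.
idx = np.argmax(diff < 0)
freq_log = np.log10(f_vec)
fun = interp.interp1d(freq_log, diff, kind='cubic', assume_sorted=True)
f3db = 10.0 ** sciopt.brentq(fun, freq_log[0], freq_log[idx])
print('f3db ~ {:.3e} Hz'.format(f3db))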
{
"alpha_fraction": 0.5684210658073425,
"alphanum_fraction": 0.5703349113464355,
"avg_line_length": 36.612613677978516,
"blob_id": "768384ead7dae6e23d53e44f08b3e16aad8984bb",
"content_id": "4ac1a04ad42b6d80f245cbb5a15433fd57542ddf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4180,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 111,
"path": "/verification_kh/OverDriveTest.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "from typing import TYPE_CHECKING, List, Tuple, Dict, Any, Sequence, Optional\n\nif TYPE_CHECKING:\n from bag.core import Testbench\n\nfrom bag.simulation.core import TestbenchManager, MeasurementManager\nimport matplotlib.pyplot as plt\nimport os\nfrom scipy import interpolate\nimport numpy as np\n\nclass ODTBM(TestbenchManager):\n def __init__(self,\n data_fname, # type: str\n tb_name, # type: str\n impl_lib, # type: str\n specs, # type: Dict[str, Any]\n sim_view_list, # type: Sequence[Tuple[str, str]]\n env_list, # type: Sequence[str]\n ):\n # type: (...) -> None\n TestbenchManager.__init__(self, data_fname, tb_name, impl_lib, specs,\n sim_view_list, env_list)\n def setup_testbench(self, tb):\n # not done properly, safer to make them equal\n sim_vars = self.specs.get('sim_vars', None)\n if sim_vars is not None:\n # just forcing tr be equal to tf in case it's not\n sim_vars['tr'] = sim_vars['tf']\n sim_vars['td'] = sim_vars['Tper']/4\n sim_outputs = self.specs.get('sim_outputs', None)\n\n for key, value in sim_vars.items():\n tb.set_parameter(key, value)\n if sim_outputs is not None:\n for key, val in sim_outputs.items():\n tb.add_output(key, val)\n\n @classmethod\n def add_plot(self, data, yaxis_key=None, xaxis_key='time'):\n if yaxis_key is None:\n raise ValueError('yaxis_key should be specified')\n if yaxis_key not in data:\n raise ValueError('yaxis_key = {} not found in data keywords'.format(yaxis_key))\n plt.plot(data[xaxis_key], data[yaxis_key])\n\n @classmethod\n def save_plot(self, fname):\n plt.grid()\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n\n\nclass ODMM(MeasurementManager):\n\n def __init__(self, data_dir, meas_name, impl_lib, specs, wrapper_lookup, sim_view_list, env_list):\n # type: (str, str, str, Dict[str, Any], Dict[str, str], Sequence[Tuple[str, str]], Sequence[str]) -> None\n MeasurementManager.__init__(self, data_dir, meas_name, impl_lib, specs, wrapper_lookup, sim_view_list, env_list)\n\n testbenches_od = specs['testbenches']['od']\n self.c_wait = testbenches_od['sim_vars']['c_wait']\n self.Tper = testbenches_od['sim_vars']['Tper']\n self.tsetup = self.specs['tsetup']\n\n def get_initial_state(self):\n # type: () -> str\n \"\"\"Returns the initial FSM state.\"\"\"\n return 'od'\n\n def process_output(self, state, data, tb_manager):\n # type: (str, Dict[str, Any], ODMM) -> Tuple[bool, str, Dict[str, Any]]\n # TODO: make this work for multiple corners\n done = True\n next_state = ''\n\n '''\n # Sanity check with visualization\n tb_manager.add_plot(data, yaxis_key='inclk')\n tb_manager.add_plot(data, yaxis_key='outdiff')\n tb_manager.add_plot(data, yaxis_key='indiff')\n tb_manager.save_plot(os.path.join(self.data_dir, 'plot.png'))\n '''\n\n # fit vout = f(time)\n # read value of vout @ different times\n vout = data['outdiff']\n time = data['time']\n\n fvout = interpolate.interp1d(time, vout, kind='cubic')\n t_charge = self.c_wait * self.Tper - self.tsetup\n t_reset = (self.c_wait+0.5) * self.Tper - self.tsetup\n t_out = (self.c_wait + 1) * self.Tper - self.tsetup\n\n v_charge = fvout(t_charge)\n v_reset = fvout(t_reset)\n v_out = fvout(t_out)\n ibias = np.abs(data['ibias'])\n\n tb_manager.add_plot(data, yaxis_key='inclk')\n tb_manager.add_plot(data, yaxis_key='outdiff')\n tb_manager.add_plot(data, yaxis_key='indiff')\n tb_manager.save_plot(os.path.join(self.data_dir, 'plot.png'))\n\n output = dict(v_charge=np.float(v_charge),\n v_reset=np.float(v_reset),\n v_out=np.float(v_out),\n ibias=np.float(ibias))\n\n 
return done, next_state, output\n\n\n\n\n\n"
},
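The overdrive-recovery post-processing interpolates the differential output and reads it at three instants, each tsetup before the end of the charge, reset, and decision phases. A toy version of that sampling; the stand-in waveform and timing numbers are invented for illustration:

import numpy as np
from scipy import interpolate

Tper, c_wait, tsetup = 1e-9, 2, 50e-12          # illustrative timing
time = np.linspace(0, 4e-9, 4001)
# Invented stand-in for the comparator's differential output waveform.
vout = 0.4 * np.sin(2 * np.pi * time / Tper) * (1 - np.exp(-time / 0.5e-9))

fvout = interpolate.interp1d(time, vout, kind='cubic')
t_charge = c_wait * Tper - tsetup               # end of charge phase
t_reset = (c_wait + 0.5) * Tper - tsetup        # end of reset phase
t_out = (c_wait + 1) * Tper - tsetup            # decision instant

for label, t in (('v_charge', t_charge), ('v_reset', t_reset), ('v_out', t_out)):
    print('{} = {:+.4f} V (t = {:.2e} s)'.format(label, float(fvout(t)), t))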
{
"alpha_fraction": 0.646789014339447,
"alphanum_fraction": 0.6498470902442932,
"avg_line_length": 23.22222137451172,
"blob_id": "ad4f84e7b9700762c1b8fc1600e93be27053b987",
"content_id": "2a073821d14a221183c85f75e92fd5f89520b4d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 654,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 27,
"path": "/BagModules/bag_testbenches_kh/dut_model.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design import Module\n\n\nyaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'dut_model.yaml'))\n\n\n# noinspection PyPep8Naming\nclass bag_testbenches_kh__dut_model(Module):\n\n def __init__(self, bag_config, parent=None, prj=None, **kwargs):\n Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n return dict(\n )\n\n def design(self):\n raise ValueError('This class should not be instantiated.')\n"
},
{
"alpha_fraction": 0.7011643052101135,
"alphanum_fraction": 0.7024579644203186,
"avg_line_length": 32.60869598388672,
"blob_id": "3483e3bf436adf22020c985c8539d98a24697453",
"content_id": "c56983eb9a85a8dccb0b205ee6cba99a7fb6c432",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 23,
"path": "/verification_kh/test_verification/tb_tia.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe script for testing the Design Manager Module\nThis file can generate layout/schematic, Do LVS and RCX, and run overdrive test recovery testbench\nTo be able to use this the top level yaml file has to follow certain conventions. DTSA.yaml is an example\n\"\"\"\nfrom bag import BagProject\nfrom bag.simulation.core import DesignManager\nfrom bag.io import read_yaml, open_file\n\n\nif __name__ == '__main__':\n local_dict = locals()\n if 'bprj' not in local_dict:\n print('creating BAG project')\n bprj = BagProject()\n\n else:\n print('loading BAG project')\n bprj = local_dict['bprj']\n\n fname = 'Acacia2Tapeout/specs/tia.yaml'\n sim = DesignManager(bprj, fname)\n sim.characterize_designs(generate=False, measure=True, load_from_file=True)\n"
},
{
"alpha_fraction": 0.5291133522987366,
"alphanum_fraction": 0.5302161574363708,
"avg_line_length": 37.34745788574219,
"blob_id": "b22750f23dbbe7324046f74c3e57fc9c56da9bcf",
"content_id": "e3d74cf4240e9564689a443bc08d3eba10923987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4534,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 118,
"path": "/BagModules/bag_testbenches_kh/photonic_link_tb.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design.module import Module\nfrom bag.math import float_to_si_string\n\n# noinspection PyPep8Naming\nclass bag_testbenches_kh__photonic_link_tb(Module):\n \"\"\"Module for library bag_testbenches_kh cell photonic_link_tb.\n\n Fill in high level description here.\n \"\"\"\n yaml_file = pkg_resources.resource_filename(__name__,\n os.path.join('netlist_info',\n 'photonic_link_tb.yaml'))\n\n\n def __init__(self, database, parent=None, prj=None, **kwargs):\n Module.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n return dict(\n dut_lib='DUT library name.',\n dut_cell='DUT cell name.',\n dut_conns='DUT connection dictionary',\n vbias_dict='Vbias dictionary (could include VDD)',\n ibias_dict='Ibias dictionary (should include input current)',\n no_cload='if True cload is deleted',\n no_cpd='if True cpd is deleted',\n )\n\n def design(\n self,\n dut_lib='',\n dut_cell='',\n dut_conns=None,\n vbias_dict=None,\n ibias_dict=None,\n no_cload=False,\n no_cpd=False,\n ):\n \"\"\"To be overridden by subclasses to design this module.\n\n This method should fill in values for all parameters in\n self.parameters. To design instances of this module, you can\n call their design() method or any other ways you coded.\n\n To modify schematic structure, call:\n\n rename_pin()\n delete_instance()\n replace_instance_master()\n reconnect_instance_terminal()\n restore_instance()\n array_instance()\n \"\"\"\n if vbias_dict is None:\n vbias_dict = {}\n if ibias_dict is None:\n ibias_dict = {}\n if dut_conns is None:\n dut_conns = {}\n\n self.design_voltage_current_sources(vbias_dict, ibias_dict,\n v_inst_names=['VSUP', 'VCLK'],\n i_inst_names=['IBIAS', 'Istream', 'Itran_in'])\n\n # delete load cap if needed\n if no_cload:\n self.delete_instance('CLOAD')\n # delete input cap if needed\n if no_cload:\n self.delete_instance('CPD')\n\n # setup DUT\n self.replace_instance_master('XDUT', dut_lib, dut_cell, static=True)\n for term_name, net_name in dut_conns.items():\n self.reconnect_instance_terminal('XDUT', term_name, net_name)\n\n\n\n def design_voltage_current_sources(self, v_source_dict, i_source_dict,\n v_inst_names=None, i_inst_names=None):\n for source_inst_names, source_dict in ((i_inst_names, i_source_dict), (v_inst_names, v_source_dict)):\n for source_inst_name in source_inst_names:\n source_replacements_dict = source_dict.get(source_inst_name, {})\n\n if not source_replacements_dict:\n self.delete_instance(source_inst_name)\n continue\n\n assert isinstance(source_replacements_dict, dict), \"{} is not a dictionary\".format(source_inst_name)\n\n name_list, term_list, param_dict_list = [], [], []\n for inst_name, inst_properties in source_replacements_dict.items():\n pname, nname = inst_properties[:2]\n term_list.append(dict(PLUS=pname, MINUS=nname))\n param_dict_list.append(inst_properties[2])\n name_list.append(inst_name)\n\n self.array_instance(source_inst_name, name_list, term_list=term_list)\n\n for inst, inst_properties in zip(self.instances[source_inst_name],\n source_replacements_dict.values()):\n for k, v in inst_properties[2].items():\n if isinstance(v, str):\n pass\n elif isinstance(v, int) or isinstance(v, float):\n v = float_to_si_string(v)\n else:\n raise ValueError('value %s of type %s not supported' % (v, type(v)))\n inst.parameters[k] = v\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6076173782348633,
"alphanum_fraction": 0.6093888282775879,
"avg_line_length": 28.736841201782227,
"blob_id": "469d8536a11c614fe1c375b53901e7393eb03d35",
"content_id": "c2ae2e601b9a3ba505a65e3ff3f5d4396b2e657f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1129,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 38,
"path": "/BagModules/bag_testbenches_kh/cmp_overdrive_recovery_tb.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design import Module\n\n\nyaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'cmp_overdrive_recovery_tb.yaml'))\n\n\n# noinspection PyPep8Naming\nclass bag_testbenches_kh__cmp_overdrive_recovery_tb(Module):\n \"\"\"\n Module for Overdrive Recovery test for comparators.\n TO use it for different architectures a wrapper should be built (i.e. DTSA_dsn_wrapper.py)\n \"\"\"\n\n def __init__(self, bag_config, parent=None, prj=None, **kwargs):\n Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n return dict(\n dut_lib='DUT library name.',\n dut_cell='DUT cell name.',\n )\n\n def design(\n self, # type: bag_testbenches_kh__cmp_overdrive_recovery_tb\n dut_lib='', # type: str\n dut_cell='', # type: str\n ):\n # type: (...) -> None\n self.replace_instance_master('XDUT', dut_lib, dut_cell, static=True)"
},
{
"alpha_fraction": 0.5369369387626648,
"alphanum_fraction": 0.5378378629684448,
"avg_line_length": 31.159420013427734,
"blob_id": "80a03ec4a36bbce3b80e6e8750516b6c85edfcec",
"content_id": "eea7918248e71557d473d2280b17890f4345b438",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2220,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 69,
"path": "/BagModules/bag_testbenches_kh/tia_noise_tb.py",
"repo_name": "kouroshHakha/bag_testbenches_kh",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design.module import Module\n\n\n# noinspection PyPep8Naming\nclass bag_testbenches_kh__tia_noise_tb(Module):\n \"\"\"Module for library bag_testbenches_kh cell tia_noise_tb.\n\n Fill in high level description here.\n \"\"\"\n yaml_file = pkg_resources.resource_filename(__name__,\n os.path.join('netlist_info',\n 'tia_noise_tb.yaml'))\n\n\n def __init__(self, database, parent=None, prj=None, **kwargs):\n Module.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n return dict(\n dut_lib='DUT library name.',\n dut_cell='DUT cell name.',\n dut_conns='DUT connection dictionary',\n vbias_dict='Vbias dictionary (could include VDD)',\n ibias_dict='Ibias dictionary (should include input current)',\n no_cload='if True cload is deleted',\n no_cpd='if True cpd is deleted',\n )\n def design(\n self, # type: bag_testbenches_kh__tia_tb\n dut_lib='', # type: str\n dut_cell='', # type: str\n dut_conns=None,\n vbias_dict=None,\n ibias_dict=None,\n no_cload=False,\n no_cpd=False\n ):\n # type: (...) -> None\n\n if vbias_dict is None:\n vbias_dict = {}\n if ibias_dict is None:\n ibias_dict = {}\n if dut_conns is None:\n dut_conns = {}\n\n # setup bias sources\n self.design_dc_bias_sources(vbias_dict, ibias_dict, 'VSUP', 'IBIAS', define_vdd=True)\n\n # delete load cap if needed\n if no_cload:\n self.delete_instance('CLOAD')\n # delete input cap if needed\n if no_cload:\n self.delete_instance('CPD')\n\n # setup DUT\n self.replace_instance_master('XDUT', dut_lib, dut_cell, static=True)\n for term_name, net_name in dut_conns.items():\n self.reconnect_instance_terminal('XDUT', term_name, net_name)\n\n"
}
] | 15 |
jenskutilek/AnchorOverlayTool | https://github.com/jenskutilek/AnchorOverlayTool | 6041c7a762e4a92842ea5a0fb3475b517120c826 | 105e886656af658e9e03506b297caf34b64ca849 | 15f910997c6fbdc164ca51c94acfbf93ca2e4259 | refs/heads/master | 2021-07-14T03:54:22.946257 | 2021-07-05T13:53:49 | 2021-07-05T13:53:49 | 14,745,606 | 4 | 3 | null | 2013-11-27T11:49:50 | 2020-03-09T18:29:49 | 2020-06-22T16:11:07 | Python | [
{
"alpha_fraction": 0.5384733080863953,
"alphanum_fraction": 0.5557423233985901,
"avg_line_length": 32.38520431518555,
"blob_id": "fdad178c032f547ecb5f94e490c368770b2959ae",
"content_id": "0c79430910c1336ccd4b98c66f6e771e03b68326",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13087,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 392,
"path": "/Anchor Overlay Tool.roboFontExt/lib/AnchorOverlay.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "# Anchor Overlay\n# An extension for the RoboFont editor\n# Requires RoboFont 3\n# Version 0.1 by Jens Kutilek 2013-01-04\n# Version 0.2 by Jens Kutilek 2013-12-05\n# Version 0.4 by Jens Kutilek 2014-02-13\n# Version 0.4 by Jens Kutilek 2014-02-13\n# Version 0.5.2: Jens Kutilek 2016-01-17\n# Version 0.6.0: Jens Kutilek 2018-01-10\n# Version 0.7.0: Jens Kutilek 2020-06-22\n# Version 0.8.0: Jens Kutilek 2021-02-03\n\nimport vanilla\n\n# from time import time\n\nfrom defconAppKit.windows.baseWindow import BaseWindowController\n\nfrom mojo.events import addObserver, removeObserver\nfrom mojo.drawingTools import (\n drawGlyph,\n fill,\n restore,\n save,\n strokeWidth,\n translate,\n)\nfrom mojo.roboFont import CurrentFont, CurrentGlyph\n\nfrom lib.tools.defaults import getDefaultColor\nfrom mojo.UI import UpdateCurrentGlyphView, CurrentGlyphWindow\nfrom mojo.extensions import getExtensionDefault, setExtensionDefault\n\nfrom extensionID import extensionID\nfrom FontAnchors import FontAnchors\n\n\ndef roundCoordinates(coordinatesTuple):\n return (int(round(coordinatesTuple[0])), int(round(coordinatesTuple[1])))\n\n\nclass AnchorOverlay(BaseWindowController):\n def __init__(self):\n self.fontAnchors = FontAnchors(CurrentFont())\n self.showPreview = getExtensionDefault(\n \"%s.%s\" % (extensionID, \"preview\"), True\n )\n nscolor = getDefaultColor(\"glyphViewPreviewFillColor\")\n self.preview_color = (\n nscolor.redComponent(),\n nscolor.greenComponent(),\n nscolor.blueComponent(),\n nscolor.alphaComponent(),\n )\n\n columnDescriptions = [\n {\"title\": \"Show\", \"cell\": vanilla.CheckBoxListCell(), \"width\": 35},\n {\"title\": \"Name\", \"typingSensitive\": True, \"editable\": False},\n ]\n\n self.w = vanilla.FloatingWindow(\n (170, 490), \"Anchor Overlay\", closable=False\n )\n\n y = 10\n self.w.showAnchors_label = vanilla.TextBox(\n (10, y, -10, 20), \"Show anchors:\", sizeStyle=\"small\"\n )\n y += 25\n self.w.showAnchors = vanilla.List(\n (10, y, -10, 150),\n self.fontAnchors.getAnchorNames(),\n columnDescriptions=columnDescriptions,\n drawFocusRing=True,\n editCallback=self.updateAnchorVisibility,\n doubleClickCallback=self.selectGlyphsWithAnchorName,\n selectionCallback=self.updateAnchoredGlyphsList,\n )\n y += 160\n self.w.markAnchors_label = vanilla.TextBox(\n (10, y, 150, 20), \"Show mark glyphs:\", sizeStyle=\"small\"\n )\n y += 25\n self.w.markAnchors = vanilla.List(\n (10, y, 150, 180),\n [], # self.fontAnchors.anchorGlyphs.keys(),\n columnDescriptions=columnDescriptions,\n editCallback=self.updateMarkVisibility,\n doubleClickCallback=self.gotoGlyph,\n allowsMultipleSelection=False,\n allowsEmptySelection=False,\n )\n y += 188\n # self.w.drawPreview = vanilla.CheckBox((10, y, -10, -10), \"Show in preview mode\",\n # callback=self.setShowPreview,\n # value=self.showPreview,\n # sizeStyle=\"small\"\n # )\n\n # self.w.displayAnchors = vanilla.CheckBox((10, y+25, -10, -10), \"Show anchors\",\n # callback=self.setShowAnchors,\n # value=getGlyphViewDisplaySettings()[\"Anchors\"],\n # sizeStyle=\"small\"\n # )\n\n y += 2\n self.w.alignAnchors_label = vanilla.TextBox(\n (10, y, -10, -10), \"Align selected anchors:\", sizeStyle=\"small\"\n )\n\n y += 21\n self.w.centerXButton = vanilla.Button(\n (10, y, 72, 25),\n \"Points X\",\n callback=self.centerAnchorX,\n sizeStyle=\"small\",\n )\n self.w.centerYButton = vanilla.Button(\n (88, y, 72, 25),\n \"Points Y\",\n callback=self.centerAnchorY,\n sizeStyle=\"small\",\n )\n\n y += 26\n self.w.baselineButton = vanilla.Button(\n (10, y, 46, 
25),\n \"base\",\n callback=self.moveAnchorBaseline,\n sizeStyle=\"small\",\n )\n self.w.xheightButton = vanilla.Button(\n (62, y, 46, 25),\n \"x\",\n callback=self.moveAnchorXheight,\n sizeStyle=\"small\",\n )\n self.w.capheightButton = vanilla.Button(\n (114, y, 46, 25),\n \"cap\",\n callback=self.moveAnchorCapheight,\n sizeStyle=\"small\",\n )\n\n self.setUpBaseWindowBehavior()\n self.addObservers()\n\n self.w.showAnchors.setSelection([])\n self.w.open()\n\n # Observers\n\n def addObservers(self):\n addObserver(self, \"glyphChanged\", \"draw\")\n addObserver(self, \"glyphChangedPreview\", \"drawPreview\")\n addObserver(self, \"glyphChanged\", \"drawInactive\")\n\n def removeObservers(self):\n removeObserver(self, \"draw\")\n removeObserver(self, \"drawPreview\")\n removeObserver(self, \"drawInactive\")\n\n # Callbacks\n\n def updateAnchorVisibility(self, sender=None, glyph=None):\n for anchor in sender.get():\n # self.fontAnchors.setAnchorVisibility(anchor[\"Name\"], anchor[\"Show\"])\n self.fontAnchors.setVisibility(\n \"anchor\", anchor[\"Name\"], anchor[\"Show\"]\n )\n UpdateCurrentGlyphView()\n\n def updateGlyphVisibility(self, sender=None, glyph=None):\n for g in sender.get():\n self.fontAnchors.setVisibility(\n \"glyph\", g[\"Name\"], g[\"Show\"], False\n )\n UpdateCurrentGlyphView()\n\n def updateMarkVisibility(self, sender=None, glyph=None):\n for g in sender.get():\n self.fontAnchors.setVisibility(\"mark\", g[\"Name\"], g[\"Show\"], False)\n UpdateCurrentGlyphView()\n\n def updateAnchoredGlyphsList(self, sender=None, glyph=None):\n selectedAnchorNames = []\n for i in sender.getSelection():\n selectedAnchorNames.append(\n self.fontAnchors.getAnchorNames()[i][\"Name\"]\n )\n self.w.markAnchors.set(\n self.fontAnchors.getAnchoredGlyphNamesForList(\n selectedAnchorNames, marks=True\n )\n )\n\n def gotoGlyph(self, sender=None, glyph=None):\n newGlyphName = sender.get()[sender.getSelection()[0]][\"Name\"]\n # print(\"Goto Glyph:\", newGlyphName)\n CurrentGlyphWindow().setGlyphByName(newGlyphName)\n\n def selectGlyphsWithAnchorName(self, sender=None):\n anchorName = sender.get()[sender.getSelection()[0]][\"Name\"]\n self.fontAnchors.selectGlyphsWithAnchorName(anchorName)\n\n # def setShowPreview(self, sender=None, glyph=None):\n # self.showPreview = sender.get()\n\n # def setShowAnchors(self, sender=None, glyph=None):\n # showAnchors = sender.get()\n # setGlyphViewDisplaySettings({\"Anchors\": showAnchors})\n\n # Drawing helpers\n\n def setStroke(self, value=0.5):\n strokeWidth(value)\n\n def setFill(self, rgba=(0.2, 0, 0.2, 0.2)):\n r, g, b, a = rgba\n fill(r, g, b, a)\n\n # Stuff for anchor alignment buttons\n\n def _getBBox(self, pointList):\n minX = None\n maxX = None\n minY = None\n maxY = None\n for p in pointList:\n if minX is None or p.x < minX:\n minX = p.x\n if maxX is None or p.x > maxX:\n maxX = p.x\n if minY is None or p.y < minY:\n minY = p.y\n if maxY is None or p.y > maxY:\n maxY = p.y\n return ((minX, minY), (maxX, maxY))\n\n def _getReferencePoint(self, glyph):\n # calculate a reference point for anchor adjustments\n if len(glyph.selection) == 0:\n # no points selected, place anchor at glyph width or cap height\n # center\n # TODO: x-height for lowercase?\n # print(\"Ref: metrics\")\n return roundCoordinates(\n (glyph.width / 2, self.fontAnchors.font.info.capHeight / 2)\n )\n elif len(glyph.selection) == 1:\n # one point is selected, return same\n # print(\"Ref: point\")\n return roundCoordinates(\n (glyph.selection[0].x, glyph.selection[0].y)\n )\n else:\n # more 
points are selected, find min/max and return center.\n # print(\"Ref: bbox\")\n ((minX, minY), (maxX, maxY)) = self._getBBox(glyph.selection)\n return roundCoordinates(((minX + maxX) / 2, (minY + maxY) / 2))\n\n # Align anchors based on selection\n\n def centerAnchorX(self, sender=None, glyph=None):\n g = CurrentGlyph()\n g.prepareUndo(undoTitle=\"h-align anchors in /%s\" % g.name)\n p = self._getReferencePoint(g)\n for a in g.anchors:\n if a.selected:\n a.x = p[0]\n g.performUndo()\n UpdateCurrentGlyphView()\n\n def centerAnchorY(self, sender=None, glyph=None):\n g = CurrentGlyph()\n g.prepareUndo(undoTitle=\"v-align anchors in /%s\" % g.name)\n p = self._getReferencePoint(g)\n for a in g.anchors:\n if a.selected:\n a.y = p[1]\n g.performUndo()\n UpdateCurrentGlyphView()\n\n def addAnchorAndUpdateList(self, glyph, name, position):\n self.fontAnchors.addAnchor(glyph, name, position, addToGlyph=True)\n self.w.showAnchors.set(self.fontAnchors.getAnchorNames())\n UpdateCurrentGlyphView()\n\n # Align anchors based on metrics\n\n def moveAnchorBaseline(self, sender=None, glyph=None):\n g = CurrentGlyph()\n g.prepareUndo(undoTitle=\"align anchors to baseline in /%s\" % g.name)\n for a in g.anchors:\n if a.selected:\n a.y = 0\n g.performUndo()\n UpdateCurrentGlyphView()\n\n def moveAnchorXheight(self, sender=None, glyph=None):\n g = CurrentGlyph()\n g.prepareUndo(undoTitle=\"align anchors to x-height in /%s\" % g.name)\n y = self.fontAnchors.font.info.xHeight\n for a in g.anchors:\n if a.selected:\n a.y = y\n g.performUndo()\n UpdateCurrentGlyphView()\n\n def moveAnchorCapheight(self, sender=None, glyph=None):\n g = CurrentGlyph()\n g.prepareUndo(undoTitle=\"align anchors to cap height in /%s\" % g.name)\n y = self.fontAnchors.font.info.capHeight\n for a in g.anchors:\n if a.selected:\n a.y = y\n g.performUndo()\n UpdateCurrentGlyphView()\n\n def glyphChanged(self, info):\n # print(\" * glyphChanged\")\n g = info[\"glyph\"]\n if g is not None:\n if len(g.anchors) > 0:\n self.drawAnchoredGlyphs(g)\n\n def glyphChangedPreview(self, info):\n # print(\" * glyphChangedPreview\")\n g = info[\"glyph\"]\n if (g is not None) and self.showPreview:\n if len(g.anchors) > 0:\n self.drawAnchoredGlyphs(g, preview=True)\n\n def drawAnchoredGlyphs(self, glyph, preview=False):\n self.setStroke(0)\n if preview:\n self.setFill(self.preview_color)\n else:\n self.setFill()\n\n # start = time()\n\n dbx = 0\n dby = 0\n\n for a in glyph.anchors:\n anchor_name = a.name\n # print(\" %s\" % anchor_name)\n if self.fontAnchors.getVisibility(\"anchor\", anchor_name):\n glyphsToDraw = self.fontAnchors.getAnchoredGlyphNames(\n anchor_name\n )\n # get translation for base anchor\n dbx = a.x\n dby = a.y\n save()\n for gn in glyphsToDraw:\n if (\n anchor_name[0] != \"_\"\n and self.fontAnchors.getVisibility(\"mark\", gn, False)\n ) or (\n anchor_name[0] == \"_\"\n and self.fontAnchors.getVisibility(\"glyph\", gn, False)\n ):\n # get translation for current mark anchor\n dmx, dmy = self.fontAnchors.anchorPositions[\n gn,\n self.fontAnchors.getMatchingAnchorName(\n anchor_name\n ),\n ]\n x = dbx - dmx\n y = dby - dmy\n translate(x, y)\n drawGlyph(self.fontAnchors.font[gn])\n dbx = dmx\n dby = dmy\n restore()\n\n # stop = time()\n # print(\" Draw: %0.1f ms\" % (1000 * (stop - start)))\n\n def windowCloseCallback(self, sender):\n self.removeObservers()\n setExtensionDefault(\n \"%s.%s\" % (extensionID, \"hide\"), self.fontAnchors.hideLists\n )\n setExtensionDefault(\n \"%s.%s\" % (extensionID, \"preview\"), self.showPreview\n )\n 
super(AnchorOverlay, self).windowCloseCallback(sender)\n UpdateCurrentGlyphView()\n"
},
{
"alpha_fraction": 0.7758793830871582,
"alphanum_fraction": 0.7758793830871582,
"avg_line_length": 51.421051025390625,
"blob_id": "57c3e591f49a9f0b2ff3753afbb9777625e364b2",
"content_id": "260b8bce80c98406c6ae6dc90d60f47d40ce73aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 995,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 19,
"path": "/README.md",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "Anchor Overlay Tool\n===================\n\n\n\nJust another way to add anchors and preview accent positions in RoboFont. Installs as a tool in the glyph window toolbar.\n\n* Anchor placement: Double-click anywhere to place an anchor. The anchors are named automatically based on the click position (top, center, bottomRight, etc.)\n* Alignment assistance: Select one or more anchors or points and use the alignment buttons\n\nMenu additions:\n\n* *Recompose Selected Glyphs* (ctrl-cmd-R): Reposition components in current or selected glyphs based on anchor positions.\n* *Export Anchor Table (CSV)*: Export all anchor names and positions for open UFOs as comma-separated text file. This helps comparing position consistency across the font family and noticing any missing anchors.\n\nSimilar RoboFont extensions:\n\n* [Accentista](https://github.com/FontBureau/fbOpenTools/tree/master/Accentista) by David Jonathan Ross\n* [Adjust Anchors](https://github.com/adobe-type-tools/robofont-extensions) by Miguel Sousa"
},
{
"alpha_fraction": 0.4658590257167816,
"alphanum_fraction": 0.46879589557647705,
"avg_line_length": 34.376625061035156,
"blob_id": "b3fdf20c08d65b5b0e1a0a31fd439b2dd1b60f8a",
"content_id": "32b3bb49eb92b7f568e256bbc42d5402d37fa993",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2724,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 77,
"path": "/Anchor Overlay Tool.roboFontExt/lib/Compare Anchors CSV.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "import codecs\nfrom os.path import expanduser, join\n\n\nclass AnchorComparison(object):\n def __init__(self, fontlist=[]):\n fonts = []\n for f in fontlist:\n fonts.append((f.info.openTypeOS2WeightClass, f))\n fonts.sort(key=lambda i: i[0])\n self.fonts = [f[1] for f in fonts]\n\n def get_global_glyph_list(self):\n gl = []\n for f in self.fonts:\n gl.extend(f.glyphOrder)\n return sorted(list(set(gl)))\n\n def get_global_anchor_list(self, glyph_name):\n al = []\n for f in self.fonts:\n al.extend([a.name for a in f[glyph_name].anchors])\n return sorted(list(set(al)))\n\n def get_anchors_by_name(self, glyph):\n anchor_names = [a.name for a in glyph.anchors]\n if len(anchor_names) != len(set(anchor_names)):\n print(\" WARNING: Duplicate anchor name in %s\" % glyph.name)\n return {a.name: (a.x, a.y) for a in glyph.anchors}\n\n def get_comparison_csv(self):\n csv = \"Glyph;Anchor;\"\n for i in range(len(self.fonts)):\n csv += \"%s;%s;\" % (\n self.fonts[i].info.familyName,\n self.fonts[i].info.styleName,\n )\n csv += \"\\n\"\n glyphs = self.get_global_glyph_list()\n for name in glyphs:\n all_anchors = self.get_global_anchor_list(name)\n for anchor in all_anchors:\n csv += \"%s;%s;\" % (name, anchor)\n for i in range(len(self.fonts)):\n if name in self.fonts[i]:\n glyph_anchors = self.get_anchors_by_name(\n self.fonts[i][name]\n )\n if anchor in glyph_anchors:\n pos = self.get_anchors_by_name(\n self.fonts[i][name]\n )[anchor]\n csv += \"%i;%i;\" % (pos[0], pos[1])\n else:\n csv += \";;\"\n else:\n csv += \"(no glyph);\"\n csv += \"\\n\"\n return csv\n\n def save_comparison_csv(self, path=None):\n if len(self.fonts) > 0:\n if not path:\n path = join(\n expanduser(\"~\"),\n \"Documents\",\n \"%s_Anchor_Comparison.csv\" % self.fonts[0].info.familyName,\n )\n with codecs.open(path, \"wb\", encoding=\"utf-8\") as csv:\n csv.write(self.get_comparison_csv())\n print(\"Anchor table written to '%s'.\" % path)\n else:\n print(\"There are no open fonts.\")\n\n\nac = AnchorComparison(AllFonts())\nac.save_comparison_csv()\n"
},
{
"alpha_fraction": 0.531255841255188,
"alphanum_fraction": 0.5327576398849487,
"avg_line_length": 33.14743423461914,
"blob_id": "712f714f8f866ea2a1edb25db3dd82ef8e59bc3f",
"content_id": "8b198521085dbbfa02598da37a0da96debdb9e03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5327,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 156,
"path": "/Anchor Overlay Tool.roboFontExt/lib/FontAnchors.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "from mojo.extensions import getExtensionDefault\nfrom extensionID import extensionID\n\n\nclass FontAnchors(object):\n\n anchorNames = []\n anchorGlyphs = {}\n anchorPositions = {}\n invisibleAnchors = []\n invisibleGlyphs = []\n invisibleMarks = []\n\n hideLists = {\n \"anchor\": invisibleAnchors,\n \"glyph\": invisibleGlyphs,\n \"mark\": invisibleMarks,\n }\n\n def __init__(self, font):\n self.font = font\n self._readFromFont(self.font)\n self.hideLists = getExtensionDefault(\n \"%s.%s\" % (extensionID, \"hide\"), self.hideLists\n )\n\n def _readFromFont(self, font):\n self.anchorNames = []\n self.anchorGlyphs = {}\n self.anchorPositions = {}\n\n if font is not None:\n for g in font:\n if len(g.anchors) > 0:\n for a in g.anchors:\n self.addAnchor(g, a.name, (a.x, a.y))\n # for a in sorted(self.anchorBaseMap.keys()):\n # self.anchorNames.append({\"Show\": True, \"Name\": a})\n # print(\"\\nanchorGlyphs:\", self.anchorGlyphs)\n # print(\"\\nanchorPositions:\", self.anchorPositions)\n # print()\n\n def getVisibility(self, kind, name, includeMatching=True):\n hideList = self.hideLists[kind]\n if not (\n name in hideList\n or (\n includeMatching\n and self.getMatchingAnchorName(name) in hideList\n )\n ):\n return True\n return False\n\n def setVisibility(self, kind, name, isVisible=True, includeMatching=True):\n hideList = self.hideLists[kind]\n if isVisible:\n if name in hideList:\n hideList.remove(name)\n if includeMatching:\n hideList.remove(self.getMatchingAnchorName(name))\n else:\n if not (name in hideList):\n hideList.append(name)\n if includeMatching:\n hideList.append(self.getMatchingAnchorName(name))\n\n def addAnchor(self, glyph, name, position, addToGlyph=False):\n if len(name) == 0:\n print(\n \"WARNING: anchor with empty name at (%i, %i) in glyph '%s', ignored.\"\n % (position[0], position[1], glyph.name)\n )\n else:\n if (glyph.name, name) in self.anchorPositions.keys():\n print(\n \"WARNING: Duplicate anchor name '%s' requested in glyph '%s' when trying to add anchor. Ignored.\"\n % (name, glyph.name)\n )\n else:\n self.anchorPositions[(glyph.name, name)] = position\n if name in self.anchorGlyphs.keys():\n self.anchorGlyphs[name] += [glyph.name]\n else:\n self.anchorGlyphs[name] = [glyph.name]\n if addToGlyph:\n glyph.appendAnchor(name, position)\n\n def moveAnchor(self, name, newPosition):\n # happens automatically - why?\n # probably only for current glyph, not \"inverted\" view\n pass\n\n def renameAnchor(self, name):\n pass\n\n def deleteAnchor(self, name):\n pass\n\n def getMatchingAnchorName(self, name):\n # returns \"inverted\" anchor name, i.e. 
with leading underscore added or\n # removed\n if name[0] == \"_\":\n return name[1:]\n else:\n return \"_\" + name\n\n def getAnchorNames(self):\n # TODO: anchorNames should not be constructed each time this method is\n # called.\n # Better to build it once and modify it together with other anchor\n # modifications\n anchorNames = []\n for a in sorted(self.anchorGlyphs.keys()):\n if len(a) > 0:\n if a[0] != \"_\":\n anchorNames.append(\n {\n \"Show\": self.getVisibility(\"anchor\", a, False),\n \"Name\": a,\n }\n )\n return anchorNames\n\n def getAnchoredGlyphNames(self, anchorName):\n # print(\"Looking up anchored glyphs for\", anchorName)\n targetAnchorName = self.getMatchingAnchorName(anchorName)\n if targetAnchorName in self.anchorGlyphs.keys():\n return self.anchorGlyphs[targetAnchorName]\n return []\n\n def getAnchoredGlyphNamesForList(self, anchorNames, marks=False):\n anchoredGlyphs = []\n for an in anchorNames:\n if marks:\n an = self.getMatchingAnchorName(an)\n if an in self.anchorGlyphs.keys():\n anchoredGlyphs += self.anchorGlyphs[an]\n result = []\n # print(\"anchoredGlyphs:\", anchoredGlyphs)\n for g in sorted(set(anchoredGlyphs)):\n if marks:\n result.append(\n {\"Show\": self.getVisibility(\"mark\", g, False), \"Name\": g}\n )\n else:\n result.append(\n {\"Show\": self.getVisibility(\"glyph\", g, False), \"Name\": g}\n )\n return result\n\n def selectGlyphsWithAnchorName(self, anchorName):\n self.font.selection = self.getAnchoredGlyphNames(\n self.getMatchingAnchorName(anchorName)\n )\n # self.font.update()\n"
},
{
"alpha_fraction": 0.8156028389930725,
"alphanum_fraction": 0.8156028389930725,
"avg_line_length": 27.200000762939453,
"blob_id": "db3d87979094dca1a9ba066a15b36d120fe7af5a",
"content_id": "784f40512587e22021109f5e47a153e83ca73e3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 5,
"path": "/Anchor Overlay Tool.roboFontExt/lib/install.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "from mojo.events import installTool\nfrom AnchorTool import AnchorTool\n\ninstallTool(AnchorTool())\nprint(\"Anchor Tool installed in tool bar.\")\n"
},
{
"alpha_fraction": 0.5376942157745361,
"alphanum_fraction": 0.543908417224884,
"avg_line_length": 31.70053482055664,
"blob_id": "24d87aa6c8d5443cf4fbc8432b37e83d93f5ae50",
"content_id": "88d2fbca876a9209cf160c5ae89c256592368e34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6115,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 187,
"path": "/Anchor Overlay Tool.roboFontExt/lib/AnchorTool.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "from os.path import join, dirname, isfile\n\nfrom AppKit import NSBezierPath, NSImage\n\nfrom fontTools.misc.arrayTools import pointInRect\n\nfrom lib.tools import bezierTools # for single point click selection\nfrom lib.tools.defaults import getDefaultColor\nfrom mojo.events import BaseEventTool\nfrom mojo.roboFont import CurrentGlyph\n\nfrom AnchorOverlay import AnchorOverlay\n\n\niconpath = join(dirname(__file__), \"toolbarToolsAnchor.pdf\")\n\nif isfile(iconpath):\n toolbarIcon = NSImage.alloc().initByReferencingFile_(iconpath)\nelse:\n toolbarIcon = None\n print(\"Warning: Toolbar icon not found: <%s>\" % iconpath)\n\n\nclass AnchorTool(BaseEventTool):\n def setup(self):\n self.pStart = None\n self.pEnd = None\n self._selectedMouseDownPoint = None\n\n def getToolbarIcon(self):\n return toolbarIcon\n\n def getToolbarTip(self):\n return \"Anchor Tool\"\n\n def becomeActive(self):\n # print(\"becomeActive\")\n self.anchorOverlayUI = AnchorOverlay()\n\n def becomeInactive(self):\n # print(\"becomeInactive\")\n self.anchorOverlayUI.windowCloseCallback(None)\n self.anchorOverlayUI.w.close()\n\n def keyDown(self, event):\n # align via key commands\n c = event.characters()\n if c == \"X\":\n self.anchorOverlayUI.centerAnchorX()\n elif c == \"Y\":\n self.anchorOverlayUI.centerAnchorY()\n # move anchors with arrow keys\n # default increment is 10 units, hold down shift for 5, option for 1\n # (like in Metrics Machine)\n if self.shiftDown:\n inc = 5\n elif self.optionDown:\n inc = 1\n else:\n inc = 10\n if self.arrowKeysDown[\"up\"]:\n d = (0, inc)\n elif self.arrowKeysDown[\"down\"]:\n d = (0, -inc)\n elif self.arrowKeysDown[\"left\"]:\n d = (-inc, 0)\n elif self.arrowKeysDown[\"right\"]:\n d = (inc, 0)\n else:\n d = (0, 0)\n if d != (0, 0):\n # d = roundCoordinates(d)\n g = CurrentGlyph()\n g.prepareUndo(undoTitle=\"Move anchors in /%s\" % g.name)\n for a in g.anchors:\n if a.selected:\n a.x = int(round(a.x)) + d[0]\n a.y = int(round(a.y)) + d[1]\n g.performUndo()\n\n def shouldShowMarqueRect(self):\n return True\n\n def shouldShowSelection(self):\n return True\n\n def _guessAnchorName(self, glyph, p):\n if p.x <= glyph.width // 3:\n horizontal = \"Left\"\n elif p.x >= glyph.width * 2 // 3:\n horizontal = \"Right\"\n else:\n horizontal = \"\"\n if p.y <= glyph.box[2] // 3:\n vertical = \"bottom\"\n elif p.y >= glyph.box[2] * 2 // 3:\n vertical = \"top\"\n else:\n vertical = \"center\"\n name = vertical + horizontal\n if (\n glyph.name,\n name,\n ) in self.anchorOverlayUI.fontAnchors.anchorPositions.keys():\n name += \"Attach\"\n return name\n\n def _newAnchor(self, p):\n # Add an anchor at position p\n g = CurrentGlyph()\n newAnchorName = self._guessAnchorName(g, p)\n g.prepareUndo(\n undoTitle=\"Add anchor %s to /%s\" % (newAnchorName, g.name)\n )\n self.anchorOverlayUI.addAnchorAndUpdateList(\n g, newAnchorName, (p.x, p.y)\n )\n g.performUndo()\n\n def _normalizeBox(self, p0, p1):\n # normalize selection rectangle so it is always positive\n return (\n min(p0.x, p1.x),\n min(p0.y, p1.y),\n max(p0.x, p1.x),\n max(p0.y, p1.y),\n )\n\n def _getSelectedPoints(self):\n if self.pStart and self.pEnd:\n box = self._normalizeBox(self.pStart, self.pEnd)\n for contour in self._glyph:\n for p in contour.onCurvePoints:\n if pointInRect((p.x, p.y), box):\n self.selection.addPoint(\n p, self.shiftDown, contour=contour\n )\n self._selectedMouseDownPoint = (p.x, p.y)\n for anchor in self._glyph.anchors:\n if pointInRect((anchor.x, anchor.y), box):\n self.selection.addAnchor(anchor, self.shiftDown)\n 
self._selectedMouseDownPoint = (anchor.x, anchor.y)\n\n def mouseDown(self, point, clickCount):\n if not (self.shiftDown):\n self.selection.resetSelection()\n if clickCount > 1:\n self._newAnchor(point)\n else:\n self.pStart = point\n self.pEnd = None\n s = self._view.getGlyphViewOnCurvePointsSize(minSize=7)\n for contour in self._glyph:\n for p in contour.onCurvePoints:\n if bezierTools.distanceFromPointToPoint(p, point) < s:\n self.selection.addPoint(\n p, self.shiftDown, contour=contour\n )\n self._selectedMouseDownPoint = (p.x, p.y)\n return\n for anchor in self._glyph.anchors:\n if bezierTools.distanceFromPointToPoint(anchor, point) < s:\n self.selection.addAnchor(anchor, self.shiftDown)\n self._selectedMouseDownPoint = (anchor.x, anchor.y)\n return\n\n def mouseUp(self, point):\n self.pEnd = point\n self._getSelectedPoints()\n self.pStart = None\n self.pEnd = None\n self._selectedMouseDownPoint = None\n\n def mouseDragged(self, point, delta):\n self.pEnd = point\n # self._getSelectedPoints()\n\n def draw(self, scale):\n if self.isDragging() and self.pStart and self.pEnd:\n r = self.getMarqueRect()\n if r:\n color = getDefaultColor(\"glyphViewSelectionMarqueColor\")\n color.set()\n path = NSBezierPath.bezierPathWithRect_(r)\n path.fill()\n return\n # self.drawBackgroundSelection(scale)\n"
},
{
"alpha_fraction": 0.824999988079071,
"alphanum_fraction": 0.824999988079071,
"avg_line_length": 40,
"blob_id": "d72ccd0eb6e62185b7b504f8ef818a929fe58e13",
"content_id": "f9e688b894de3bc89ffd963d0cc1eb8b99aeba12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 1,
"path": "/Anchor Overlay Tool.roboFontExt/lib/extensionID.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "extensionID = \"de.kutilek.anchorOverlay\""
},
{
"alpha_fraction": 0.7696793079376221,
"alphanum_fraction": 0.7842565774917603,
"avg_line_length": 27.58333396911621,
"blob_id": "c2905d4e028450e1fde54849e6f2015bc8b49e20",
"content_id": "e27bbff977b89c533ac506230fc63746c19dcb2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 12,
"path": "/Anchor Overlay Tool.roboFontExt/lib/settings.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "from extensionID import extensionID\nfrom grtools.SettingsWindow import SettingsWindow\n\nmy_settings = SettingsWindow(extensionID, \"Anchor Overlay Tool Settings\")\n\nmy_settings.column = 10\nmy_settings.width = 200\n\nmy_settings.add(\"preview\", True, \"Show in preview mode\")\nmy_settings.add(\"lockOutlines\", True, \"Lock outlines\")\n\nmy_settings.show()\n"
},
{
"alpha_fraction": 0.4862552881240845,
"alphanum_fraction": 0.5079777240753174,
"avg_line_length": 27.50411033630371,
"blob_id": "294fad870be226333a1edde3265233d470b74d0e",
"content_id": "a61824b70d000aaef274d1bd1824dc90810a950c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10404,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 365,
"path": "/Anchor Overlay Tool.roboFontExt/lib/RecomposeSelected.py",
"repo_name": "jenskutilek/AnchorOverlayTool",
"src_encoding": "UTF-8",
"text": "from __future__ import division, print_function\n\n\"\"\"\nRecompose selected glyphs, using anchor positions as reference for placement.\nAlso resets the metrics of the composite to those of the base glyph(s).\n\nJens Kutilek\nVersion 0.1: 2013-05-28\nVersion 0.2: 2014-08-05 - Implemented chained accents positioning\nVersion 0.3: 2014-11-22 - Bug fixes for ligatures\nVersion 0.4: 2016-02-03 - Support kerning when positioning ligature-style components\n\"\"\"\n\nfrom operator import attrgetter\nfrom re import compile\n\n\nclass jkKernInfo(object):\n def __init__(self, font):\n self.font = font\n self.group_name_pattern = compile(\"^@MMK_*\")\n self.group_name_l_pattern = compile(\"^@MMK_L_*\")\n self.group_name_r_pattern = compile(\"^@MMK_R_*\")\n self._analyze_kerning()\n\n def is_kerning_group(self, name, side=None):\n # Test if supplied name is a kerning group name\n if side is None:\n return self.group_name_pattern.search(name)\n elif side == \"l\":\n return self.group_name_l_pattern.search(name)\n elif side == \"r\":\n return self.group_name_r_pattern.search(name)\n return False\n\n def _analyze_kerning(self):\n self.kerning = self.font.kerning\n self.group_info = {\n \"l\": {},\n \"r\": {},\n }\n for group_name, group_content in self.font.groups.items():\n if self.is_kerning_group(group_name, \"l\"):\n for glyph_name in group_content:\n self.group_info[\"l\"][glyph_name] = group_name\n if self.is_kerning_group(group_name, \"r\"):\n for glyph_name in group_content:\n self.group_info[\"r\"][glyph_name] = group_name\n\n def get_group_for_glyph(self, glyph_name, side):\n group_name = self.group_info[side].get(glyph_name, None)\n return group_name\n\n def getKernValue(self, left, right):\n left_group = self.get_group_for_glyph(left, \"l\")\n right_group = self.get_group_for_glyph(right, \"r\")\n pair_value = self.kerning.get((left, right), None)\n if pair_value is not None:\n return pair_value\n lg_value = self.kerning.get((left_group, right), None)\n if lg_value is not None:\n return lg_value\n rg_value = self.kerning.get((left, right_group), None)\n if rg_value is not None:\n return rg_value\n group_value = self.kerning.get((left_group, right_group), None)\n if group_value is None:\n group_value = 0\n return group_value\n\n\ndef getBaseName(glyphname):\n if \".\" in glyphname and not (glyphname in [\".notdef\", \".null\"]):\n glyphname = glyphname.split(\".\", 1)[0]\n return glyphname\n\n\ndef getMatchingAnchorName(name):\n # returns \"inverted\" anchor name, i.e. 
with leading underscore added or\n # removed\n if name[0] == \"_\":\n return name[1:]\n else:\n return \"_\" + name\n\n\ndef getBaseGlyphName(font, name):\n g = font[name]\n baseGlyphCandidates = []\n for c in g.components:\n baseGlyphCandidates.append(c.baseGlyph)\n numCandidates = len(baseGlyphCandidates)\n if numCandidates == 0:\n return name\n elif numCandidates == 1:\n return baseGlyphCandidates[0]\n else:\n # TODO: plausibility check if the base glyph really is the first\n # component.\n # print(baseGlyphCandidates)\n return baseGlyphCandidates[0]\n\n\ndef clearAnchors(glyph):\n for a in glyph.anchors:\n glyph.removeAnchor(a)\n\n\ndef deleteAnchor(glyph, name, position):\n for a in glyph.anchors:\n if a.name == name and a.position == position:\n glyph.removeAnchor(a)\n break\n\n\ndef repositionComponents(glyphname, font):\n print(\"Repositioning composites in '%s' ...\" % glyphname)\n basename = getBaseGlyphName(font, glyphname)\n # print(\" Base glyph is: %s\" % basename)\n\n nameWithoutSuffix = getBaseName(glyphname)\n\n anchor_map = {}\n\n baseGlyph = font[basename]\n\n totalWidth = 0\n\n modified = False\n prevComponentName = None\n kerning = 0\n is_liga = False\n\n for i, c in enumerate(font[glyphname].components):\n c = font[glyphname].components[i]\n print(f\"\\n Component: {c.baseGlyph}\")\n if (\n nameWithoutSuffix in ignoreAnchorNames\n or \"_\" in nameWithoutSuffix\n and not nameWithoutSuffix.endswith(\"comb\")\n ):\n # Handle as ligature resp. ignore anchors\n is_liga = True\n if prevComponentName is not None:\n kerning = kern_info.getKernValue(\n prevComponentName, c.baseGlyph\n )\n print(\n \"Kerning /%s/%s = %s\"\n % (prevComponentName, c.baseGlyph, kerning)\n )\n if kerning is None:\n kerning = 0\n # Put glyphs next to each other\n d = (int(round(totalWidth + kerning)), 0)\n if c.offset != d:\n modified = True\n font[glyphname].prepareUndo(\n f\"Reposition components in /{glyphname}\"\n )\n print(\" Setting component offset to (%i, %i).\" % d)\n c.offset = d\n else:\n # Handle as mark positioning\n anchor_found = False\n for mark_anchor in sorted(\n font[c.baseGlyph].anchors, key=attrgetter(\"name\")\n ):\n # print(f\" Mark anchor: {mark_anchor}\")\n if i == 0:\n if mark_anchor.name.startswith(\"_\"):\n continue\n # print(\n # \" Add anchor from base glyph: '%s'\" % mark_anchor.name\n # )\n anchor_map[mark_anchor.name] = mark_anchor.position\n\n base_anchor_name = getMatchingAnchorName(mark_anchor.name)\n # print(\n # \" Looking for matching anchor for '%s': '%s' ...\"\n # % (\n # mark_anchor.name,\n # base_anchor_name,\n # )\n # )\n\n for name in sorted(anchor_map.keys(), reverse=True):\n pos = anchor_map.get(name, (0, 0))\n if name == base_anchor_name:\n x, y = pos\n d = (\n x - mark_anchor.x,\n y - mark_anchor.y,\n )\n if c.offset != d:\n modified = True\n font[glyphname].prepareUndo(\n f\"Reposition components in /{glyphname}\"\n )\n print(f\" Moving component {c.offset} -> {d}\")\n c.offset = (int(round(d[0])), int(round(d[1])))\n\n for temp_anchor in sorted(\n font[c.baseGlyph].anchors,\n key=attrgetter(\"name\"),\n reverse=True,\n ):\n if temp_anchor.name.startswith(\"_\"):\n break\n\n anchor_map[temp_anchor.name] = (\n temp_anchor.x + d[0],\n temp_anchor.y + d[1],\n )\n anchor_found = True\n break\n\n if anchor_found:\n break\n else:\n print(\n \" No matching anchor found, \"\n \"setting offset to (0, 0).\"\n )\n if c.offset != (0, 0):\n c.offset = (0, 0)\n\n totalWidth += font[c.baseGlyph].width + kerning\n font.changed()\n prevComponentName = c.baseGlyph\n\n 
if is_liga or nameWithoutSuffix in ligatureNames:\n # For ligatures, set width to width of all components combined\n w = totalWidth\n else:\n # set width of glyph from baseglyph\n w = baseGlyph.width\n\n if w != font[glyphname].width:\n print(\n \" Setting width from base glyph: %i -> %i.\"\n % (font[glyphname].width, w)\n )\n if not modified:\n font[glyphname].prepareUndo(\n \"Reposition components in /%s\" % glyphname\n )\n font[glyphname].width = w\n\n if modified:\n font[glyphname].performUndo()\n font[glyphname].changed()\n print(\"... component positions were modified.\")\n else:\n print(\"... everything is fine.\")\n\n\nligatureNames = [\n \"uniFB00\",\n \"fi\",\n \"fl\",\n \"uniFB01\",\n \"uniFB02\",\n \"uniFB03\",\n \"uniFB04\",\n \"uniFB05\",\n \"uniFB06\",\n \"dcaron\",\n \"lcaron\",\n \"IJ\",\n \"ij\",\n \"napostrophe\",\n \"onequarter\",\n \"onehalf\",\n \"threequarters\",\n \"onethird\",\n \"twothirds\",\n \"uni2155\",\n \"uni2156\",\n \"uni2157\",\n \"uni2158\",\n \"uni2159\",\n \"uni215A\",\n \"oneeighth\",\n \"threeeighths\",\n \"fiveeighths\",\n \"seveneighths\",\n \"uni215F\",\n \"uni2150\",\n \"uni2151\",\n \"uni2152\",\n \"uni2189\",\n \"percent\",\n \"perthousand\",\n \"germandbls\",\n \"uni01C4\",\n \"uni01C5\",\n \"uni01C6\",\n \"uni01C7\",\n \"uni01C8\",\n \"uni01C9\",\n \"uni01CA\",\n \"uni01CB\",\n \"uni01CC\",\n]\n\nignoreAnchorNames = [\n \"uniFB00\",\n \"fi\",\n \"fl\",\n \"uniFB01\",\n \"uniFB02\",\n \"uniFB03\",\n \"uniFB04\",\n \"uniFB05\",\n \"uniFB06\",\n \"IJ\",\n \"ij\",\n \"napostrophe\",\n \"onequarter\",\n \"onehalf\",\n \"threequarters\",\n \"onethird\",\n \"twothirds\",\n \"uni2155\",\n \"uni2156\",\n \"uni2157\",\n \"uni2158\",\n \"uni2159\",\n \"uni215A\",\n \"oneeighth\",\n \"threeeighths\",\n \"fiveeighths\",\n \"seveneighths\",\n \"uni215F\",\n \"uni2150\",\n \"uni2151\",\n \"uni2152\",\n \"uni2189\",\n \"percent\",\n \"perthousand\",\n \"uni01C4\",\n \"uni01C5\",\n \"uni01C6\",\n \"uni01C7\",\n \"uni01C8\",\n \"uni01C9\",\n \"uni01CA\",\n \"uni01CB\",\n \"uni01CC\",\n]\n\nf = CurrentFont()\n\nglyphs = []\n\nif CurrentGlyph() is not None:\n glyphs = [CurrentGlyph().name]\nelif f.selection:\n glyphs = f.selection\n\nkern_info = jkKernInfo(f)\n\nfor glyphname in glyphs:\n result = repositionComponents(glyphname, f)\n"
}
] | 9 |
AnthonyTIrwin/PythonMathTester | https://github.com/AnthonyTIrwin/PythonMathTester | 8184d9d24b6ef676faa984a6408e9eca00523ea5 | f3a0d062a5d8542650b43fe5273c162837bd348a | 5f563029f4a97bb0919ebccb886270013f4ff32c | refs/heads/main | 2023-01-12T15:37:37.969556 | 2020-11-20T14:36:40 | 2020-11-20T14:36:40 | 307,522,485 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.767008364200592,
"alphanum_fraction": 0.767008364200592,
"avg_line_length": 32.46875,
"blob_id": "55abb70683dcb3b590f3029f93a02d945a7d313b",
"content_id": "38f2eea4d7d7b5b680be46e1fd0e1299869e6da6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 32,
"path": "/README.md",
"repo_name": "AnthonyTIrwin/PythonMathTester",
"src_encoding": "UTF-8",
"text": "# This is a Python Math Tester.\nThe user is asked to input their name and how many questions they want to try.\nIt is encouraged that the player do the test mentally, as this is a test of mental acumen.\n \n\n## Accesing openweather API.\nAs a bonus feature the app provides the user the opportunity to see what the local weather\nusing the openweather API.\n\n## Temperature conversion\nOpenweather API function takes temperature data in K and converts it it F.\n\n\n## Test Computation\n\nMore than three functions are created, whatisweather(), randumone(), rannumtwo(), teststart(), \nsaveit(), scoreboard(). Randnumone() and Randnumtwo() return numberswhich are used in the addition\nproblem.\n\n\n\n## Reading and Writing to .CSV\n\nUpon completion of test, user is given option of saving their score for further refrence. User can also read the .CSV\nin the form of the scoreboard.\n\n\n\n\n\n## The app can be launched by placing files in a same local directory and by entering into bash: python main.py.\nThe computer has to have access to the internet for a full-featured user experience. \n\n"
},
{
"alpha_fraction": 0.6186331510543823,
"alphanum_fraction": 0.6357188820838928,
"avg_line_length": 32.35483932495117,
"blob_id": "afb9145b1dfb307cfb51c24400c7c387d992303b",
"content_id": "54ec51a79482db562c9b0765bd799b11036a15e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3102,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 93,
"path": "/main.py",
"repo_name": "AnthonyTIrwin/PythonMathTester",
"src_encoding": "UTF-8",
"text": "import random\nimport csv\nimport datetime\nimport requests\n\n# Added feature of local temperature API.\ndef whatisweather(): \n API_key = \"f360ccaab20b5b1f7087127ed1a6d955\"\n base_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n zip_code = input(\"Enter your Zip code: \")\n Final_url = base_url + \"appid=\" + API_key + \"&zip=\" + zip_code\n weather_data = requests.get(Final_url).json()\n localinf = weather_data['main']['temp']\n ink = (localinf - 273.15) * 1.8 + 32\n print('The temperature in ' + weather_data['name'] + \" is \" + str(round(ink)) + 'F')\n\n# Function that starts the math assesment. \ndef teststart():\n scoreright = 0\n counter = 0\n while counter < questsize:\n testvaraone = rannumone()\n testvaratwo = rannumtwo()\n print(\"What's \" + str(testvaratwo) + \" plus \" + str(testvaraone))\n answer = int(input(\"Answer: \"))\n if answer == testvaraone + testvaratwo:\n print(\"Good Job!\")\n scoreright += 1\n counter += 1\n else:\n print(\"Wrong!\")\n counter += 1\n else:\n print('You got ' + str(scoreright) + ' out of ' + str(questsize) + ' questions!')\n saveit(scoreright, questsize)\n# Random Number Generator Functions\ndef rannumone():\n return int(random.randint(1,1000))\ndef rannumtwo(): \n return int(random.randint(1, 1000))\n\n# Saving the score and the name to .csv\ndef saveit(scoreright, questsize):\n wannasave = input(str(\"Would you like to save your score to a .csv? Y/N?\")).lower()\n if wannasave == \"y\":\n print(\"okay\")\n with open('scorecard.csv', 'a', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',',quotechar=' ', quoting=csv.QUOTE_MINIMAL)\n timewrite = '{:%Y-%m-%d}'.format(datetime.datetime.now())\n spamwriter.writerow([namez] + [scoreright] + [questsize] + [timewrite])\n again()\n else:\n print('Fine, dont save it') \n again() \n\n# Function that reads .csv vile and displays the Name, Score and Date\ndef scoreboard():\n wannsee = input('Would you like to see the scoreboard? Y/N ').lower()\n if wannsee == 'y':\n with open('scorecard.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'{\", \".join(row)}')\n line_count += 1\n else:\n print(f'\\t{row[0]} {row[1]}/{row[2]} {row[3]}')\n line_count += 1\n print(f'A Total of {line_count} scores.')\n else:\n print('Okay.')\n\n# Asks for another try at it.\ndef again():\n anothertry = input('Do you want to try again? Y/N ').lower()\n if anothertry == \"y\":\n teststart()\n else:\n print('Goodbye!')\n\n# Start Menu where user is asked to put in their name and ask whether or not they want to take the math challenge. \nwhatisweather()\nnamez = input(\" Please enter your name:\")\nscoreboard()\nprint(\"Would you like to test your computational skills??\")\nwannaplay = input(\"Y/N? \").lower()\nif wannaplay == \"y\":\n questsize = int(input(\"How many questions do you want to answer?\"))\n print(namez + \" lets Begin.\")\n teststart()\nelse:\n print(\"Please feel free to start when you're ready\")\n"
}
] | 2 |
anushaihalapathirana/RL-Gym-ANM-tool | https://github.com/anushaihalapathirana/RL-Gym-ANM-tool | e86c95954f4cb18b137fe5d0f4fd26daf122f99f | 2dee2da9be26f512179d313c985832718a34042b | 230bd184f70499a55223c5f42dd62c10ebdf7a2b | refs/heads/master | 2023-07-03T04:28:23.802997 | 2021-08-13T02:52:15 | 2021-08-13T02:52:15 | 385,184,094 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7541766166687012,
"alphanum_fraction": 0.7661097645759583,
"avg_line_length": 31.153846740722656,
"blob_id": "f3559e78f7f9ce5396f1ea9d78f160834dbc0405",
"content_id": "5c19ef6d8c9fa8f272fb7b25cf5b0f07b37ce760",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 13,
"path": "/src/MPC/main.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "import gym\nimport numpy as np\nfrom gym_anm import MPCAgentConstant\nfrom gym_anm import MPCAgentPerfect\nfrom testEnv import TestEnvironment\nfrom mpcPolicy import run\n\nenv = TestEnvironment()\n\n# initialize mpc constant policy agent\nmpcConstantPolicyAgent = MPCAgentConstant(env.simulator, env.action_space, env.gamma,\n safety_margin=0.96, planning_steps=10)\nrun(env, mpcConstantPolicyAgent)\n\n"
},
{
"alpha_fraction": 0.5906040072441101,
"alphanum_fraction": 0.6174496412277222,
"avg_line_length": 28.866666793823242,
"blob_id": "a5a53103a0ad01d77456d935a8ae1956944cab63",
"content_id": "d7e16db1b0c2578a6036cb6c966e9e63ca719ca9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 447,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 15,
"path": "/src/TD3/main.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "import gym\nimport sys\nfrom testEnv import TestEnvironment\nfrom traintd3 import trainTD3\nfrom testtd3 import testTD3\n\nif __name__ == '__main__':\n if len(sys.argv) == 2 and str(sys.argv[1]).lower() == 'default':\n print(\"environment is default gym_anm ANM6Easy-v0\")\n env = gym.make('gym_anm:ANM6Easy-v0')\n else: \n env = TestEnvironment()\n trainTD3(env)\n print(\"******* Training Done *************\")\n testTD3(env)"
},
{
"alpha_fraction": 0.6478375792503357,
"alphanum_fraction": 0.6632833480834961,
"avg_line_length": 30.48611068725586,
"blob_id": "78494b1c5de0dbfe3dcb636b7222b67f24659a4a",
"content_id": "ab59a899ed5b21fdd7863ae61d979fdea7fbe834",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2266,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 72,
"path": "/src/rl_techniques_baseline3/main.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "import gym\nimport os\nimport numpy as np\nfrom stable_baselines3 import SAC, PPO, A2C\nfrom testEnv import TestEnvironment\nfrom sac import create_model_SAC\nfrom ppo import create_model_PPO\nfrom a2c import create_model_A2C\n\nenv = TestEnvironment()\n\ndef load_model(algorithm, model_name):\n model = algorithm.load(model_name)\n return model\n\ndef run_test(env, model, model_name):\n episode_rewards, episode_lengths, episode_discounted_rewards = [], [], []\n for episode in range(10):\n done = False\n episode_reward = 0.0\n episode_discounted_reward = 0.0\n episode_length = 0\n obs = env.reset()\n\n # run microgrid for 10000 steps\n for step in range(10000):\n action, new_states = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n episode_reward += -reward\n episode_discounted_reward += -reward * (env.gamma ** episode_length)\n episode_length += 1\n if done:\n obs = env.reset()\n episode_rewards.append(episode_reward)\n episode_lengths.append(episode_length)\n episode_discounted_rewards.append(episode_discounted_reward)\n \n mean_reward = np.mean(episode_rewards)\n mean_discounted_reward = np.mean(episode_discounted_rewards)\n std_reward = np.std(episode_rewards)\n std_discounted_reward = np.std(episode_discounted_rewards)\n print(\"****** \", model_name, \" ******\")\n print('mean cost is %.2f' % mean_reward, 'std_cost %.3f' % std_reward)\n\n\nisSACModelAvailable = os.path.isfile('sac_gym_anm_model.zip')\nisPPOModelAvailable = os.path.isfile('ppo_gym_anm_model.zip')\nisA2CModelAvailable = os.path.isfile('a2c_gym_anm_model.zip')\n\n# SAC model\nif(not isSACModelAvailable):\n print(\"SAC Model saved\")\n create_model_SAC(env)\n\nsac_model = load_model(SAC, 'sac_gym_anm_model')\nrun_test(env, sac_model, 'SAC')\n\n# PPO model\nif(not isPPOModelAvailable):\n create_model_PPO(env)\n print(\"PPO Model saved\")\n\nppo_model = load_model(PPO,'ppo_gym_anm_model')\nrun_test(env, ppo_model, 'PPO')\n\n# A2c model\nif(not isA2CModelAvailable):\n create_model_A2C(env)\n print(\"A2C Model saved\")\n\na2c_model = load_model(A2C,'a2c_gym_anm_model')\nrun_test(env, a2c_model, 'A2C')"
},
{
"alpha_fraction": 0.6723768711090088,
"alphanum_fraction": 0.7109207510948181,
"avg_line_length": 34.769229888916016,
"blob_id": "266677734906c13885d1b103c516c27635559055",
"content_id": "62d7f98231343bac14ee719162258b3ba4c5cf18",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 467,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 13,
"path": "/src/rl_techniques_baseline3/sac.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "\nimport gym\nimport numpy as np\nfrom stable_baselines3 import SAC\nfrom testEnv import TestEnvironment\n\ndef create_model_SAC(env):\n # modify the action space - all actions will lie in [-1, 1]\n env.action_space = gym.spaces.Box(low=-1, high=1, shape=env.action_space.shape, dtype=np.float32)\n\n model = SAC(\"MlpPolicy\", env, gamma=0.995, verbose=0)\n model.learn(total_timesteps=10000, log_interval=4)\n # save the model\n model.save(\"sac_gym_anm_model\")\n\n"
},
{
"alpha_fraction": 0.6116931438446045,
"alphanum_fraction": 0.612031102180481,
"avg_line_length": 35.53086471557617,
"blob_id": "1f33f1fe2a878e6cc188390c2c67188c175fdc23",
"content_id": "ce9bec68335a7ba914c72084c000cfba7c34b621",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2963,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 81,
"path": "/newEnvTemplate.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis file gives the template to follow when creating new gym-anm environments.\nFor more information, see https://gym-anm.readthedocs.io/en/latest/topics/design_new_env.html.\n\"\"\"\n\n\n# inherit from ANMEnv super class\nfrom gym_anm import ANMEnv\n\nclass CustomEnvironment(ANMEnv):\n\n def __init__(self):\n\n \"\"\"\n network: a Python dictionary that describes the structure and characteristics of the distribution\n network G and the set of electrical devices D.\n \"\"\"\n network = {'baseMVA': ...,\n 'bus': ...,\n 'device': ...,\n 'branch': ...} # power grid specs\n\n \"\"\"\n obs: a list of tuples corresponding to the variables to include in observation vectors. \n all in MW units. \n \n Alternatively, the obs object can be defined as a customized function\n that returns observation vectors when called (i.e., ot = obs(st)), or as a string ’state’.\nIn the later case, the environment becomes fully observable and observations ot = st are emitted\n\nthere are combinations for the observation parameters are available\n \"\"\"\n observation = ... # observation space\n\n \"\"\"\n the number of auxiliary variables K in the state vector given by \n \"\"\"\n K = ... # number of auxiliary variables\n\n\n delta_t = ... # time interval between timesteps\n gamma = ... # discount factor\n lamb = ... # penalty weighting hyperparameter\n aux_bounds = ... # bounds on auxiliary variable (optional)\n costs_clipping = ... # reward clipping parameters (optional)\n seed = ... # random seed (optional)\n\n super().__init__(network, observation, K, delta_t, gamma, lamb,\n aux_bounds, costs_clipping, seed)\n\n def init_state(self):\n ...\n\n \"\"\"\n method that receives the current state vector s(t) and should return the outcomes of the internal variables for timestep t + 1. \n It must be implemented by the designer of the task, with the only constraint being that it must return a list of |DL| + |DRER| + K values.\n \"\"\"\n def next_vars(self, s_t):\n ...\n\n \"\"\"\n This method is optional and only useful if the observation space is specified as\na callable object. In the latter case, observation_space() should return the (potentially loose) bounds of\nthe observation space O, so that agents can easily normalize emitted observation vectors.\n \"\"\"\n def observation_bounds(self): # optional\n ...\n\n \"\"\"\n support rendering of the interactions\nbetween the agent and the new environment. render() should update the visualization every time it gets\ncalled, \n \"\"\"\n def render(self, mode='human'): # optional\n ...\n\n\"\"\"\nclose() should end the rendering process. \n\"\"\"\n def close(self): # optional\n ...\n"
},
{
"alpha_fraction": 0.537617564201355,
"alphanum_fraction": 0.5736677050590515,
"avg_line_length": 30.121952056884766,
"blob_id": "f04c5d96aa6756170a48149ac5395a600e84b9e5",
"content_id": "62fcf8048198f503a265e73502ce1bb79aa3f0c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1276,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 41,
"path": "/src/TD3/testtd3.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os, sys\nimport gym\nfrom agent import Agent\nfrom testEnv import TestEnvironment\n\n\ndef testTD3(env):\n \n agent = Agent(alpha=0.001, beta=0.001, \n input_dims=env.observation_space.shape, tau=0.005,\n env=env, batch_size=100, layer1_size=400, layer2_size=300,\n n_actions=env.action_space.shape[0])\n \n n_games = 10\n \n best_score = env.reward_range[0]\n score_history = []\n\n for i in range(n_games):\n state = env.reset()\n score = 0\n for j in range(10000):\n action = agent.choose_action(state)\n new_state, reward, done, info = env.step(action)\n agent.remember(state, action, reward, new_state, done)\n agent.load_models()\n if len(sys.argv) == 2 and str(sys.argv[1]).lower() == 'default':\n env.render()\n score += reward\n state = new_state\n\n score_history.append(score)\n avg_score = np.mean(score_history[-100:])\n \n mean_reward = np.mean(score_history)\n std_reward = np.std(score_history)\n print('mean cost is %.2f' % mean_reward, 'std_cost %.3f' % std_reward)\n\n\n # print('episode ', i, 'score %.2f' % score, 'trailing 100 games avg %.3f' % avg_score)\n"
},
{
"alpha_fraction": 0.6368386745452881,
"alphanum_fraction": 0.6453298330307007,
"avg_line_length": 29.6200008392334,
"blob_id": "23d25fedf3f4d4054afdd7a5da54c44edc52807d",
"content_id": "09cb0f0e8d3f253f8245e974836afa3fa31cab14",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1531,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 50,
"path": "/README.md",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "# RL-Gym-ANM-tool\n\nThis project contains the python scripts of microgrid generation and simulation using Gym-ANM tool. (https://github.com/robinhenry/gym-anm)\n\nAI techniques with Gym-ANM\n\n* MPC\n* PPO\n* SAC\n* A2C\n* TD3\n\n## Code Structure\n\n```\n src - This folder contains the RL algorithms implemented and apply for Gym-ANM simple environment\n \n |\n \n MPC - How to use Gym-ANM built in MPC algorithm\n \n TD3 - Implementation of TD3 algorithm and train, model save and test\n run main.py file\n \n rl_techniques_baseline3 - this folder contains the RL techniques implemented using baseline3 library on simple gym-anm environment\n \n |\n \n a2c\n ppo\n SAC\n \n run main.py file to run all three algorithms at once. train, model save and test\n \n anmEasy6-test-env.py - How to simply access Gym-ANM6-easy environment\n \n customizedEasyenv.py - How to Customize Gym-ANM6 easy environment\n \n mpcPolicy-on-anmeasy.py - MPC policy applied on Gym-ANM6-easy environment. \n Run python mpcPolicy-on-anmeasy.py\n \n \n newEnv.py - Run this file to create new Gym-ANM environment\n\n```\n\nYou can find the full doumentation from below links \n\n1. https://github.com/anushaihalapathirana/RL-Gym-ANM-tool/blob/master/GymANM-tool.pdf\n2. https://github.com/anushaihalapathirana/RL-Gym-ANM-tool/blob/master/Gym-ANM-tool-Implementation.pdf\n"
},
{
"alpha_fraction": 0.5852205157279968,
"alphanum_fraction": 0.6042908430099487,
"avg_line_length": 24.42424201965332,
"blob_id": "8e1be84214949e00dacd5ac459c2ef19984808af",
"content_id": "7fbab7353b7736a2ca089e3dfeee532582f36916",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 839,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 33,
"path": "/src/MPC/mpcPolicy.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "import gym\nimport numpy as np\nfrom gym_anm import MPCAgentConstant\nfrom testEnv import TestEnvironment\nimport time\n\ndef run(env, agent):\n env = env\n o = env.reset()\n\n # Initialize the MPC policy.\n agent = agent\n\n n_games = 10000\n \n best_score = env.reward_range[0]\n score_history = []\n score = 0\n start_time = time\n # Run the policy.\n for step in range(n_games):\n state = env.reset()\n action = agent.act(env)\n new_state, reward, done, info = env.step(action)\n # print(f'step = {step}, reward = {reward:.3}')\n score += reward\n state = new_state\n\n score_history.append(score)\n avg_score = np.mean(score_history[-100:])\n end_time = time\n print('score %.2f' % score, 'trailing 100 games avg %.3f' % avg_score)\n print(\"time: \", end_time - start_time)\n"
},
{
"alpha_fraction": 0.6163522005081177,
"alphanum_fraction": 0.6289308071136475,
"avg_line_length": 35,
"blob_id": "b2df07cca6f387d75e62b223c2a5b99b13d5e20f",
"content_id": "ea0427ce42e1c22f4f5ed27b3041b6617662f463",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 795,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 22,
"path": "/anmEasy6-test-env.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "\nimport gym\nimport time\n\ndef run():\n env = gym.make('gym_anm:ANM6Easy-v0')\n o = env.reset() # create innitial observation space\n\n for i in range(10):\n a = env.action_space.sample() # the agent samples random actions from the action space of the ANM6Easy-v0 task for 10 timesteps.\n o, r, done, info = env.step(a)\n env.render()\n time.sleep(0.5) # otherwise the rendering is too fast for the human eye\n\n # A terminal state will reach if no solution to the power flow equations is found.\n # power grid has collapsed and is often due to a voltage collapse problem\n if done:\n # Every time a terminal state is reached, the environment gets reset.\n o = env.reset()\n env.close()\n\nif __name__ == '__main__':\n run()\n\n\n"
},
{
"alpha_fraction": 0.7024128437042236,
"alphanum_fraction": 0.737265408039093,
"avg_line_length": 30.08333396911621,
"blob_id": "9382bbdad25154a289b73311a2e7993c66e4d77f",
"content_id": "417ede31045a1ce04d8c0ade08c666282a8c5293",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 373,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 12,
"path": "/src/rl_techniques_baseline3/a2c.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "import gym\nfrom testEnv import TestEnvironment\nfrom stable_baselines3 import A2C\nfrom stable_baselines3.common.env_util import make_vec_env\n\ndef create_model_A2C(test_env):\n # Parallel environments\n env = make_vec_env(lambda: test_env, n_envs=4)\n\n model = A2C(\"MlpPolicy\", env, verbose=0)\n model.learn(total_timesteps=10000)\n model.save(\"a2c_gym_anm_model\")\n"
},
{
"alpha_fraction": 0.6789215803146362,
"alphanum_fraction": 0.7132353186607361,
"avg_line_length": 32.75,
"blob_id": "b18a4c44e9c161ab7ad1afed99b09bd61bb0cd0d",
"content_id": "c33595fa0a0cec3bac774c4eb03b951418d06a95",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 408,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 12,
"path": "/src/rl_techniques_baseline3/ppo.py",
"repo_name": "anushaihalapathirana/RL-Gym-ANM-tool",
"src_encoding": "UTF-8",
"text": "\nimport gym\nimport numpy as np\nfrom stable_baselines3 import PPO\nfrom stable_baselines3.common.env_util import make_vec_env\n\ndef create_model_PPO(test_env):\n # parallel environments\n env = make_vec_env(lambda: test_env, n_envs=4)\n model = PPO(\"MlpPolicy\", env, gamma=0.995, verbose=0)\n model.learn(total_timesteps=10000, log_interval=4)\n # save the model\n model.save(\"ppo_gym_anm_model\")\n\n\n"
}
] | 11 |
gruel/etekkatho-content-wrapper | https://github.com/gruel/etekkatho-content-wrapper | cb30ec32f47ab420d5b5aff8b2542fefd9423c20 | 7bdc0a34307c1881476eb21747987114acfd75e3 | 76bce950cfcd7776ee73406ec891f5d4cd27b690 | refs/heads/master | 2020-03-15T13:58:30.067523 | 2016-04-28T11:29:59 | 2016-04-28T11:30:02 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8090909123420715,
"alphanum_fraction": 0.8090909123420715,
"avg_line_length": 35.33333206176758,
"blob_id": "3e2b40fabf7e6c369c71b990a332abccf74ededb",
"content_id": "97da55d495cff9ca493a4d9bbbb09d28df4bfa31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 3,
"path": "/README.md",
"repo_name": "gruel/etekkatho-content-wrapper",
"src_encoding": "UTF-8",
"text": "# etekkatho-content-wrapper\n\nPython script for wrapping media files in the eTekkatho HTML header and footer.\n\n"
},
{
"alpha_fraction": 0.5865234732627869,
"alphanum_fraction": 0.5955309271812439,
"avg_line_length": 32.958824157714844,
"blob_id": "81b219e39d0263a7f1e5838caa1bcce60a5f4e9a",
"content_id": "410061145aff112c381e788d29b3d4428ff1557e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5773,
"license_type": "no_license",
"max_line_length": 327,
"num_lines": 170,
"path": "/etwrap.py",
"repo_name": "gruel/etekkatho-content-wrapper",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n'''\n\tGet resources and wrap in the eTekkatho HTML\n'''\n\nimport os\n\n# Main class\nclass BCWrap():\n\t'Main class for wrapping resource files in the eTekkatho HTML header and footer'\n\t\n\t# Vars\n\t\n\t\n\t# Constructor\n\tdef __init__(self):\n\t\tself.getContent()\n\t\n\tdef getContent(self):\n\t\tprint('Getting content...')\n\t\t\n\t\t# Loop through the folders\n\t\trootdir = '../content/'\n\t\t\n\t\tfor subdir, dirs, files in os.walk(rootdir):\n\t\t\t\t# Add the HTML header\n\t\t\t\thtml = self.getHeader()\n\t\t\t\t\n\t\t\t\t#print(\"Generating page for {}\".format(subdir))\n\t\t\t\t\n\t\t\t\tfor file in files:\n\t\t\t\t\t#print(os.path.join(subdir, file))\t\t\t\n\t\t\t\t\t# If folder contains swf add the swf body\n\t\t\t\t\tif file.endswith('.swf') and 'framework' not in file and 'assets' not in file:\n\t\t\t\t\t\t# Add the body for flash file\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor file2 in files:\n\t\t\t\t\t\t\tif file2.endswith('.xml'):\n\t\t\t\t\t\t\t\txmlFile = file2\n\t\t\t\t\t\t\n\t\t\t\t\t\thtml += \"\"\"<div style=\"width: 500px; margin:0 auto\"><embed width=\"500\" height=\"700\" flashvars=\"xmldata={xmlFile}\" \n\t\t\t\t\t\t\t\tautoplay=\"false\" wmode=\"transparent\" bgcolor=\"transparent\" \n\t\t\t\t\t\t\t\t\tpluginspage=\"http://www.adobe.com/go/getflashplayer\" allowScriptAccess=\"always\"\n\t\t\t\t\t\t\t\t\t\ttype=\"application/x-shockwave-flash\" \n\t\t\t\t\t\t\t\t\t\t\tsrc=\"{file}\" style=\"display: block;\"/></div>\"\"\".format(file = file, xmlFile = xmlFile)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t\t# If folder contains mp4 add the mp4 body\n\t\t\t\t\telif file.endswith('.mp4'):\n\t\t\t\t\t\t# Add the body for video file\n\t\t\t\t\t\t\n\t\t\t\t\t\thtml += \"\"\"<video width=\"800\" controls style=\"width:100%\">\n\t\t\t\t\t\t\t\t\t <source src=\"{file}\" type=\"video/mp4\">\n\t\t\t\t\t\t\t\t\t Your browser does not support HTML5 video.\n\t\t\t\t\t\t\t\t\t</video>\"\"\".format(file = file)\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# Add some content\n\t\t\t\thtml += \"\"\"<h2>Page title</h2>\n\t\t\t\t\t\t\t<p>\n\t\t\t\t\t\t\t\tLorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus fringilla metus vel leo convallis volutpat. Nullam ornare, quam id lobortis efficitur, risus quam auctor risus, id interdum ligula justo at elit. Etiam pharetra nulla et hendrerit semper. Mauris nec sapien lacinia, sollicitudin ante nec, lacinia quam. 
\n\t\t\t\t\t\t\t</p>\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\n\t\t\t\t# Add the nav links\n\t\t\t\thtml += self.buildNavigation()\n\t\t\t\t\n\t\t\t\t# Add the HTML footer\n\t\t\t\thtml += self.getFooter()\n\t\t\t\t\n\t\t\t\t# Write the index.html file\n\t\t\t\tprint(\"Writing index.html file for {}\".format(subdir))\n\t\t\t\t#print(html)\n\t\t\t\t\n\t\t\t\tindex = open(os.path.join(subdir, \"index.html\"), \"w\")\n\t\t\t\tindex.write(html)\n\t\t\t\tindex.close()\n\t\t\t\t\n\t\t\t\tif os.path.exists(os.path.join(subdir, \"index.html\")):\n\t\t\t\t\tprint('index.html successfully created.')\n\t\t\t\telse:\n\t\t\t\t\tprint('index.html does not exist, an error must\\'ve occurred.')\n\t\t\t\t\n\t\tprint('Done')\n\t\n\tdef getHeader(self):\n\t\theader = \"\"\"<!doctype html>\n\t\t\t\t\t\t <html>\n\t\t\t\t\t\t <head>\n\t\t\t\t\t\t\t <!-- css -->\n\t\t\t\t\t\t\t <link type=\"text/css\" rel=\"stylesheet\" href=\"../css/main.css\" media=\"all\" />\n \t\t\t\t\t\t\t <link type=\"text/css\" rel=\"stylesheet\" href=\"../css/style.css\" media=\"all\" />\n\t\t\t\t\t\t </head>\n\t\t\t\t\t\t <body>\n\t\t\t\t\t\t <div class=\"header\">\n\t\t\t\t\t\t <div class=\"container\">\n\t\t\t\t\t\t\t<p class=\"skip-link\"><a href=\"#main\" class=\"visuallyhidden focusable\">Skip to main content</a></p>\n\t\t\t\t\t\t\t<div class=\"site-info\">\n\t\t\t\t\t\t\t <h1 class=\"logo\"><a href=\"index.html\">eTekkatho</a></h1>\n\t\t\t\t\t\t\t <p class=\"logo-myanmar\">\n\t\t\t\t\t\t\t\t<a href=\"/index.html\">\n\t\t\t\t\t\t\t\t\t<img src=\" data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIsAAAA9CAYAAACdipqXAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAACkVJREFUeNrsHUt22zaQ9su+6gnCnKDMsqsyF6iZE5hZdRnlBGFOoHjZbhifgHZ7ACqrdkf1BHRPQPUELOEO2vFoAA7AjyQ/4j0926QAzA+D+QG+CJY2a2vbdtX9SLpPiB5/ubi4eDgBWBQMdx0s+4VTxxeUrPs0Ld8KYN6xYVHP0oVbxxWUTdvfqjkERgjLZuHacQQlbuVtc0KwLBrmCMJSMFtO1H1C2A5oW80MSwgf+q5euDe/sDQ2BnTPcsKkeEJY2h5YSq3h5rShlsYzqGTeJ0cSlozzkJSW4fpeLqycpWFXlGPED0eC64o+UG7zMdz4pdnthBW8W89ss1Rkrhy0yWoxaM/PG8onhiXtmT9cOHYecZZ6pjhLvsRXzltgqjlXtQWWxtTnhWUwBXgCxteKGGv3gSGHoOIHqB9uymj62vX54oGY15jKy4A+EdPvvut35wiHzqVcgaGqx93BxwZLiOamQqFyQ+9GpIuVR9DuAY6E0OXGiSA9aorNIUC/UqhqEyEsoc+YYCPUI8OSWPI61jEBj75WAvNFQiKkizHP09O/ny4ARN26tRz6Na79RmLOk/1WYMA579NjjClkbm+IHWBphuJniBzL6ALS37TzttwitHO3zYiCcjDmGDkZJnjX+o7pMFYm8b+x5lgDsmuHFYL7ZRaNlTDboC2NLxmTquIcmB6D9jH1i4ULqARi65xKYqFLTFZzjPBMLbBEQrrUiCaxBZYGe1owXizo95QuhtVTm/bQntVm68dZ4LVAPdrGzHoEduVgl1UCFzO1bBFcgK30dGVLwXdyxzHXg2FhtErTZ2wZBKa3HsMgMNg4bRhYwp4xOSYVnoSJkED1q2QZXSJBv8oWIGPoIsGvti0GZ7r4EsVgtJXCfjW3txv29dRzzFzQh1Pta/I+BUY2Dh5l6SFkoUkTGOgS+giug0d8IBOXTAzi0e8X0oX65JGw352hH+2/d4jL3JK/e4kJMQgKy4ok1VQM5HX3+dYhBHLrmiiE5J0JFkqXnTDZd8elHjzp8vLSArik7U2E7ml/G57T/rsZgpl/TTDmg2e/P4V0ERVUDyy8prCEl5ZIpU/bj0zkOcLfLycYM/Ls993Ii3LIgvuGPrg0rIJEOOCVJ2DX5O+vJmFxyJdcea7uuK8f2BNrB0I70wUWaGwQEto/cqDL3lOQE5ZHPlazwTjcCPqte7yh1sNQTXwMYwMsIQmkFSN4Q4mgX26CxWBwFhKOM+PWgj6ZEQfDy7xHUCpXF9EQmW0EREt6xmxsASgHpkpiG7Y4S2w5ixNNAIuv8G4cv19LooMFRrIn6lgy0UG8MkzBs7Uw6bZ2yJNkPQzNB0Zwc0KXUJhryYjmipgqOlMEN7IkIBMCS0Iix7WBtxSWXBJlH5p3aMjEmbBf6RiVrZEAmr5TCYKArVTABuaGfNvaI1ptxGcgb/Mxs6sNswok+SNrtNci5aHl/cGYQlhyD3tCkhurJsiAS2GpxsBBssdJs88VtxcL+onO9TJaobIIdmnIA5U+GsVgDDcuDIftt3BYdOnIsMSeWlZMF73v5T3Z39TSt7DYQHHg0MDGKFG9irabCsP2tAJiFtCvsWSiQ0dYQkvW2jgm41XRrTVzjW0hO3BsWIx0uZAQCAXHVPh750Jc3bfrtx0S5QIhU1rsoyUo9QXiA6b3H7rP1gWHnrhI5EoXsEdWKH6yHXrFxQBYIgTLw7M5L+RYPCS2r46AR2nbKk65ndOJxDHO06hVtBx1eM7CAtogJCHsN53aVNvoq+AwQ6rU8Gt4/4GG99vlINXzbYznkzLGni36mZ2K6j/nbejF77/+HAbzZHe92vc//rRl4HtiiCkDsSP6gwWPA4Ovw3sV+GeHfdpDhwtnQEZMTdLuj99+CWaGrxd+dcgsBQ/jJFvHVFV0tCUwxvDM
tk3hRrPAe2BEOSMqn7oPF7vgbKg38LM8IVZ8OgebJWY0w0dccsgQ9R7FRTJYEAFyEXfBYTnA0p6Bgfse4hCf6YqEiG5J4iqP2xbUm9aM1ryBLeh6Yf/zE5a4Y+4a1DjVMFvGNnlrGUvV034G1b94RK4GbvBv1HN74nDuwYh9A5pCZ2VVwbi+9Fe5z+/gewFjo9x077TNcBscFlVPbiCCMf5GaKutkO1yEgbuxTlKOMRJUsV8cKOVMOjbEr6C4OAq/e0JwZ6gLfDTGKkHZtybU8J5TOJthobgUSlAJT3sNub8DnCuUcKuGSvOguJQo457choCIZh6jhGTA1mp6diqYf7K5TDbQHxrXUqByhfGEJYKxsbjJs9Rs+B63myAsCQjzD/17dZNO8GdcVONe8oCk0ur+Zm+DXyyAUKT2w7Tj4QnnmMz0bjZrIxDRuQx7JfKoxAIX0KU++ILwjbKP1tAW8LKsCiasWg81bgSZjVIWkuCbEIqs0pE7IyWWjLfzxnilaRSK2G2mdpUPsmUgeYOxm2hz9/AHKYbkpqeQ/wUL308JUHVbBGhU83ULnP04vCPyFxWGk7i0uGViSzsHK28BoBJERErsjL1lpAi4cmQ5JeMF5NrQSVF2SHZYgpci4tgNs7RszD+M2xR35QIii4HTdD8GSrv3OA5EcwVMDJGOKzgfYoM+5AIX01w0dpig2hUo0Vao5JTXGYZTiksJXOxjiYEPi8U2oxLgjS9bUi7jhH6XoEIFpHVm2PmEAZGqNaXziG5J+Zgy9JeBfVeGI+mYrSPhikznGykghhio96AS8bQVy/ClHqCSFvXU3h3l1yUETV9BjmCqyde4TpNFPiJmGe36poKUl+qA1CYkYoQj1sABKi2aDxN8I/wnQRFm1fw2eE5VDgfrsiQ1LXSGxTug6dpgBDRIEDRXxqX2VK8DHQKyfuH4P/rOA5wQePiZ3d0LERXfU3IK58rZF2E5TFtT9SXjgjukG2A90v2lCCDoG4xEso9Ql4RZc1Y8ppROomYEyKpcWICUy68VEjNf0UM3evgaf5J/X5NtNRea0H07D3Fi2gRjk4REcYDXBC9MC3XSJD25HvY2E+m3IZisj+WRE1m5H3OHKrKaQQVtgU8XkG2vhYdO2ngWcnYLCXa3lqAN0Lv8RySA/oZsRFqRuUn6Dsa55DMWTFnhWq0xXL/5CGnZ7KZcUt8xor0qww0xHSc9lp3IE6Fb0FkiFvSsynkjFFB9t0NZorB5cN9S2IER2TOAoezQWgq0xwCgaltXgShSYVgqoR4cXRq6FljBtcaGdIRsgFzgztuHPcYgbNMeifZCIZ2KdCAsYHQG4e56CXRZxsFRUJTHPu4yzkIS+lyHwrybBpywnF9psKSul4EOdQbGtP+mVsV7i1enakpGG/A03tLjd4za3vD76O1F67AgKq2FQ4pFai2AlOBsoiJSpX21HooLwW7mu/As9g51Ig8Ckc3zmcQnFUwz6WHozf1H046PN4BDl+OCozltqfe+1o8tiHbfSnxWGdumPNITbscQDMLpAeBw8Bev7ofWv0F1v7KdlAb4NiPcKg8hjjF492vyz+TNLd/BBgAQYNHxzQSRxUAAAAASUVORK5CYII=\" alt=\"eTekkatho logo - Myanmar language version\" width=\"139\" height=\"61\" />\n\t\t\t\t\t\t\t\t</a></p>\n\t\t\t\t\t\t\t <h2 class=\"tagline\">Educational resources for the Myanmar academic community</h2>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"nav-primary navbar\">\n\t\t\t\t\t\t\t <div class=\"navbar-inner\">\n\t\t\t\t\t\t\t\t<ul class=\"nav\">\n\t\t\t\t\t\t\t\t <li class=\"home\"><a href=\"/index.html\">Home</a></li>\n\t\t\t\t\t\t\t\t <li class=\"subjects\"><a href=\"/classifications.html\">Subjects</a></li>\n\t\t\t\t\t\t\t\t <li class=\"search\"><a href=\"/search.html\">Search</a></li>\n\t\t\t\t\t\t\t\t <li class=\"keywords\"><a href=\"/keywords.html\">Keywords</a></li>\n\t\t\t\t\t\t\t\t <li class=\"about\"><a href=\"/about.html\">About us</a></li>\n\t\t\t\t\t\t\t\t <li class=\"help\"><a href=\"/help.html\">Help</a></li>\n\t\t\t\t\t\t\t\t</ul>\n\t\t\t\t\t\t\t </div>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"page-header\">\n\t\t\t\t\t\t\t\t<a href=\"/\" style=\"float:right;display:block\">British Council home »</a>\n\t\t\t\t\t\t\t\t<img src=\"../img/bc-logo.jpg\" alt=\"British Council logo\" width=\"200\">\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t </div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div id=\"main-inner\">\n\t\t\t\t\t\t\"\"\"\n\t\t\n\t\treturn header\n\t\n\tdef buildNavigation(self):\n\t\t# Build a navigation tree from the file structure\n\t\tnavHTML = '<div id=\"footer-nav\">'\n\t\trootdir = '../content/'\n\t\t\n\t\tfor subdir, dirs, files in os.walk(rootdir):\n\t\t\tif \"css\" not in subdir and \"img\" not in subdir and len(subdir) > 11: \n\t\t\t\tsubdirname = subdir.replace('_', ': ')\n\t\t\t\n\t\t\t\tnavHTML += \"\"\"\n\t\t\t\t<div class=\"nav-item\">\n\t\t\t\t\t» <a href=\"../{subdir}/index.html\">{subdirname}</a>\t\t\t\t\n\t\t\t\t</div>\n\t\t\t\t\"\"\".format(relpath = os.path.relpath(subdir), subdir = subdir.replace(\"../content/\", \"\"), subdirname = 
subdirname.replace(\"../content/\", \"\"))\n\t\t\n\t\tnavHTML += '</div>'\n\t\t\n\t\treturn navHTML\n\t\t\n\tdef getFooter(self):\n\t\tfooter = \"\"\"</div>\n\t\t\t\t\t<div class=\"footer\">\n\t\t\t\t\t <div class=\"container\">\n\t\t\t\t\t\t<p>eTekkatho is hosted and run by The University of Manchester<span class=\"visuallyhidden\">.</span></p>\n\t\t\t\t\t\t<ul class=\"legal-links\">\n\t\t\t\t\t\t <li><a href=\"terms-of-service.html\">Terms of service</a></li>\n\t\t\t\t\t\t <li><a href=\"copyright-licensing.html\">Copyright and licensing</a></li>\n\t\t\t\t\t\t</ul>\n\t\t\t\t\t\t<ul class=\"tribute-logos\">\n\t\t\t\t\t\t <li><img src=\"../img/university-of-manchester.png\" alt=\"The University of Manchester logo\" width=\"109\" height=\"46\" class=\"tribute-logo\" /></li>\n\t\t\t\t\t\t</ul>\n\t\t\t\t\t </div>\n\t\t\t\t\t</div>\n\t\t\t\t\t\n\t\t\t\t\t</body>\n\t\t\t\t\t</html>\"\"\"\n\t\t\n\t\treturn footer\n\n\t\nBCWrap()\n"
}
] | 2 |
isysrg/sharing_progress1_batch2 | https://github.com/isysrg/sharing_progress1_batch2 | 44d76ef000c97b7ca47fafb8b2d0e095591dff2d | 813bb66226fcfb9d947518a005c8edd7d5a2cf08 | 2953df1a6b224f133b8b6ee2859cc5161ce0b645 | refs/heads/master | 2020-08-20T01:56:25.619969 | 2019-10-31T16:00:54 | 2019-10-31T16:00:54 | 215,973,577 | 3 | 3 | null | null | null | null | null | [
{
"alpha_fraction": 0.5640000104904175,
"alphanum_fraction": 0.6320000290870667,
"avg_line_length": 13.625,
"blob_id": "c68ee688deaae25a19552706621619ba1abe2979",
"content_id": "d65b91f83c21976773d0b7f9b987a7345a4ce86a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 16,
"path": "/ISYSRG BATCH 2_Sinta Bella.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 25 17:19:28 2019\r\n\r\n@author: CCservice\r\n\"\"\"\r\n\r\nimport cv2\r\n\r\nimport glob\r\nimport matplotlib.pyplot as plt\r\n\r\ngambar = glob.glob(\"D:\\SK\\Isysrg\\Image\\1\")[0]\r\n\r\nimg = cv2.imread(gambar)\r\nplt.imshow(img) "
},
{
"alpha_fraction": 0.71875,
"alphanum_fraction": 0.7437499761581421,
"avg_line_length": 30.399999618530273,
"blob_id": "efd955a110218ecdc17f5c7858b410206aacea85",
"content_id": "aa4c9d1c55c55399ea621666cfce9c8a081da1fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 5,
"path": "/ISYSRG BATCH 2_MAdhityaReskiPratamaR.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "import cv2\r\n\r\npaths_data = r'C:/Users/Adit/Documents/Perkuliahan/ISYSRG/Anaconda/Gambar/Melanoma.jpg'\r\nimg = cv2.imread(paths_data, 0)\r\ncv2.imshow('image', img)"
},
{
"alpha_fraction": 0.6853377223014832,
"alphanum_fraction": 0.7166392207145691,
"avg_line_length": 21.423076629638672,
"blob_id": "dec1fd83622371a3f14f7ef8d435a6ac3c2d2a81",
"content_id": "17419e0b75e453406613636e1a6d8a7f760f7b94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 607,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 26,
"path": "/ISYSRG BATCH 2_Bima Pratama Anom.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\r\nimport cv2\r\nimport wfdb\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\npath_data = \"D:/Aplikasi SK/Semester 5/ISYSRG/mit-bih-arrhythmia-database-1.0.0/mit-bih-arrhythmia-database-1.0.0/100\"\r\n\r\nrecord = wfdb.rdrecord(path_data)\r\ndict_record = record.__dict__\r\n\r\nsinyal = dict_record['p_signal'][:,0]\r\n\r\n\r\nsinyal_data_jantung = sinyal[360:720]\r\n\r\n\r\nplt.plot(np.arange(len(sinyal_data_jantung)), sinyal_data_jantung)\r\nplt.show()\r\nplt.savefig('Sinyal Data Jantung.jpg')\r\n\r\n\r\ndata = \"C:/Users/HarunPininggit/Pictures/janin.jpg\"\r\ning = cv2.imread(data)\r\nplt.imshow(ing)"
},
{
"alpha_fraction": 0.665105402469635,
"alphanum_fraction": 0.709601879119873,
"avg_line_length": 23.235294342041016,
"blob_id": "b950d55d6b8fbffd6f463150b02887bf4705cae6",
"content_id": "17e30fbc95bfcad036151dbc915de742c7ce326f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 17,
"path": "/ISYSRG Batch 2_Irvan Fahreza.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "import wfdb\r\nimport numpy as np\r\nimport glob\r\nimport matplotlib.pyplot as plt\r\n\r\npath_data = 'C:/Users/USER/ISYSRG/Dataset/mit-bih-arrhythmia-database-1.0.0/124'\r\n\r\nrecord = wfdb.rdrecord(path_data)\r\ndict_record = record.__dict__\r\n\r\nattribute = wfdb.rdann(path_data,'atr')\r\nattribute_record= attribute.__dict__\r\n\r\nsignal = dict_record['p_signal'][:,0]\r\n\r\nfig,ax1=plt.subplots(nrows=1)\r\nax1.plot(np.arange(1440), signal[0:1440])"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6606741547584534,
"avg_line_length": 18.31818199157715,
"blob_id": "775f4f137d7a3b54801f7db20dbffee7b67dae1a",
"content_id": "667a5adde240571a52656739b20e7742cfee4ad6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 22,
"path": "/ISYSRG BATCH 2_MRizkyAdityaUtama.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 18 11:00:20 2019\r\n\r\n@author: Adit\r\n\"\"\"\r\n\r\n## Memotong Sinyal pada Node(Detik Ke-2)\r\n\r\nimport wfdb\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\nrecord = wfdb.rdrecord('Sinyal/101')\r\nrecord_dict = record.__dict__\r\nsinyal = record_dict['p_signal'][:,0]\r\n\r\nsinyal_satu_detik = sinyal\r\n\r\nfig,ax1 = plt.subplots(nrows=1)\r\nax1.plot(np.arange(720),signal[:720])\r\nplt.savefig('Sinyal.jpg')"
},
{
"alpha_fraction": 0.6185252666473389,
"alphanum_fraction": 0.6721511483192444,
"avg_line_length": 24.934425354003906,
"blob_id": "42f444e3b170f6c181c0e67a62098ef207cab0e4",
"content_id": "ed63cfc979fe88df9718022e8e72c11fbfc72e0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1641,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 61,
"path": "/ISRYG BATCH 2_IRAWAN.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 18 10:08:39 2019\r\n\r\n@author: User\r\n\"\"\"\r\n#Menampilkan 5 Buah Sinyal\r\n#Irawan 09011281621057\r\nimport numpy as np\r\nimport wfdb\r\nimport matplotlib.pyplot as plt\r\nimport glob\r\n\r\npath_data = 'C:/Users/User/Downloads/dataset/EKG/100'\r\nsinyal = wfdb.rdrecord(path_data)\r\nsinyal_doc = sinyal.__dict__\r\ndata = wfdb.rdann(path_data, 'atr')\r\ndata_doc = data.__dict__\r\npsignal =sinyal_doc[\"p_signal\"]\r\npsignal_0 =psignal[:,0]\r\n\r\npath_data = 'C:/Users/User/Downloads/dataset/EKG/101'\r\nsinyal = wfdb.rdrecord(path_data)\r\nsinyal_doc = sinyal.__dict__\r\ndata = wfdb.rdann(path_data, 'atr')\r\ndata_doc = data.__dict__\r\npsignal =sinyal_doc[\"p_signal\"]\r\npsignal_1 =psignal[:,0]\r\n\r\npath_data = 'C:/Users/User/Downloads/dataset/EKG/102'\r\nsinyal = wfdb.rdrecord(path_data)\r\nsinyal_doc = sinyal.__dict__\r\ndata = wfdb.rdann(path_data, 'atr')\r\ndata_doc = data.__dict__\r\npsignal =sinyal_doc[\"p_signal\"]\r\npsignal_2 =psignal[:,0]\r\n\r\npath_data = 'C:/Users/User/Downloads/dataset/EKG/103'\r\nsinyal = wfdb.rdrecord(path_data)\r\nsinyal_doc = sinyal.__dict__\r\ndata = wfdb.rdann(path_data, 'atr')\r\ndata_doc = data.__dict__\r\npsignal =sinyal_doc[\"p_signal\"]\r\npsignal_3 =psignal[:,0]\r\n\r\npath_data = 'C:/Users/User/Downloads/dataset/EKG/104'\r\nsinyal = wfdb.rdrecord(path_data)\r\nsinyal_doc = sinyal.__dict__\r\ndata = wfdb.rdann(path_data, 'atr')\r\ndata_doc = data.__dict__\r\npsignal =sinyal_doc[\"p_signal\"]\r\npsignal_4 =psignal[:,0]\r\n\r\n\r\n\r\nplt.plot(range(360),psignal_0[:360])\r\nplt.plot(range(360),psignal_1[:360])\r\nplt.plot(range(360),psignal_2[:360])\r\nplt.plot(range(360),psignal_3[:360])\r\nplt.plot(range(360),psignal_4[:360])\r\nplt.show()"
},
{
"alpha_fraction": 0.591269850730896,
"alphanum_fraction": 0.64682537317276,
"avg_line_length": 15.857142448425293,
"blob_id": "2441fd7d2206666208ef61a5990532604e197b37",
"content_id": "affe88c5683c7240bd083dd5eda3d1a836eb5088",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/ISYSRG Batch 2_Leni Estiyani.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 23 08:17:29 2019\r\n\r\n@author: Leni Estiyani\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport glob\r\nimport matplotlib.pyplot as plt\r\nfrom skimage import io, color\r\nimg = io.imread('janin.jpeg')\r\nio.imshow (img)\r\n\r\n"
},
{
"alpha_fraction": 0.6206140518188477,
"alphanum_fraction": 0.655701756477356,
"avg_line_length": 16.239999771118164,
"blob_id": "111449e7bdc47b16634699fb5b8628d2e574011a",
"content_id": "275e76f26d9b80cb1f7fbe053bc5acc52288190c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 456,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 25,
"path": "/ISRYG BATCH 2_Helti_yuniar.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\n@author: Helti yuniar\r\n\"\"\"\r\n\r\nimport wfdb\r\nimport numpy as np\r\nimport glob\r\nimport matloblib.pyplot as plt\r\n\r\n\r\npath_data= 'E:/mit-bih-arrhythmia-database-1.0.0/101'\r\n\r\nrecord = wfdb.rdrecord(path_data)\r\ndict_record = record.__dict__\r\n\r\natribute = wfdb.rdann(path_data,'atr')\r\natribute = atribute.__dict__\r\n\r\nsignal= dict_record['p_signal']\r\n\r\nfig,ax1=plt.subplots(nrows=1)\r\nax1.plot(np.arange(360),signal[:360])\r\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 19.399999618530273,
"blob_id": "a2781ac16146f09b9db90a84abe727b0d711334f",
"content_id": "11106cbfccfb19730055549f346bbf857d6e5e8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 5,
"path": "/ISYSRG BATCH 2_AnnisaKarimaRHarahap.py",
"repo_name": "isysrg/sharing_progress1_batch2",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport matplotlib.pyplot as plt\r\ndata = 'images.jpg'\r\nimg = cv2.imread(data)\r\nplt.imshow(img)"
}
] | 9 |
Adwaita/yaml2sql | https://github.com/Adwaita/yaml2sql | 2f4040934c3ddcbf51c598bd27503716eddb0047 | fcce22e02b7cf28a74086f6e235f3b77e2bc8143 | 07cb95342cb4def8dca1ff7b6951f10a37219d2d | refs/heads/master | 2018-03-30T16:55:51.377990 | 2017-04-11T18:15:21 | 2017-04-11T18:15:21 | 87,965,098 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6145387887954712,
"alphanum_fraction": 0.6175931692123413,
"avg_line_length": 30.5,
"blob_id": "e1cd8af841f394b53bd7174afc23e53d7e156aae",
"content_id": "3eefe8ebb4f503a005706ceaab48b8ef37674ac1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1637,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 52,
"path": "/src/yaml2sql.py",
"repo_name": "Adwaita/yaml2sql",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\"\"\"\n This utility accepts multi document yaml files as input,\n and produces the SQL for the table creation and data insertion.\n\n It is assumed that the input dataset is small enough\n to be kept entirely in memory.\n\n All the fields will use the TEXT type.\n\"\"\"\nfrom __future__ import print_function\nimport yaml\nimport fileinput\n\ndef read_input_data():\n \"\"\" Load the entire input data set to memory \"\"\"\n yaml_text_data = \"\"\n for line in fileinput.input():\n yaml_text_data += line\n\n return [x for x in yaml.load_all(yaml_text_data)]\n\n\ndef generate_sql_create_table(sample_row, table_name):\n \"\"\" Generate the CREATE TABLE SQL, fields are determined from a sample row\"\"\"\n print(\"DROP TABLE IF EXISTS %s ;\" % table_name)\n print(\"CREATE TABLE %s (\" % table_name)\n fields = [\"%s %s\" % (key, \"TEXT\") for key in sample_row.keys()]\n print(\",\\n\".join(fields))\n print(\");\")\n\ndef generate_multirow_insert(data, table_name):\n \"\"\" Gerenerate the mysql for a multirow-insert \"\"\"\n print(\"INSERT INTO %s\" % table_name)\n fields = [\"%s\" % key for key in data[0].keys()]\n print (\"(%s)\" % ','.join(fields))\n print(\"VALUES\")\n inserted_lines_count = 0\n for row in data:\n if inserted_lines_count > 0:\n print(\",\"),\n values = [\"'%s'\" % value for value in row.values()]\n print(\"(%s)\" % (', '.join (values))),\n inserted_lines_count += 1\n print(\";\")\n\n\nif __name__ == \"__main__\":\n data = read_input_data()\n table_name = \"table_name\"\n generate_sql_create_table(data[0], table_name)\n generate_multirow_insert(data, table_name)"
}
] | 1 |
josh-newton/python-link-parser | https://github.com/josh-newton/python-link-parser | 39a8fa6cff950075bbb8dad773a326b214196195 | 89dcd91d1554269e0ee8bbd035afe6bdcf705833 | 0d0c603c886ce75637d5ed7c8786c2bcbe6d6304 | refs/heads/master | 2020-12-24T18:55:47.484093 | 2016-05-25T13:14:40 | 2016-05-25T13:14:40 | 59,664,502 | 2 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.667664647102356,
"alphanum_fraction": 0.7065868377685547,
"avg_line_length": 32.5,
"blob_id": "30c92aa07e513d7f2ca6ddfda5449a2ec71830da",
"content_id": "284cc28eb1d770137ea5d1f0aa6ef84edae0b40a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 10,
"path": "/README.md",
"repo_name": "josh-newton/python-link-parser",
"src_encoding": "UTF-8",
"text": "Python Link Parser\n===================\nParses a web page and prints out all the href links it finds.\nTaken and adapted from the book [Violent Python](http://store.elsevier.com/product.jsp?isbn=9781597499576&pagename=search).\n\n### Usage\n```bash\npython parse-links.py -u <target url>\n```\nTarget url must include http[s]:// at the start."
},
{
"alpha_fraction": 0.6723602414131165,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 19.15625,
"blob_id": "fbb752a506168744826112c3f9f45f7098ef0817",
"content_id": "3a3aca22a56c4120218cb1a63a8a5a9d4d7ec675",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 644,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 32,
"path": "/parse-links.py",
"repo_name": "josh-newton/python-link-parser",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport mechanize\nfrom BeautifulSoup import BeautifulSoup\nimport argparse\n\ndef printLinks(url):\n\tbrowser = mechanize.Browser()\n\tpage = browser.open(url)\n\thtml = page.read()\n\t\n\ttry:\n\t\tsoup = BeautifulSoup(html)\n\t\tlinks = soup.findAll(name='a')\n\t\tfor link in links:\n\t\t\tif link.has_key('href'):\n\t\t\t\tprint link['href']\n\texcept:\n\t\tpass\n\ndef main():\n\tparser = argparse.ArgumentParser('%prog -u <target-url>')\n\tparser.add_argument('-u', dest='tgtUrl', help='specify target url')\n\targs = parser.parse_args()\n\turl = args.tgtUrl\n\tif url == None:\n\t\tprint parser.usage\n\t\texit(0)\n\telse:\n\t\tprintLinks(url)\n\nif __name__ == '__main__':\n\tmain()"
}
] | 2 |
umiundlake/links-api | https://github.com/umiundlake/links-api | 2b448c3ec03d9d94e16cf1c873a178d28634a847 | 2759e3c461dea7005f0303ae8983f0937ecb7147 | 8c84b88551edef65a3e14c7ab5cc2d3726bf9d03 | refs/heads/master | 2020-03-21T09:09:18.971769 | 2018-06-23T08:32:43 | 2018-06-23T08:32:43 | 138,382,877 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6618357300758362,
"alphanum_fraction": 0.6631776690483093,
"avg_line_length": 22.14285659790039,
"blob_id": "a824e7b7c280d80aca7885fe8c1074b80da62886",
"content_id": "abec5e1f938ef9bec5c0e727e89ed0c92835ec36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3726,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 161,
"path": "/app.py",
"repo_name": "umiundlake/links-api",
"src_encoding": "UTF-8",
"text": "from flask import Flask, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom marshmallow import Schema, fields\n\nimport os\n\n\n# This method to get an absolute path of a file works with all the operative systems.\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n#DB_URI = \"sqlite:///\" + os.path.join(BASE_DIR, \"database.db\")\nDB_URI = \"mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}\".format(\n username=\"\",\n password=\"\",\n hostname=\"\",\n databasename=\"\")\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = DB_URI\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\nclass Framework(db.Model):\n __tablename__ = \"frameworks\"\n\n # The id will be unique, cannot be null, and auto-increase.\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50))\n\nclass Link(db.Model):\n __tablename__ = \"links\"\n\n id = db.Column(db.Integer, primary_key=True)\n url = db.Column(db.String(100))\n\nclass FrameworkSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n \nclass LinkSchema(Schema):\n id = fields.Int()\n link = fields.Str()\n\[email protected](\"/\")\ndef index():\n\n return \"Hello World!\"\n\n\n# GET METHOD\n\[email protected](\"/api/frameworks/\", methods=[\"GET\"])\ndef get_frameworks():\n frameworks = Framework.query.all()\n frameworks_schema = FrameworkSchema(many=True)\n result, errors = frameworks_schema.dump(frameworks)\n\n return jsonify(result)\n\[email protected](\"/api/frameworks/<string:name>\")\ndef get_framework_by_name(name):\n framework = Framework.query.filter_by(name=name).first()\n framework_dict = dict(id=framework.id, name=framework.name)\n\n return jsonify(framework_dict)\n\n\n# POST METHOD\n\[email protected](\"/api/frameworks/\", methods=[\"POST\"])\ndef add_framework():\n new_framework = Framework(name=request.json[\"name\"])\n db.session.add(new_framework)\n db.session.commit()\n\n framework_dict = dict(id=new_framework.id, name=new_framework.name)\n\n return jsonify(framework_dict)\n\n\n# PUT METHOD\n\[email protected](\"/api/frameworks/<int:id>\", methods=[\"PUT\"])\ndef edit_framework(id):\n framework = Framework.query.get(id)\n framework.name = request.json[\"name\"]\n\n db.session.commit()\n\n framework_dict = dict(id=framework.id, name=framework.name)\n\n return jsonify(framework_dict)\n\n\n# DELETE METHOD\n\[email protected](\"/api/frameworks/<int:id>\", methods=[\"DELETE\"])\ndef delete_framework(id):\n framework = Framework.query.get(id)\n \n db.session.delete(framework)\n db.session.commit()\n\n return jsonify({\"message\": \"ok\"})\n\n\n#LINKS\n\[email protected](\"/api/links/\", methods=[\"GET\"])\ndef get_links():\n links = Link.query.all()\n links_schema = LinkSchema(many=True)\n result, errors = links_schema.dump(links)\n\n return jsonify(result)\n\[email protected](\"/api/links/<string:url>\")\ndef get_link_by_url(url):\n link = Link.query.filter_by(url=url).first()\n link_dict = dict(id=link.id, url=link.url)\n\n return jsonify(link_dict)\n\n\n# POST METHOD\n\[email protected](\"/api/links/\", methods=[\"POST\"])\ndef add_link():\n new_link = Link(url=request.json[\"url\"])\n db.session.add(new_link)\n db.session.commit()\n\n link_dict = dict(id=new_link.id, url=new_link.url)\n\n return jsonify(link_dict)\n\n\n# PUT METHOD\n\[email protected](\"/api/links/<int:id>\", methods=[\"PUT\"])\ndef edit_link(id):\n link = Link.query.get(id)\n link.url = request.json[\"url\"]\n\n db.session.commit()\n\n link_dict = 
dict(id=link.id, url=link.url)\n\n return jsonify(link_dict)\n\n\n# DELETE METHOD\n\[email protected](\"/api/links/<int:id>\", methods=[\"DELETE\"])\ndef delete_link(id):\n link = Link.query.get(id)\n \n db.session.delete(link)\n db.session.commit()\n\n return jsonify({\"message\": \"ok\"})\n"
}
] | 1 |
noobj2/Reset-Cards | https://github.com/noobj2/Reset-Cards | aedcf6a4527429777bb335d7a8fc79324af1217a | 07139070c2ff91eab8d7ed3f7bb55cdb40bc76f1 | 6b4f2c3277b78c52c694bbc957060d798a12ee6c | refs/heads/master | 2022-10-05T15:17:31.405043 | 2020-06-08T19:35:21 | 2020-06-08T19:35:21 | 270,798,576 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6437389850616455,
"alphanum_fraction": 0.6490299701690674,
"avg_line_length": 31.352941513061523,
"blob_id": "96fa360c152c4af59585a94ccda4feb6d956ba16",
"content_id": "b150f207b771395053c0ba9f5e37f6be534f91f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 17,
"path": "/__init__.py",
"repo_name": "noobj2/Reset-Cards",
"src_encoding": "UTF-8",
"text": "from aqt import mw\r\nfrom aqt.utils import showInfo, askUser\r\nfrom aqt.qt import *\r\n\r\ndef accept():\r\n reset = askUser(\"<div style='font-size: 16px'> Reset all cards to new?<br><font color=red>This action can't be undone.</font></div>\", defaultno=True, title=\"Reset Cards\")\r\n if reset:\r\n anki_ease = 0\r\n mw.col.db.execute(\"update cards set queue = ?\", anki_ease)\r\n showInfo(\"Done\", title=\"Reset Cards\")\r\n else:\r\n pass\r\n\r\n\r\naction = QAction(\"Reset &Cards\", mw)\r\naction.triggered.connect(accept)\r\nmw.form.menuTools.addAction(action)\r\n"
}
] | 1 |
sumukhhk/AI_LAB-1BM17CS106 | https://github.com/sumukhhk/AI_LAB-1BM17CS106 | 79728278f8436ab82846a27a2dd98e188947d78b | 7ac49b6585e3e0cddeb511424332fbdfb983ec29 | 3d9cd867c1a0be471ba83eb534d987796a3f7167 | refs/heads/master | 2020-12-23T11:57:43.970348 | 2020-05-03T17:56:07 | 2020-05-03T17:56:07 | 237,144,180 | 0 | 3 | null | null | null | null | null | [
{
"alpha_fraction": 0.5212368369102478,
"alphanum_fraction": 0.5317702889442444,
"avg_line_length": 22.54400062561035,
"blob_id": "c580a0c5f9b9c715a9750df67b4d84c870037681",
"content_id": "ae6746514baf2df62b6c2d6a3a62f382c0aadcbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2943,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 125,
"path": "/KB_Entailment.py",
"repo_name": "sumukhhk/AI_LAB-1BM17CS106",
"src_encoding": "UTF-8",
"text": "combinations=[(True,True),(False,False),(False,True),(True,False)]#expand this set for more variables\nvariable={'p':0,'q':1}#expand this set matching combinations indices for variables\n#set of rules\nkb=''#should be a cnf \nq=''#should be a cnf\npriority={'~':3,'v':1,'^':2}\ndef input_rules():\n global kb,q\n kb=(input(\"Enter rule :\")) \n q=input(\"Enter the Query : \") \n\ndef entailment():\n global kb,q\n print('*'*10+\"Truth Table Reference\"+'*'*10)\n print('kb','alpha')\n print('*'*10)\n for comb in combinations:\n s=evaluatePostfix(toPostfix(kb),comb)\n f=evaluatePostfix(toPostfix(q),comb) \n print(s,f) \n print('-'*10) \n if s and not f: \n return False\n return True\n\ndef isOperand(c):\n return c.isalpha() and c!='v'\n\ndef isLeftParenthesis(c):\n return c=='('\ndef isRightParenthesis(c):\n return c==')'\ndef isEmpty(stack):\n return len(stack)==0\n\ndef peek(stack):\n return stack[-1]\n\ndef hasLessOrEqualPriority(c1,c2):\n try: return priority[c1]<=priority[c2]\n except KeyError: return False\n\ndef toPostfix(infix):\n stack = []\n postfix = ''\n for c in infix:\n if isOperand(c):\n postfix += c\n else:\n if isLeftParenthesis(c):\n stack.append(c)\n elif isRightParenthesis(c):\n operator = stack.pop()\n while not isLeftParenthesis(operator):\n postfix += operator\n operator = stack.pop() \n else:\n while (not isEmpty(stack)) and hasLessOrEqualPriority(c,peek(stack)):\n postfix += stack.pop()\n stack.append(c)\n\n while (not isEmpty(stack)):\n postfix += stack.pop()\n return postfix\n\ndef evaluatePostfix(exp,comb): \n stack=[]\n for i in exp: \n if isOperand(i): \n stack.append(comb[variable[i]])\n elif i=='~':\n val1 = stack.pop()\n stack.append(not val1) \n else:\n val1 = stack.pop()\n val2 = stack.pop() \n stack.append(_eval(i,val2,val1)) \n \n return stack.pop()\n\ndef _eval(i,val1,val2):\n if i=='^': return val2 and val1\n return val2 or val1 \n\ninput_rules()\nans=entailment()\nif ans: print(\"The Knowlege Base entails query \")\nelse: print(\"The Knowlege Base does not entail query \")\n \n\n\n\n\"\"\"\nOUTPUT:\nEnter rule :pvq\nEnter the Query : p\n**********Truth Table Reference**********\nkb alpha\n**********\nTrue True\n----------\nFalse False\n----------\nTrue False\n----------\nThe Knowlege Base does not entail query\n\n\n\n\nEnter rule :p^q\nEnter the Query : p\n**********Truth Table Reference**********\nkb alpha\n**********\nTrue True\n----------\nFalse False\n----------\nFalse False\n----------\nFalse True\n----------\nThe Knowlege Base entails query\n\"\"\"\n"
},
{
"alpha_fraction": 0.4341636896133423,
"alphanum_fraction": 0.4509405195713043,
"avg_line_length": 23.89873504638672,
"blob_id": "142894568f173987e1f8f36efce5a74d2d96830c",
"content_id": "bf1edc837a990c28796d926f586de4647cfaf441",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1967,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 79,
"path": "/FOL_TO_CNF.py",
"repo_name": "sumukhhk/AI_LAB-1BM17CS106",
"src_encoding": "UTF-8",
"text": "prep=input(\"Enter the preposition logic: \")\ndi=prep.find('<=>')\nif di == -1:\n pass\nelse:\n #print(di)\n prep=prep[0:di]+\"=>\"+prep[di+3:len(prep)]+\" ^ \"+prep[di+3:]+\"=>\"+prep[:di]\n print(\"\\n1.Removing double implies: \"+prep)\ni=prep.find('=>')\nwhile(i!=-1):\n if prep[i-1]!=\")\":\n prep=prep[0:i-1]+\"!\"+prep[i-1]+\" v \"+prep[i+2:]\n else:\n '''ib=prep.find('(')'''\n j=i\n while prep[j]!='(':\n j-=1\n prep=prep[0:j]+\"!\"+prep[j:i]+\" v \"+prep[i+2:]\n i=prep.find('=>')\nprint(\"\\n2.Removing single implies: \"+prep)\n\n\nn=prep.find('!(')\nwhile(n!=-1):\n if prep[n+1]=='(':\n l=len(prep)\n prepn=prep\n prep=prepn[0:n+1]\n i=n+2\n while(prepn[i]!='^' and prepn[i]!='v'):\n if(prepn[i]!='('):\n prep+=prepn[i]\n i+=1\n if prepn[i]=='^':\n prep+=' v '\n elif prepn[i]=='v':\n prep+=' ^ '\n prep+='!'+prepn[i+1:prepn.find(')',i)]\n prep+=prepn[prepn.find(')',i)+1:]\n n=prep.find('!(')\nprint(\"\\n3.Demorgan's Law: \"+prep)\n\nprep = prep.replace(\"! !\", \"\")\nprint(\"\\n4.Removing double negation: \"+prep)\n\nprepn=prep\nprep=\"\"\nfor i in prepn:\n if i!='(' and i!=')':\n prep+=i\nprint(\"\\n5.Removing unwanted brackets: \"+prep)\n\nprepn=prep\nprep=\"(\"\nfor i in prepn:\n if i=='^':\n prep+=') ^ ('\n else:\n prep+=i\nprep+=')'\nprint(\"\\n6.Grouping into CNF form: \"+prep)\n\n'''\noutput\n\nEnter the preposition logic: (a=>b)^(b<=>c)\n\n1.Removing double implies: (a=>b)^(b=>c) ^ c)=>(a=>b)^(b\n\n2.Removing single implies: (!a v b)^!(!b v c) ^ c) v (!a v b)^(b\n\n3.Demorgan's Law: (!a v b)^!!b ^ ! c ^ c) v (!a v b)^(b\n\n4.Removing double negation: (!a v b)^!!b ^ ! c ^ c) v (!a v b)^(b\n\n5.Removing unwanted brackets: !a v b^!!b ^ ! c ^ c v !a v b^b\n\n6.Grouping into CNF form: (!a v b) ^ (!!b ) ^ ( ! c ) ^ ( c v !a v b) ^ (b)\n'''\n"
},
{
"alpha_fraction": 0.46782007813453674,
"alphanum_fraction": 0.4826989471912384,
"avg_line_length": 22.88429832458496,
"blob_id": "86dd294922f2801cb9d09fb209056433332310d7",
"content_id": "f31c6bb9dd0419db40671b4e04157e97c7e3df78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2890,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 121,
"path": "/unification.c",
"repo_name": "sumukhhk/AI_LAB-1BM17CS106",
"src_encoding": "UTF-8",
"text": "no_of_pred=0\nno_of_arg=[None for i in range(10)]\nnouse=''\npredicate=[None for i in range(10)]\nargument=[[None for i in range(10)] for i in range(10)]\n \ndef main():\n global no_of_pred\n ch='y'\n while(ch == 'y'):\n print(\"=========PROGRAM FOR UNIFICATION=========\")\n no_of_pred=int(input(\"Enter Number of Predicates:\"))\n for i in range(no_of_pred):\n #nouse=input() # //to accept \"enter\" as a character\n print(\"Enter Predicate \",(i+1),\" :\")\n predicate[i]=input()\n print(\"Enter No.of Arguments for Predicate \",predicate[i],\" :\")\n no_of_arg[i]=int(input())\n\n for j in range(no_of_arg[i]):\n print(\"Enter argument \",j+1,\" :\");\n argument[i][j]=input()\n\n \n display()\n chk_arg_pred()\n ch=input(\"Do you want to continue(y/n): \")\n\ndef display():\n\n print(\"=======PREDICATES ARE======\")\n for i in range(no_of_pred):\n print(predicate[i],\"(\", end=\"\")\n for j in range(no_of_arg[i]):\n print(argument[i][j], end=\"\")\n if(j!=no_of_arg[i]-1):\n print(\",\", end=\"\")\n print(\")\")\n\n\n#/*==========UNIFY FUNCTION=========*/\n\ndef unify():\n flag=0\n for i in range(no_of_pred-1):\n for j in range(no_of_arg[i]):\n if(argument[i][j]!=argument[i+1][j]):\n if(flag==0):\n print(\"======SUBSTITUTION IS======\")\n print(argument[i+1][j],\"/\",argument[i][j])\n flag+=1\n\n if(flag==0):\n print(\"Arguments are Identical...\")\n print(\"No need of Substitution\")\n\n\ndef chk_arg_pred():\n pred_flag=0\n arg_flag=0\n\n\n #/*======Checking Prediactes========*/\n for i in range(no_of_pred-1):\n if(predicate[i]!=predicate[i+1]):\n print(\"Predicates not same..\")\n print(\"Unification cannot progress!\")\n pred_flag=1\n break\n\n #/*=====Chking No of Arguments====*/\n\n if(pred_flag!=1):\n ind=0\n key = no_of_arg[ind]\n l = len(no_of_arg)\n for i in range(0,key-1):\n if i>=key:\n continue\n if ind!=l-1:\n ind += 1\n key = no_of_arg[ind]\n if(no_of_arg[i]!=no_of_arg[i+1]):\n\n print(\"Arguments Not Same..!\")\n arg_flag=1\n break\n\n if(arg_flag==0 and pred_flag!=1):\n unify()\n\nmain()\n\n\n'''\nOUTPUT\n=========PROGRAM FOR UNIFICATION=========\nEnter Number of Predicates:2\nEnter Predicate 1 :\np\nEnter No.of Arguments for Predicate p :\n2\nEnter argument 1 :\na\nEnter argument 2 :\nb\nEnter Predicate 2 :\np\nEnter No.of Arguments for Predicate p :\n2\nEnter argument 1 :\nc\nEnter argument 2 :\nb\n=======PREDICATES ARE======\np (a,b)\np (c,b)\n======SUBSTITUTION IS======\nc / a\nDo you want to continue(y/n):\n'''\n"
},
{
"alpha_fraction": 0.4435446560382843,
"alphanum_fraction": 0.4986117482185364,
"avg_line_length": 17.31355857849121,
"blob_id": "e8000e86c9c9e2c6336e61122ac938ae98671ecd",
"content_id": "7ecafe3c5d82a94484c804a891949971c161f876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4322,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 236,
"path": "/A*algorithm_AILAB",
"repo_name": "sumukhhk/AI_LAB-1BM17CS106",
"src_encoding": "UTF-8",
"text": "from collections import deque\nfrom itertools import chain, tee\nfrom math import sqrt\nfrom random import choice\n\nclass Puzzle:\n HOLE = 0\n\n def __init__(self, board, hole_location=None, width=None):\n # Use a flattened representation of the board (if it isn't already)\n self.board = list(chain.from_iterable(board)) if hasattr(board[0], '__iter__') else board\n self.hole = hole_location if hole_location is not None else self.board.index(Puzzle.HOLE)\n self.width = width or int(sqrt(len(self.board)))\n\n @property\n def solved(self):\n \n return self.board == list(range(1, self.width * self.width)) + [Puzzle.HOLE]\n\n @property \n def possible_moves(self):\n \n for dest in (self.hole - self.width, self.hole + self.width):\n if 0 <= dest < len(self.board):\n yield dest\n \n for dest in (self.hole - 1, self.hole + 1):\n if dest // self.width == self.hole // self.width:\n yield dest\n\n def move(self, destination):\n board = self.board[:]\n board[self.hole], board[destination] = board[destination], board[self.hole]\n return Puzzle(board, destination, self.width)\n\n def shuffle(self, moves=1000):\n p = self\n for _ in range(moves):\n p = p.move(choice(list(p.possible_moves)))\n return p\n\n @staticmethod\n def direction(a, b):\n if a is None:\n return None\n return {\n -a.width: 'U',\n -1: 'L', 0: None, +1: 'R',\n +a.width: 'D',\n }[b.hole - a.hole]\n\n def __str__(self):\n return \"\\n\".join(str(self.board[start : start + self.width])\n for start in range(0, len(self.board), self.width))\n\n def __eq__(self, other):\n return self.board == other.board\n\n def __hash__(self):\n h = 0\n for value, i in enumerate(self.board):\n h ^= value << i\n return h\n\nclass MoveSequence:\n def __init__(self, last, prev_holes=None):\n self.last = last\n self.prev_holes = prev_holes or []\n\n def branch(self, destination):\n return MoveSequence(self.last.move(destination),\n self.prev_holes + [self.last.hole])\n\n def __iter__(self):\n states = [self.last]\n for hole in reversed(self.prev_holes):\n states.append(states[-1].move(hole))\n yield from reversed(states)\n\nclass Solver:\n def __init__(self, start):\n self.start = start\n\n def solve(self):\n queue = deque([MoveSequence(self.start)])\n seen = set([self.start])\n if self.start.solved:\n return queue.pop()\n\n for seq in iter(queue.pop, None):\n for destination in seq.last.possible_moves:\n attempt = seq.branch(destination)\n if attempt.last not in seen:\n seen.add(attempt.last)\n queue.appendleft(attempt)\n if attempt.last.solved:\n return attempt\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\nif __name__ == '__main__':\n board = [[1,2,3],\n [4,0,6],\n [7,5,8]]\n\n puzzle = Puzzle(board).shuffle()\n print(puzzle)\n move_seq = iter(Solver(puzzle).solve())\n for from_state, to_state in pairwise(move_seq):\n print()\n print(Puzzle.direction(from_state, to_state))\n print(to_state)\n\n\n\n'''OUTPUT\n\n[5, 1, 8]\n[4, 0, 6]\n[3, 2, 7]\n\nD\n[5, 1, 8]\n[4, 2, 6]\n[3, 0, 7]\n\nL\n[5, 1, 8]\n[4, 2, 6]\n[0, 3, 7]\n\nU\n[5, 1, 8]\n[0, 2, 6]\n[4, 3, 7]\n\nU\n[0, 1, 8]\n[5, 2, 6]\n[4, 3, 7]\n\nR\n[1, 0, 8]\n[5, 2, 6]\n[4, 3, 7]\n\nD\n[1, 2, 8]\n[5, 0, 6]\n[4, 3, 7]\n\nR\n[1, 2, 8]\n[5, 6, 0]\n[4, 3, 7]\n\nU\n[1, 2, 0]\n[5, 6, 8]\n[4, 3, 7]\n\nL\n[1, 0, 2]\n[5, 6, 8]\n[4, 3, 7]\n\nD\n[1, 6, 2]\n[5, 0, 8]\n[4, 3, 7]\n\nD\n[1, 6, 2]\n[5, 3, 8]\n[4, 0, 7]\n\nR\n[1, 6, 2]\n[5, 3, 8]\n[4, 7, 0]\n\nU\n[1, 6, 2]\n[5, 3, 0]\n[4, 7, 8]\n\nL\n[1, 6, 2]\n[5, 0, 3]\n[4, 7, 
8]\n\nU\n[1, 0, 2]\n[5, 6, 3]\n[4, 7, 8]\n\nR\n[1, 2, 0]\n[5, 6, 3]\n[4, 7, 8]\n\nD\n[1, 2, 3]\n[5, 6, 0]\n[4, 7, 8]\n\nL\n[1, 2, 3]\n[5, 0, 6]\n[4, 7, 8]\n\nL\n[1, 2, 3]\n[0, 5, 6]\n[4, 7, 8]\n\nD\n[1, 2, 3]\n[4, 5, 6]\n[0, 7, 8]\n\nR\n[1, 2, 3]\n[4, 5, 6]\n[7, 0, 8]\n\nR\n[1, 2, 3]\n[4, 5, 6]\n[7, 8, 0]\n'''\n"
},
{
"alpha_fraction": 0.6429539322853088,
"alphanum_fraction": 0.6443089246749878,
"avg_line_length": 29.75,
"blob_id": "7e0680aa0d09c110044d7a7dafd89e2d9c3dd727",
"content_id": "a7b2038b0eacb6da6ac0cec734170a142d239b66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1476,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 48,
"path": "/forward_reasoning.py",
"repo_name": "sumukhhk/AI_LAB-1BM17CS106",
"src_encoding": "UTF-8",
"text": "import aima.utils\nimport aima.logic\n\n# The main entry point for this module\ndef main():\n\n # Create an array to hold clauses\n clauses = []\n\n # Add first-order logic clauses (rules and fact)\n clauses.append(aima.utils.expr(\"(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)\"))\n clauses.append(aima.utils.expr(\"Enemy(Nono, America)\"))\n clauses.append(aima.utils.expr(\"Owns(Nono, M1)\"))\n clauses.append(aima.utils.expr(\"Missile(M1)\"))\n clauses.append(aima.utils.expr(\"(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)\"))\n clauses.append(aima.utils.expr(\"American(West)\"))\n clauses.append(aima.utils.expr(\"Missile(x) ==> Weapon(x)\"))\n\n # Create a first-order logic knowledge base (KB) with clauses\n KB = aima.logic.FolKB(clauses)\n\n # Add rules and facts with tell\n KB.tell(aima.utils.expr('Enemy(Coco, America)'))\n KB.tell(aima.utils.expr('Enemy(Jojo, America)'))\n KB.tell(aima.utils.expr(\"Enemy(x, America) ==> Hostile(x)\"))\n\n # Get information from the knowledge base with ask\n hostile = aima.logic.fol_fc_ask(KB, aima.utils.expr('Hostile(x)'))\n criminal = aima.logic.fol_fc_ask(KB, aima.utils.expr('Criminal(x)'))\n\n # Print answers\n print('Hostile?')\n print(list(hostile))\n print('\\nCriminal?')\n print(list(criminal))\n print()\n\n# Tell python to run main method\nif __name__ == \"__main__\": main()\n\n'''\noutput\nHostile?\n[{x: Nono}, {x: Jojo}, {x: Coco}]\n\nCriminal?\n[{x: West}]\n'''\n"
}
] | 5 |
domdfcoding/Photo-Sort | https://github.com/domdfcoding/Photo-Sort | b15e7247dfd50fa1b2ab55e692b3d791bebfbe24 | c1f13ba8934956ebdd4e36e99dd7aadc1c7909ca | 42d7b607e37099c58a6922074c0633c3009511d4 | refs/heads/master | 2023-08-08T20:04:32.817077 | 2022-09-06T14:29:13 | 2022-09-06T14:29:13 | 199,680,175 | 0 | 1 | MIT | 2019-07-30T15:35:36 | 2022-08-19T15:54:48 | 2023-05-22T18:54:00 | Python | [
{
"alpha_fraction": 0.7182080745697021,
"alphanum_fraction": 0.7235067486763,
"avg_line_length": 29.086956024169922,
"blob_id": "ed13ba30667142397b79cc2d3b21f313be8c04f1",
"content_id": "25ac1697d157ea72d064abde2182bb80a98ee888",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2077,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 69,
"path": "/photo_sort/settings_dialog.py",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#\n# SettingsDialog.py\n\"\"\"\nProvides a dialog for configuring settings.\n\"\"\"\n#\n# Copyright © 2014-2021 Dominic Davis-Foster <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n# 3rd party\nimport wx # type: ignore # nodep\n\n__all__ = [\"SettingsDialog\"]\n\n# begin wxGlade: dependencies\n# end wxGlade\n\n# begin wxGlade: extracode\n# end wxGlade\n\n\nclass SettingsDialog(wx.Dialog):\n\t\"\"\"\n\tDialog for configuring settings.\n\t\"\"\"\n\n\t# TODO: docstring for __init__'s arguments\n\n\tdef __init__(self, *args, **kwds):\n\t\t# begin wxGlade: SettingsDialog.__init__\n\t\tkwds[\"style\"] = kwds.get(\"style\", 0) | wx.DEFAULT_DIALOG_STYLE\n\t\twx.Dialog.__init__(self, *args, **kwds)\n\n\t\tself.__set_properties()\n\t\tself.__do_layout()\n\t\t# end wxGlade\n\n\tdef __set_properties(self):\n\t\t# begin wxGlade: SettingsDialog.__set_properties\n\t\tself.SetTitle(\"dialog\")\n\t\t# end wxGlade\n\n\tdef __do_layout(self):\n\t\tself.CreateSeparatedButtonSizer(wx.ID_APPLY | wx.ID_CANCEL)\n\t\t# begin wxGlade: SettingsDialog.__do_layout\n\t\tself.Layout()\n\t\t# end wxGlade\n\n\n# end of class SettingsDialog\n"
},
{
"alpha_fraction": 0.7223984003067017,
"alphanum_fraction": 0.7365235090255737,
"avg_line_length": 32.355770111083984,
"blob_id": "41cc0e93341eeda9d85281f5674a16d75a80dbd6",
"content_id": "5d5ccb1d2b14c0504f44314904c259e2f5d4a478",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 3469,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 104,
"path": "/README.rst",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "###########\nphoto-sort\n###########\n\n.. start short_desc\n\n**A program for sorting photographs into folders.**\n\n.. end short_desc\n\n\n.. start shields\n\n.. list-table::\n\t:stub-columns: 1\n\t:widths: 10 90\n\n\t* - Docs\n\t - |docs| |docs_check|\n\t* - Tests\n\t - |actions_linux| |actions_windows| |actions_macos|\n\t* - Activity\n\t - |commits-latest| |commits-since| |maintained|\n\t* - QA\n\t - |codefactor| |actions_flake8| |actions_mypy|\n\t* - Other\n\t - |license| |language| |requires|\n\n.. |docs| image:: https://img.shields.io/readthedocs/photo-sort/latest?logo=read-the-docs\n\t:target: https://photo-sort.readthedocs.io/en/latest\n\t:alt: Documentation Build Status\n\n.. |docs_check| image:: https://github.com/domdfcoding/photo-sort/workflows/Docs%20Check/badge.svg\n\t:target: https://github.com/domdfcoding/photo-sort/actions?query=workflow%3A%22Docs+Check%22\n\t:alt: Docs Check Status\n\n.. |actions_linux| image:: https://github.com/domdfcoding/photo-sort/workflows/Linux/badge.svg\n\t:target: https://github.com/domdfcoding/photo-sort/actions?query=workflow%3A%22Linux%22\n\t:alt: Linux Test Status\n\n.. |actions_windows| image:: https://github.com/domdfcoding/photo-sort/workflows/Windows/badge.svg\n\t:target: https://github.com/domdfcoding/photo-sort/actions?query=workflow%3A%22Windows%22\n\t:alt: Windows Test Status\n\n.. |actions_macos| image:: https://github.com/domdfcoding/photo-sort/workflows/macOS/badge.svg\n\t:target: https://github.com/domdfcoding/photo-sort/actions?query=workflow%3A%22macOS%22\n\t:alt: macOS Test Status\n\n.. |actions_flake8| image:: https://github.com/domdfcoding/photo-sort/workflows/Flake8/badge.svg\n\t:target: https://github.com/domdfcoding/photo-sort/actions?query=workflow%3A%22Flake8%22\n\t:alt: Flake8 Status\n\n.. |actions_mypy| image:: https://github.com/domdfcoding/photo-sort/workflows/mypy/badge.svg\n\t:target: https://github.com/domdfcoding/photo-sort/actions?query=workflow%3A%22mypy%22\n\t:alt: mypy status\n\n.. |requires| image:: https://dependency-dash.repo-helper.uk/github/domdfcoding/photo-sort/badge.svg\n\t:target: https://dependency-dash.repo-helper.uk/github/domdfcoding/photo-sort/\n\t:alt: Requirements Status\n\n.. |codefactor| image:: https://img.shields.io/codefactor/grade/github/domdfcoding/photo-sort?logo=codefactor\n\t:target: https://www.codefactor.io/repository/github/domdfcoding/photo-sort\n\t:alt: CodeFactor Grade\n\n.. |license| image:: https://img.shields.io/github/license/domdfcoding/photo-sort\n\t:target: https://github.com/domdfcoding/photo-sort/blob/master/LICENSE\n\t:alt: License\n\n.. |language| image:: https://img.shields.io/github/languages/top/domdfcoding/photo-sort\n\t:alt: GitHub top language\n\n.. |commits-since| image:: https://img.shields.io/github/commits-since/domdfcoding/photo-sort/v0.0.0\n\t:target: https://github.com/domdfcoding/photo-sort/pulse\n\t:alt: GitHub commits since tagged version\n\n.. |commits-latest| image:: https://img.shields.io/github/last-commit/domdfcoding/photo-sort\n\t:target: https://github.com/domdfcoding/photo-sort/commit/master\n\t:alt: GitHub last commit\n\n.. |maintained| image:: https://img.shields.io/maintenance/yes/2022\n\t:alt: Maintenance\n\n.. end shields\n\nInstallation\n--------------\n\n.. start installation\n\n``photo-sort`` can be installed from GitHub.\n\nTo install with ``pip``:\n\n.. code-block:: bash\n\n\t$ python -m pip install git+https://github.com/domdfcoding/photo-sort\n\n.. end installation\n\nYou'll also need ``exiftool`` installed. 
On Debian/Ubuntu:\n\n.. code-block:: bash\n\n\t$ sudo apt install exiftool\n"
},
{
"alpha_fraction": 0.6419597864151001,
"alphanum_fraction": 0.6570351719856262,
"avg_line_length": 28.157508850097656,
"blob_id": "6402a8e6a855cceee0ce207905533682b288a431",
"content_id": "739af00463caf55815f0d7641ce21d51c383c623",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7961,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 273,
"path": "/photo_sort/manage_cameras.py",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#\n# manage_cameras.py\n\"\"\"\nProvides a class to manage the mapping of camera IDs to human-readable names.\n\"\"\"\n#\n# Copyright © 2014-2021 Dominic Davis-Foster <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n# stdlib\nimport json\nfrom typing import Dict\n\n# 3rd party\nimport exifread # type: ignore\nimport exiftool # type: ignore\nimport wx # type: ignore # nodep\nimport wx.grid # type: ignore # nodep\n\n__all__ = [\"manage_cameras\"]\n\n# begin wxGlade: dependencies\n# end wxGlade\n\n# begin wxGlade: extracode\n# end wxGlade\n\n\nclass ManageCameras(wx.Dialog):\n\t\"\"\"\n\tClass to manage the mapping of camera IDs to human-readable names.\n\n\t:param parent:\n\t:param id:\n\t:param title:\n\t:param pos:\n\t:param size:\n\t:param style:\n\t:param name:\n\t:param data:\n\t\"\"\"\n\n\t# TODO: docstring for __init__'s arguments\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tparent,\n\t\t\tid=wx.ID_ANY, # noqa: A002 # pylint: disable=redefined-builtin\n\t\t\ttitle='',\n\t\t\tpos=wx.DefaultPosition,\n\t\t\tsize=wx.DefaultSize,\n\t\t\tstyle=wx.DEFAULT_DIALOG_STYLE,\n\t\t\tname=wx.DialogNameStr,\n\t\t\tdata=None\n\t\t\t):\n\n\t\tif not data:\n\t\t\tdata = {}\n\n\t\targs = (parent, id)\n\t\tkwds = {\n\t\t\t\t\"title\": title,\n\t\t\t\t\"pos\": pos,\n\t\t\t\t\"size\": size,\n\t\t\t\t\"style\": style,\n\t\t\t\t\"name\": name,\n\t\t\t\t}\n\n\t\t# begin wxGlade: manage_cameras.__init__\n\t\tkwds[\"style\"] = kwds.get(\"style\", 0) | wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER\n\t\twx.Dialog.__init__(self, *args, **kwds)\n\t\tself.SetSize((400, 328))\n\t\tself.add_btn = wx.Button(self, wx.ID_ADD, '')\n\t\tself.add_from_btn = wx.Button(self, wx.ID_ANY, \"Add from Image\")\n\t\tself.remove_btn = wx.Button(self, wx.ID_REMOVE, '')\n\t\tself.grid_1 = wx.grid.Grid(self, wx.ID_ANY, size=(1, 1))\n\t\tself.cancel_btn = wx.Button(self, wx.ID_CANCEL, '')\n\t\tself.apply_btn = wx.Button(self, wx.ID_APPLY, '')\n\n\t\tself.__set_properties()\n\t\tself.__do_layout()\n\n\t\tself.Bind(wx.EVT_BUTTON, self.do_add, self.add_btn)\n\t\tself.Bind(wx.EVT_BUTTON, self.do_add_from, self.add_from_btn)\n\t\tself.Bind(wx.EVT_BUTTON, self.do_remove, self.remove_btn)\n\t\tself.Bind(wx.EVT_BUTTON, self.do_apply, self.apply_btn)\n\t\t# end wxGlade\n\t\tself.grid_1.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.on_select_cell)\n\t\tself.grid_1.DeleteRows(1, 10)\n\n\t\trow = 0\n\n\t\tfor camera in data:\n\t\t\tself.grid_1.SetCellValue(row, 0, 
camera)\n\t\t\tself.grid_1.SetCellValue(row, 1, data[camera])\n\t\t\trow += 1\n\t\t\tself.grid_1.AppendRows()\n\n\t\tself.cur_row = 0\n\t\tself.cur_col = 0\n\n\t\tself.grid_1.GoToCell(0, 0)\n\t\tself.grid_1.SetFocus()\n\n\tdef __set_properties(self) -> None:\n\t\t# begin wxGlade: manage_cameras.__set_properties\n\t\tself.SetTitle(\"Manage Cameras\")\n\t\tself.SetSize((400, 328))\n\t\tself.grid_1.CreateGrid(10, 2)\n\t\tself.grid_1.SetColLabelValue(0, \"Exif Camera\")\n\t\tself.grid_1.SetColSize(0, 180)\n\t\tself.grid_1.SetColLabelValue(1, \"Pretty Name\")\n\t\tself.grid_1.SetColSize(1, 180)\n\t\t# end wxGlade\n\t\tself.grid_1.HideRowLabels()\n\n\tdef __do_layout(self) -> None:\n\t\t# begin wxGlade: manage_cameras.__do_layout\n\t\tsizer_3 = wx.BoxSizer(wx.VERTICAL)\n\t\tsizer_7 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsizer_4 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsizer_4.Add(self.add_btn, 0, wx.ALIGN_CENTER | wx.RIGHT, 3)\n\t\tsizer_4.Add(self.add_from_btn, 0, wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, 3)\n\t\tsizer_4.Add(self.remove_btn, 0, wx.ALIGN_CENTER | wx.LEFT, 3)\n\t\tsizer_3.Add(sizer_4, 0, wx.ALIGN_CENTER | wx.BOTTOM | wx.TOP, 10)\n\t\tsizer_3.Add(self.grid_1, 1, wx.EXPAND, 0)\n\t\tsizer_7.Add(self.cancel_btn, 0, wx.RIGHT, 7)\n\t\tsizer_7.Add(self.apply_btn, 0, wx.RIGHT, 7)\n\t\tsizer_3.Add(sizer_7, 0, wx.ALIGN_RIGHT | wx.ALL, 10)\n\t\tself.SetSizer(sizer_3)\n\t\tself.Layout()\n\t\t# end wxGlade\n\n\tdef do_apply(self, event) -> None: # wxGlade: manage_cameras.<event_handler>\n\t\tif self.Validate() and self.TransferDataFromWindow():\n\n\t\t\tif self.IsModal():\n\t\t\t\tself.EndModal(wx.ID_APPLY)\n\t\t\telse:\n\t\t\t\tself.SetReturnCode(wx.ID_APPLY)\n\t\t\t\tself.Show(False)\n\n\t\tevent.Skip()\n\n\tdef do_add(self, event) -> None: # wxGlade: manage_cameras.<event_handler>\n\t\tself.grid_1.AppendRows()\n\t\tevent.Skip()\n\n\tdef do_remove(self, event) -> None: # wxGlade: manage_cameras.<event_handler>\n\t\tprint(self.grid_1.GetSelectedCells())\n\t\tprint(self.cur_row)\n\t\tif self.grid_1.GetNumberRows() == 1:\n\t\t\tprint(\"You can not remove the last entry!\")\n\t\t\treturn\n\t\tself.grid_1.DeleteRows(self.cur_row)\n\t\tself.grid_1.GoToCell(self.cur_row, self.cur_col)\n\t\tself.grid_1.SetFocus()\n\n\t\tevent.Skip()\n\n\tdef on_select_cell(self, event) -> None:\n\t\tself.cur_row = event.GetRow()\n\t\tself.cur_col = event.GetCol()\n\t\tprint(self.cur_col, self.cur_row)\n\t\tevent.Skip()\n\n\tdef get_data(self) -> Dict:\n\t\tdata = {}\n\t\tfor row in range(self.grid_1.GetNumberRows()):\n\t\t\texif_camera = self.grid_1.GetCellValue(row, 0)\n\t\t\tpretty_name = self.grid_1.GetCellValue(row, 1)\n\t\t\tif any([pretty_name == '', exif_camera == '']):\n\t\t\t\tcontinue\n\t\t\tdata[exif_camera] = pretty_name\n\t\treturn data\n\n\tdef do_add_from(self, event) -> None: # wxGlade: manage_cameras.<event_handler>\n\n\t\twith wx.FileDialog(\n\t\t\t\tself,\n\t\t\t\t\"Open Image File\", # wildcard=\"JPEG files (*.jpg;*.jpeg;*.JPG)|*.jpg;*.jpeg;*.JPG\",\n\t\t\t\tstyle=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST\n\t\t\t\t) as fileDialog:\n\n\t\t\tif fileDialog.ShowModal() == wx.ID_CANCEL:\n\t\t\t\treturn # the user changed their mind\n\n\t\t\t# print(style)\n\t\t\t# print(wx.FD_MULTIPLE in style)\n\n\t\t\tpathname = fileDialog.GetPath()\n\n\t\t\tprint(pathname)\n\n\t\t\ttry:\n\t\t\t\twith open(pathname, \"rb\") as file:\n\t\t\t\t\tdata = exifread.process_file(file)\n\t\t\t\t\texif_camera = str(data[\"Image Model\"])\n\n\t\t\texcept (OSError, KeyError):\n\t\t\t\t# wx.MessageDialog(self, f\"Cannot open file '{pathname}'.\", 
\"Error\",\n\t\t\t\t# \t\t\t\t style=wx.OK | wx.ICON_ERROR).ShowModal()\n\t\t\t\t# Video File\n\n\t\t\t\twith exiftool.ExifTool() as et:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = et.get_metadata(pathname)\n\t\t\t\t\texcept json.decoder.JSONDecodeError:\n\t\t\t\t\t\twx.MessageDialog(\n\t\t\t\t\t\t\t\tself, f\"Cannot open file '{pathname}'.\", \"Error\", style=wx.OK | wx.ICON_ERROR\n\t\t\t\t\t\t\t\t).ShowModal()\n\n\t\t\t\t\t# Video files, Canon\n\t\t\t\t\ttry:\n\t\t\t\t\t\texif_camera = str(data[\"EXIF:Model\"])\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t# Video files, Panasonic\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\texif_camera = str(data[\"MakerNotes:Model\"])\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t# Video files, GoPro 7\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\texif_camera = str(data[\"QuickTime:Model\"])\n\t\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\texif_camera = str(data[\"QuickTime:LensSerialNumber\"])\n\t\t\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t\t\twx.MessageDialog(\n\t\t\t\t\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\t\t\t\t\tf\"Cannot parse EXIF data from file '{pathname}'.\",\n\t\t\t\t\t\t\t\t\t\t\t\"Error\",\n\t\t\t\t\t\t\t\t\t\t\tstyle=wx.OK | wx.ICON_ERROR\n\t\t\t\t\t\t\t\t\t\t\t).ShowModal()\n\t\t\t\t\t\t\t\t\tevent.Skip()\n\t\t\t\t\t\t\t\t\treturn\n\n\t\t\t\t\tprint(exif_camera)\n\n\t\t\t\t\tfor row in range(self.grid_1.GetNumberRows()):\n\t\t\t\t\t\tif exif_camera == self.grid_1.GetCellValue(row, 0):\n\t\t\t\t\t\t\twx.MessageDialog(\n\t\t\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\t\t\tf\"The camera '{exif_camera}' is already in the table.\",\n\t\t\t\t\t\t\t\t\t\"Error\",\n\t\t\t\t\t\t\t\t\tstyle=wx.OK | wx.ICON_ERROR\n\t\t\t\t\t\t\t\t\t).ShowModal()\n\t\t\t\t\t\t\treturn\n\t\t\t\tself.grid_1.SetCellValue(self.grid_1.GetNumberRows() - 1, 0, exif_camera)\n\t\t\t\tself.grid_1.AppendRows()\n\n\t\tevent.Skip()\n\n\n# end of class manage_cameras\n"
},
{
"alpha_fraction": 0.6597784161567688,
"alphanum_fraction": 0.6699616312980652,
"avg_line_length": 29.12837028503418,
"blob_id": "bee578cdb44b06593d5048e76f1260686864c594",
"content_id": "7edd07dc17017ee086f28f9bd31168677363ad62",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23471,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 779,
"path": "/photo_sort/launcher.py",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#\n# Launcher.py\n\"\"\"\nProvides the main window class, and its background worker thread.\n\"\"\"\n#\n# Copyright © 2014-2021 Dominic Davis-Foster <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n# stdlib\nimport json\nimport os\nimport shutil\nfrom datetime import timedelta\nfrom threading import Event, Thread\nfrom typing import Dict, List\n\n# 3rd party\nimport exifread # type: ignore\nimport exiftool # type: ignore\nimport wx # type: ignore # nodep\nfrom domdf_python_tools.paths import maybe_make\nfrom domdf_wxpython_tools.events import SimpleEvent # type: ignore # TODO\nfrom domdf_wxpython_tools.picker import dir_picker # type: ignore\nfrom domdf_wxpython_tools.timer_thread import Timer, timer_event # type: ignore\n\n# this package\nfrom photo_sort.errors import ExifError\nfrom photo_sort.manage_cameras import ManageCameras\nfrom photo_sort.settings_dialog import SettingsDialog\n\n__all__ = [\"Worker\", \"Launcher\"]\n\n# begin wxGlade: dependencies\n# end wxGlade\n\n# begin wxGlade: extracode\n# end wxGlade\n\nprogress_event = SimpleEvent(\"Progress\")\nsorting_done = SimpleEvent(\"Done\")\n\nworker_thread_running = False\n\nmode_copy = 0\nmode_move = 1\n\n########################################################################\n\n\nclass Worker(Thread):\n\t\"\"\"\n\tWorker Thread for performing sorting.\n\n\tIncludes code from https://gist.github.com/samarthbhargav/5a515a399f7113137331\n\n\n\t:param parent: Class to send event updates to\n\t:param filelist: List of file paths to sort\n\t:param destination: Base directory to sort files into\n\t:param mode: Whether to copy or move the files, default Copy\n\t:param within_dirs: Whether to sort within directories, default False\n\t:param by_datetime: Whether to sort by date and time, default False (i.e. 
just by date)\n\t:param by_camera: Whether to sort by camera name, default False\n\t\"\"\"\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tparent: wx.Window,\n\t\t\tfilelist: List,\n\t\t\tdestination: str,\n\t\t\tmode: int = mode_copy,\n\t\t\twithin_dirs: bool = False,\n\t\t\tby_datetime: bool = False,\n\t\t\tby_camera: bool = False\n\t\t\t):\n\t\tself._stopevent = Event()\n\t\tThread.__init__(self, name=\"WorkerThread\")\n\t\tself._parent = parent\n\t\tglobal worker_thread_running\n\t\tworker_thread_running = True\n\n\t\tprint(f\"Destination: {destination}\")\n\t\tif mode == mode_copy:\n\t\t\tprint(\"Mode: Copy\")\n\t\telif mode == mode_move:\n\t\t\tprint(\"Mode: Move\")\n\t\telse:\n\t\t\tprint(\"Unknown mode. Defaulting to 'Copy'\")\n\t\t\tmode = mode_copy\n\n\t\tprint(f\"Sort Within Directories: {within_dirs}\")\n\t\tprint(f\"Sort by Date and Time: {by_datetime}\")\n\t\tprint(f\"Sort by Camera: {by_camera}\")\n\n\t\tself.destination = destination\n\t\tself.mode = mode\n\t\tself.within_dirs = within_dirs\n\t\tself.by_datetime = by_datetime\n\t\tself.by_camera = by_camera\n\t\tself.filelist = filelist\n\n\t@staticmethod\n\tdef parse_date(data: Dict) -> str:\n\t\t\"\"\"\n\t\tDetermine the date the photograph was taken from its EXIF data.\n\n\t\t:param data: EXIF data to find the date from.\n\t\t\"\"\"\n\n\t\tprint(data)\n\t\ttry:\n\t\t\tdate = str(data[\"EXIF DateTimeOriginal\"])[:10]\n\t\t\tdate = date.replace(':', '_').replace(' ', '_')\n\t\texcept KeyError:\n\n\t\t\ttry:\n\t\t\t\tprint(\"attempt 2\")\n\t\t\t\tdate = str(data[\"Image DateTime\"])[:10]\n\t\t\t\tdate = date.replace(':', '_').replace(' ', '_')\n\t\t\texcept KeyError:\n\t\t\t\ttry:\n\t\t\t\t\tprint(\"attempt 3 - video file\")\n\t\t\t\t\tdate = str(data[\"EXIF:DateTimeOriginal\"])[:10]\n\t\t\t\t\tdate = date.replace(':', '_').replace(' ', '_')\n\t\t\t\texcept KeyError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint(\"attempt 4 - video file\")\n\t\t\t\t\t\tdate = str(data[\"QuickTime:MediaCreateDate\"])[:10]\n\t\t\t\t\t\tdate = date.replace(':', '_').replace(' ', '_')\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\treturn ExifError().parse_error()\n\n\t\treturn date\n\n\tdef parse_camera(self, data: Dict) -> str:\n\t\t\"\"\"\n\t\tDetermine the camera the photograph was taken with its EXIF data.\n\n\t\t:param data: EXIF data to find the camera from.\n\t\t\"\"\"\n\n\t\tcamera = ''\n\n\t\tif self.by_camera:\n\t\t\ttry:\n\t\t\t\traw_camera = str(data[\"Image Model\"])\n\t\t\t\tif raw_camera in self._parent.cameras:\n\t\t\t\t\tcamera = self._parent.cameras[raw_camera]\n\t\t\t\telse:\n\t\t\t\t\tcamera = raw_camera\n\t\t\texcept KeyError:\n\t\t\t\t# Video files, Canon\n\t\t\t\ttry:\n\t\t\t\t\traw_camera = str(data[\"EXIF:Model\"])\n\t\t\t\t\tif raw_camera in self._parent.cameras:\n\t\t\t\t\t\tcamera = self._parent.cameras[raw_camera]\n\t\t\t\t\telse:\n\t\t\t\t\t\tcamera = raw_camera\n\t\t\t\texcept KeyError:\n\t\t\t\t\t# Video files, Panasonic\n\t\t\t\t\ttry:\n\t\t\t\t\t\traw_camera = str(data[\"MakerNotes:Model\"])\n\t\t\t\t\t\tif raw_camera in self._parent.cameras:\n\t\t\t\t\t\t\tcamera = self._parent.cameras[raw_camera]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcamera = raw_camera\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t# Video files, GoPro 7\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\traw_camera = str(data[\"QuickTime:Model\"])\n\t\t\t\t\t\t\tif raw_camera in self._parent.cameras:\n\t\t\t\t\t\t\t\tcamera = self._parent.cameras[raw_camera]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcamera = raw_camera\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\traw_camera = 
str(data[\"QuickTime:LensSerialNumber\"])\n\t\t\t\t\t\t\t\tif raw_camera in self._parent.cameras:\n\t\t\t\t\t\t\t\t\tcamera = self._parent.cameras[raw_camera]\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcamera = raw_camera\n\t\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t\tpass\n\n\t\treturn camera\n\n\tdef run(self) -> None:\n\t\t\"\"\"\n\t\tRun the worker thread.\n\t\t\"\"\"\n\n\t\tprint(\"Working...\")\n\n\t\tfor filepath in self.filelist:\n\n\t\t\tif self._stopevent.is_set():\n\t\t\t\treturn\n\n\t\t\terror = False\n\n\t\t\tpath_length = 80\n\n\t\t\tif len(filepath) > path_length:\n\t\t\t\tfilename_string = \"...\" + filepath[-path_length:]\n\t\t\telse:\n\t\t\t\tfilename_string = filepath + ' ' * (path_length - len(filepath))\n\n\t\t\tprint(f'\\r{filename_string}', end='')\n\n\t\t\ttry:\n\t\t\t\tfile = open(filepath, \"rb\")\n\t\t\texcept BaseException:\n\t\t\t\tExifError().open_error().show(filename_string)\n\t\t\t\terror = True\n\t\t\t\tcontinue\n\n\t\t\t# get the tags\n\t\t\tdata = exifread.process_file(file, details=False, debug=False)\n\n\t\t\twith exiftool.ExifTool() as et:\n\t\t\t\ttry:\n\t\t\t\t\tmetadata = et.get_metadata(filepath)\n\t\t\t\texcept json.decoder.JSONDecodeError:\n\t\t\t\t\tmetadata = None\n\n\t\t\t\t# using exiftool as a backup for video files\n\n\t\t\tif not data:\n\t\t\t\tdata = metadata\n\t\t\t\tif not metadata:\n\t\t\t\t\terror = True\n\t\t\t\t\tExifError().no_data().show(filename_string)\n\t\t\t\"\"\"try:\n\t\t\t\tdate = str(data['Image DateTime'])[:10]\n\t\t\t\tdate = date.replace(':', '_').replace(' ', '_')\n\t\t\texcept KeyError:\n\n\t\t\t\ttry:\n\t\t\t\t\tprint(\"attempt 2\")\n\t\t\t\t\tdate = str(data[\"EXIF DateTimeOriginal\"])[:10]\n\t\t\t\t\tdate = date.replace(':', '_').replace(' ', '_')\n\t\t\t\texcept KeyError:\n\t\t\t\t\tprint(f\"\\r'{filename_string}': Unable to parse EXIF data.\\n\")\n\t\t\t\t\terror = True\"\"\"\n\n\t\t\tdate = self.parse_date(data)\n\t\t\tif isinstance(date, ExifError):\n\t\t\t\terror = True\n\t\t\t\tdate.show(filename_string)\n\n\t\t\tif not error:\n\n\t\t\t\tcamera = self.parse_camera(data)\n\n\t\t\t\tdestination_path = os.path.join(self.destination, date, camera)\n\t\t\t\tmaybe_make(destination_path, parents=True)\n\n\t\t\t\t# print(f\"{date} {camera} -> {destination_path} \")\n\n\t\t\t\tdestination_filename = os.path.split(filepath)[-1]\n\n\t\t\t\t# If file already exists, add a (number) to the end of the filename\n\t\t\t\tif os.path.isfile(os.path.join(destination_path, destination_filename)):\n\t\t\t\t\t# TODO: Use filecmp to see if files are identical\n\n\t\t\t\t\tnum = 1\n\t\t\t\t\twhile True:\n\t\t\t\t\t\t# Determine the fist available duplicate number\n\t\t\t\t\t\tbase_filename, extension = os.path.splitext(destination_filename)\n\t\t\t\t\t\tif not os.path.isfile(f\"{base_filename} ({num}){extension}\"):\n\t\t\t\t\t\t\tdestination_filename = f\"{base_filename} ({num}){extension}\"\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tnum += 1\n\n\t\t\t\tprint(f\"{date} {camera} -> {os.path.join(destination_path, destination_filename)} \")\n\n\t\t\t\tif self.mode == mode_copy:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copy2(filepath, os.path.join(destination_path, destination_filename))\n\t\t\t\t\texcept BaseException:\n\t\t\t\t\t\tprint(f\"\\r'{filename_string}': Could not copy file.\\n\")\n\t\t\t\t\t\tExifError().copy_error().show(filename_string)\n\n\t\t\t\telif self.mode == mode_move:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.move(filepath, os.path.join(destination_path, destination_filename))\n\t\t\t\t\texcept 
BaseException:\n\t\t\t\t\t\tExifError().move_error().show(filename_string)\n\n\t\t\tfile.close()\n\n\t\t\tif not self._stopevent.is_set():\n\t\t\t\t# evt = ProgressEvent(myEVT_PROGRESS, -1)\n\t\t\t\t# wx.PostEvent(self._parent, evt)\n\t\t\t\tprogress_event.trigger()\n\n\t\tglobal worker_thread_running\n\t\tworker_thread_running = False\n\n\t\t# evt = CompletionEvent(myEVT_DONE, -1)\n\t\t# wx.PostEvent(self._parent, evt)\n\t\tsorting_done.trigger()\n\n\tdef join(self, timeout=None):\n\t\t\"\"\"\n\t\tStop the thread and wait for it to end.\n\n\t\t:param timeout:\n\t\t\"\"\"\n\n\t\tself._stopevent.set()\n\t\tThread.join(self, timeout)\n\n\n########################################################################\n\n\nclass Launcher(wx.Frame):\n\t\"\"\"\n\tMain window for Photo Sort.\n\t\"\"\"\n\n\tdef __init__(self, *args, **kwds):\n\t\t# begin wxGlade: Launcher.__init__\n\t\tkwds[\"style\"] = kwds.get(\"style\", 0) | wx.DEFAULT_FRAME_STYLE\n\t\twx.Frame.__init__(self, *args, **kwds)\n\t\tself.SetSize((600, 528))\n\t\tself.panel_1 = wx.Panel(self, wx.ID_ANY)\n\t\tself.source_dir_picker = dir_picker(self.panel_1, wx.ID_ANY)\n\t\tself.destination_dir_picker = dir_picker(self.panel_1, wx.ID_ANY)\n\t\tself.copy_radio_btn = wx.RadioButton(self.panel_1, wx.ID_ANY, \"Copy\", style=wx.RB_GROUP)\n\t\tself.move_radio_btn = wx.RadioButton(self.panel_1, wx.ID_ANY, \"Move\")\n\t\tself.within_dirs_checkbox = wx.CheckBox(self.panel_1, wx.ID_ANY, \"Sort Within Directories\")\n\t\tself.datetime_checkbox = wx.CheckBox(self.panel_1, wx.ID_ANY, \"Sort by Date and Time\")\n\t\tself.camera_checkbox = wx.CheckBox(self.panel_1, wx.ID_ANY, \"Sort by Camera\")\n\t\tself.manage_cameras_btn = wx.Button(self.panel_1, wx.ID_ANY, \"Manage Cameras\")\n\t\tself.progress_gauge = wx.Gauge(self.panel_1, wx.ID_ANY, 10)\n\t\tself.cancel_btn = wx.Button(self, wx.ID_ANY, \"Close\")\n\t\tself.sort_btn = wx.Button(self, wx.ID_ANY, \"Sort\")\n\n\t\t# Menu Bar\n\t\tself.Launcher_menubar = wx.MenuBar()\n\t\twxglade_tmp_menu = wx.Menu()\n\t\titem = wxglade_tmp_menu.Append(wx.ID_ANY, \"Settings FIle\", '')\n\t\tself.Bind(wx.EVT_MENU, self.set_settings_file, id=item.GetId())\n\t\tself.Launcher_menubar.Append(wxglade_tmp_menu, \"FIle\")\n\t\tself.SetMenuBar(self.Launcher_menubar)\n\t\t# Menu Bar end\n\n\t\tself.__set_properties()\n\t\tself.__do_layout()\n\n\t\tself.Bind(wx.EVT_CHECKBOX, self.within_dirs_clicked, self.within_dirs_checkbox)\n\t\tself.Bind(wx.EVT_BUTTON, self.do_manage_cameras, self.manage_cameras_btn)\n\t\tself.Bind(wx.EVT_BUTTON, self.on_cancel, self.cancel_btn)\n\t\tself.Bind(wx.EVT_BUTTON, self.sort_handler, self.sort_btn)\n\t\t# end wxGlade\n\n\t\t# Bind Events\n\t\tprogress_event.set_receiver(self)\n\t\tprogress_event.Bind(self.increase_file_count)\n\n\t\tsorting_done.set_receiver(self)\n\t\tsorting_done.Bind(self.on_sort_done)\n\n\t\tself.Bind(wx.EVT_CLOSE, self.on_close)\n\n\t\ttimer_event.set_receiver(self)\n\t\ttimer_event.Bind(self.update_time_elapsed)\n\n\tdef __set_properties(self):\n\t\t# begin wxGlade: Launcher.__set_properties\n\t\tself.SetTitle(\"Sort Photographs\")\n\t\tself.source_dir_picker.SetMinSize((10000000, -1))\n\t\tself.copy_radio_btn.SetValue(1)\n\t\tself.camera_checkbox.SetValue(1)\n\t\t# end wxGlade\n\n\t\t# Load camera and directories settings\n\n\t\ttry:\n\t\t\twith open(\"settings.json\") as f:\n\t\t\t\tself.cameras, directories = 
json.load(f)\n\t\t\t\tself.source_dir_picker.SetInitialValue(directories[\"Source\"])\n\t\t\t\tself.destination_dir_picker.SetInitialValue(directories[\"Destination\"])\n\t\texcept FileNotFoundError:\n\t\t\tself.source_dir_picker.SetInitialValue(os.path.abspath(\"To Sort\"))\n\t\t\tself.destination_dir_picker.SetInitialValue(os.path.abspath(\"By Date\"))\n\n\t\tself.source_dir_picker.ResetValue()\n\t\tself.destination_dir_picker.ResetValue()\n\t\tself.sort_btn.SetFocus()\n\n\tdef __do_layout(self):\n\t\t# begin wxGlade: Launcher.__do_layout\n\t\tsizer_1 = wx.BoxSizer(wx.VERTICAL)\n\t\tsizer_7 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsizer_2 = wx.BoxSizer(wx.VERTICAL)\n\t\tprogress_text = wx.BoxSizer(wx.HORIZONTAL)\n\t\tgrid_sizer_1 = wx.FlexGridSizer(2, 2, 0, 0)\n\t\theader_text = wx.StaticText(self.panel_1, wx.ID_ANY, \"Sort Photographs By Date\")\n\t\theader_text.SetFont(\n\t\t\t\twx.Font(20, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, 0, \"Ubuntu\")\n\t\t\t\t)\n\t\tsizer_2.Add(header_text, 0, 0, 0)\n\t\tstatic_line_1 = wx.StaticLine(self.panel_1, wx.ID_ANY)\n\t\tsizer_2.Add(static_line_1, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 5)\n\t\tsource_label = wx.StaticText(self.panel_1, wx.ID_ANY, \"Source Directory: \")\n\t\tgrid_sizer_1.Add(source_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.BOTTOM | wx.TOP, 3)\n\t\tgrid_sizer_1.Add(self.source_dir_picker, 1, wx.ALIGN_CENTER_VERTICAL | wx.BOTTOM | wx.EXPAND | wx.TOP, 3)\n\t\tdestination_label = wx.StaticText(self.panel_1, wx.ID_ANY, \"Destination Directory: \")\n\t\tgrid_sizer_1.Add(destination_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.BOTTOM | wx.TOP, 3)\n\t\tgrid_sizer_1.Add(\n\t\t\t\tself.destination_dir_picker, 1, wx.ALIGN_CENTER_VERTICAL | wx.BOTTOM | wx.EXPAND | wx.TOP, 3\n\t\t\t\t)\n\t\tsizer_2.Add(grid_sizer_1, 1, wx.EXPAND, 0)\n\t\tstatic_line_2 = wx.StaticLine(self.panel_1, wx.ID_ANY)\n\t\tsizer_2.Add(static_line_2, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 5)\n\t\tmove_or_copy_label = wx.StaticText(self.panel_1, wx.ID_ANY, \"Would you like to copy or move the images?\")\n\t\tsizer_2.Add(move_or_copy_label, 0, 0, 0)\n\t\tsizer_2.Add(self.copy_radio_btn, 0, wx.BOTTOM | wx.TOP, 3)\n\t\tsizer_2.Add(self.move_radio_btn, 0, wx.BOTTOM | wx.TOP, 3)\n\t\tstatic_line_3 = wx.StaticLine(self.panel_1, wx.ID_ANY)\n\t\tsizer_2.Add(static_line_3, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 5)\n\t\tother_options_label = wx.StaticText(self.panel_1, wx.ID_ANY, \"Other Options\")\n\t\tsizer_2.Add(other_options_label, 0, 0, 0)\n\t\tsizer_2.Add(self.within_dirs_checkbox, 0, wx.BOTTOM | wx.TOP, 3)\n\t\tsizer_2.Add(self.datetime_checkbox, 0, wx.BOTTOM | wx.TOP, 3)\n\t\tsizer_2.Add(self.camera_checkbox, 0, wx.BOTTOM | wx.TOP, 3)\n\t\tsizer_2.Add(self.manage_cameras_btn, 0, wx.LEFT, 5)\n\t\tstatic_line_4 = wx.StaticLine(self.panel_1, wx.ID_ANY)\n\t\tsizer_2.Add(static_line_4, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 10)\n\t\tsizer_2.Add(self.progress_gauge, 0, wx.EXPAND, 0)\n\t\tfile_count_text = wx.StaticText(self.panel_1, wx.ID_ANY, \"Ready to Sort\", style=wx.ALIGN_LEFT)\n\t\tprogress_text.Add(file_count_text, 1, wx.EXPAND, 0)\n\t\ttime_elapsed_text = wx.StaticText(self.panel_1, wx.ID_ANY, \"0:00:00\", style=wx.ALIGN_RIGHT)\n\t\tprogress_text.Add(time_elapsed_text, 1, wx.EXPAND, 0)\n\t\tsizer_2.Add(progress_text, 1, wx.EXPAND, 0)\n\t\tstatic_line_5 = wx.StaticLine(self.panel_1, wx.ID_ANY)\n\t\tsizer_2.Add(static_line_5, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 5)\n\t\tself.panel_1.SetSizer(sizer_2)\n\t\tsizer_1.Add(self.panel_1, 1, wx.ALL | wx.EXPAND, 
10)\n\t\tsizer_7.Add(self.cancel_btn, 0, wx.RIGHT, 7)\n\t\tsizer_7.Add(self.sort_btn, 0, wx.RIGHT, 7)\n\t\tsizer_1.Add(sizer_7, 0, wx.ALIGN_RIGHT | wx.BOTTOM | wx.LEFT | wx.RIGHT, 10)\n\t\tself.SetSizer(sizer_1)\n\t\tself.Layout()\n\t\t# end wxGlade\n\n\t\tself.file_count_text = file_count_text\n\t\tself.max_file_count = 0\n\t\tself.current_file_count = 0\n\n\t\tself.time_elapsed_text = time_elapsed_text\n\t\tself.elapsed_time = 0\n\n\tdef set_max_file_count(self, value: int):\n\t\t\"\"\"\n\t\tSet the total number of files to be sorted.\n\n\t\t:param value: total number of files to be sorted.\n\t\t\"\"\"\n\n\t\tself.max_file_count = value\n\t\tself.progress_gauge.SetRange(value)\n\t\tself.update_file_count()\n\n\tdef reset_file_count(self):\n\t\t\"\"\"\n\t\tReset the count of files which have been sorted to ``0``.\n\t\t\"\"\"\n\n\t\tself.current_file_count = 0\n\t\tself.update_file_count()\n\n\tdef update_file_count(self, *_):\n\t\t\"\"\"\n\t\tUpdate the tracker to show the new count of files which have been sorted.\n\t\t\"\"\"\n\n\t\tself.file_count_text.SetLabel(f\"Processing {self.current_file_count} of {self.max_file_count}\")\n\t\tself.progress_gauge.SetValue(self.current_file_count)\n\n\tdef increase_file_count(self, *_):\n\t\t\"\"\"\n\t\tIncrease the count of files which have been sorted by ``1``.\n\t\t\"\"\"\n\n\t\tself.current_file_count += 1\n\t\tself.update_file_count()\n\n\tdef on_sort_done(self, *_):\n\t\t\"\"\"\n\t\tTidy up after all files have been sorted.\n\t\t\"\"\"\n\n\t\tself.timer.join()\n\t\tself.sort_btn.Enable()\n\t\tself.cancel_btn.SetLabel(\"Close\")\n\n\t\tlabel = f\"Complete: {self.current_file_count} file{'s' if self.current_file_count > 1 else ''} sorted\"\n\t\tself.file_count_text.SetLabel(label)\n\t\twx.MessageDialog(self, \"Sort Complete\", \"Sort Complete\", style=wx.OK | wx.ICON_INFORMATION).ShowModal()\n\n\tdef update_time_elapsed(self, *_):\n\t\t\"\"\"\n\t\tIncrease the elapsed time by one and update the displayed value.\n\t\t\"\"\"\n\n\t\tself.elapsed_time += 1\n\t\tself.time_elapsed_text.SetLabel(f\"{timedelta(seconds=self.elapsed_time)}\")\n\t\tself.Layout()\n\n\tdef do_manage_cameras(self, event): # wxGlade: Launcher.<event_handler>\n\t\t\"\"\"\n\t\tHandler for ``Manage Cameras`` button.\n\n\t\tOpens ``Manage Cameras`` dialog.\n\t\t\"\"\"\n\n\t\twith ManageCameras(self, data=self.cameras) as dlg:\n\t\t\tres = dlg.ShowModal()\n\t\t\tif res == wx.ID_APPLY:\n\t\t\t\tself.cameras = dlg.get_data()\n\t\t\t\tprint(self.cameras)\n\n\t\tevent.Skip()\n\n\tdef within_dirs_ignore(self, event): # noqa: D102\n\t\tevent.Skip()\n\n\tdef within_dirs_folder_done(self, event):\n\t\t\"\"\"\n\t\tTidy up after all files have been sorted with ``within_dirs=True``.\n\t\t\"\"\"\n\n\t\tevent.Skip()\n\n\t\tself.increase_file_count()\n\n\t\tif self.current_file_count == self.max_file_count:\n\t\t\tself.timer.join()\n\t\t\tself.sort_btn.Enable()\n\t\t\tself.cancel_btn.SetLabel(\"Close\")\n\n\t\t\tlabel = f\"Complete: {self.current_file_count} file{'s' if self.current_file_count > 1 else ''} sorted\"\n\t\t\tself.file_count_text.SetLabel(label)\n\t\t\twx.MessageDialog(self, \"Sort Complete\", \"Sort Complete\", style=wx.OK | wx.ICON_INFORMATION).ShowModal()\n\n\t\t\t# Reset event bindings\n\t\t\tprogress_event.Unbind()\n\t\t\tsorting_done.Unbind()\n\n\t\t\tprogress_event.Bind(self.increase_file_count)\n\t\t\tsorting_done.Bind(self.increase_file_count)\n\n\tdef sort_within_dirs(self):\n\t\t\"\"\"\n\t\tSort photos within directories.\n\t\t\"\"\"\n\n\t\t# Rebind 
events\n\t\tprogress_event.Unbind()\n\t\tsorting_done.Unbind()\n\n\t\tprogress_event.Bind(self.within_dirs_ignore)\n\t\tsorting_done.Bind(self.within_dirs_folder_done)\n\n\t\tmain_source = self.source_dir_picker.get_value()\n\t\tprint(f\"Main Source: {main_source}\")\n\n\t\tsubdir_list = [x[0] for x in os.walk(main_source)]\n\t\tself.set_max_file_count(len(subdir_list))\n\t\t# TODO: Handle no folders\n\t\tprint(subdir_list)\n\n\t\tif len(subdir_list) >= 1:\n\t\t\tfor subdir in subdir_list:\n\t\t\t\tprint(f\"Subdir: {subdir}\")\n\n\t\t\t\tprint(os.path.split(subdir)[-1])\n\n\t\t\t\tonlyfiles = []\n\n\t\t\t\tprint(f\"Source: {subdir}\")\n\n\t\t\t\tfor root, dirs, files in os.walk(subdir):\n\t\t\t\t\tfor filename in os.listdir(root):\n\t\t\t\t\t\tif os.path.isfile(os.path.join(root, filename)):\n\t\t\t\t\t\t\tonlyfiles.append(os.path.join(root, filename))\n\n\t\t\t\tprint(onlyfiles)\n\n\t\t\t\tif self.copy_radio_btn.GetValue() and not self.move_radio_btn.GetValue():\n\t\t\t\t\tmode = mode_copy\n\t\t\t\telif self.move_radio_btn.GetValue() and not self.move_radio_btn.GetValue():\n\t\t\t\t\tmode = mode_move\n\t\t\t\telse:\n\t\t\t\t\tmode = mode_copy\n\n\t\t\t\tself.elapsed_time = 0\n\n\t\t\t\tself.worker = Worker(\n\t\t\t\t\t\tself,\n\t\t\t\t\t\tfilelist=onlyfiles,\n\t\t\t\t\t\tdestination=subdir,\n\t\t\t\t\t\tmode=mode, # TODO: Disable copy checkbox if within dirs selected\n\t\t\t\t\t\twithin_dirs=True,\n\t\t\t\t\t\tby_datetime=self.datetime_checkbox.GetValue(),\n\t\t\t\t\t\tby_camera=self.camera_checkbox.GetValue(),\n\t\t\t\t\t\t)\n\t\t\t\tself.timer = Timer(self)\n\t\t\t\tself.timer.start()\n\t\t\t\tself.worker.start()\n\n\tdef sort(self) -> None: # wxGlade: Launcher.<event_handler>\n\t\t\"\"\"\n\t\tRun sort.\n\t\t\"\"\"\n\n\t\tonlyfiles = []\n\n\t\tsource = self.source_dir_picker.get_value()\n\t\tprint(f\"Source: {source}\")\n\n\t\tfor root, dirs, files in os.walk(source):\n\t\t\tfor filename in os.listdir(root):\n\t\t\t\tif os.path.isfile(os.path.join(root, filename)):\n\t\t\t\t\tonlyfiles.append(os.path.join(root, filename))\n\n\t\tprint(onlyfiles)\n\t\tself.reset_file_count()\n\t\tself.set_max_file_count(len(onlyfiles))\n\t\t# TODO: Handle no files\n\n\t\tif self.copy_radio_btn.GetValue() and not self.move_radio_btn.GetValue():\n\t\t\tmode = mode_copy\n\t\telif self.move_radio_btn.GetValue() and not self.copy_radio_btn.GetValue():\n\t\t\tmode = mode_move\n\t\telse:\n\t\t\tmode = mode_copy\n\n\t\tself.elapsed_time = 0\n\n\t\tself.worker = Worker(\n\t\t\t\tself,\n\t\t\t\tfilelist=onlyfiles,\n\t\t\t\tdestination=self.destination_dir_picker.get_value(),\n\t\t\t\tmode=mode,\n\t\t\t\twithin_dirs=self.within_dirs_checkbox.GetValue(),\n\t\t\t\tby_datetime=self.datetime_checkbox.GetValue(),\n\t\t\t\tby_camera=self.camera_checkbox.GetValue(),\n\t\t\t\t)\n\t\tself.timer = Timer(self)\n\t\tself.timer.start()\n\t\tself.worker.start()\n\n\tdef sort_handler(self, event) -> None: # wxGlade: Launcher.<event_handler>\n\t\t\"\"\"\n\t\tHandler for the \"sort\" button to determine which function\n\t\tto call from the options selected by the user\n\t\t\"\"\" # noqa: D400\n\n\t\tself.sort_btn.Disable()\n\t\tself.cancel_btn.SetLabel(\"Cancel\")\n\n\t\tif self.within_dirs_checkbox.GetValue():\n\t\t\tself.sort_within_dirs()\n\t\telse:\n\t\t\tself.sort()\n\n\t\tevent.Skip()\n\n\tdef stop_threads(self) -> None:\n\t\t\"\"\"\n\t\tStop worker and timer threads.\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tself.worker.join()\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.timer.join()\n\t\texcept 
AttributeError:\n\t\t\tpass\n\n\tdef on_close(self, event) -> None: # wxGlade: Launcher.<event_handler>\n\t\t\"\"\"\n\t\tHandler for closing the window.\n\t\t\"\"\"\n\n\t\tif worker_thread_running:\n\t\t\tres = wx.MessageDialog(\n\t\t\t\t\tself,\n\t\t\t\t\t\"Are you sure you want to cancel?\",\n\t\t\t\t\t\"Cancel?\",\n\t\t\t\t\tstyle=wx.YES_NO | wx.ICON_QUESTION,\n\t\t\t\t\t).ShowModal()\n\t\t\tif res == wx.ID_NO:\n\t\t\t\tif event.CanVeto:\n\t\t\t\t\tevent.Veto()\n\t\t\t\t\treturn\n\t\t\tself.stop_threads()\n\n\t\t# Save camera and directory settings\n\t\twith open(\"settings.json\", 'w') as f:\n\t\t\tjson.dump([\n\t\t\t\t\tself.cameras,\n\t\t\t\t\t{\n\t\t\t\t\t\t\t\"Source\": self.source_dir_picker.GetValue(),\n\t\t\t\t\t\t\t\"Destination\": self.destination_dir_picker.GetValue(),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t],\n\t\t\t\t\t\tf)\n\n\t\tself.Destroy() # you may also do: event.Skip()\n\t\t# since the default event handler does call Destroy(), too\n\n\tdef on_cancel(self, *events): # wxGlade: Launcher.<event_handler>\n\t\t\"\"\"\n\t\tHandler for the cancel/close button, depending on context.\n\t\t\"\"\"\n\n\t\tif worker_thread_running:\n\t\t\tres = wx.MessageDialog(\n\t\t\t\t\tself, \"Are you sure you want to cancel?\", \"Cancel?\", style=wx.YES_NO | wx.ICON_QUESTION\n\t\t\t\t\t).ShowModal()\n\t\t\tif res == wx.ID_YES:\n\t\t\t\tself.stop_threads()\n\t\t\t\tself.timer.join()\n\t\t\t\tself.sort_btn.Enable()\n\t\t\t\tself.cancel_btn.SetLabel(\"Close\")\n\n\t\tif self.cancel_btn.Label == \"Close\":\n\t\t\tself.Close()\n\n\tdef within_dirs_clicked(self, event) -> None: # wxGlade: Launcher.<event_handler>\n\t\t\"\"\"\n\t\tHandler for the ``within_dirs`` checkbox being toggled.\n\t\t\"\"\"\n\n\t\tself.destination_dir_picker.Enable(not self.destination_dir_picker.IsEnabled())\n\t\tself.move_radio_btn.SetValue(1)\n\t\tself.move_radio_btn.Enable(not self.move_radio_btn.IsEnabled())\n\t\tself.copy_radio_btn.SetValue(0)\n\t\tself.copy_radio_btn.Enable(not self.copy_radio_btn.IsEnabled())\n\n\t\tevent.Skip()\n\n\tdef set_settings_file(self, event) -> None: # wxGlade: Launcher.<event_handler> # noqa: D102\n\t\tdlg = SettingsDialog(self, id=wx.ID_ANY)\n\t\tres = dlg.ShowModal()\n\t\tprint(\"Event handler 'set_settings_file' not implemented!\")\n\t\tevent.Skip()\n\n\n# end of class Launcher\n"
},
{
"alpha_fraction": 0.6556291580200195,
"alphanum_fraction": 0.6887417435646057,
"avg_line_length": 17.875,
"blob_id": "13a164c7b6e978b29217363111577c020430ef74",
"content_id": "ac81f1b1fb4a3bd952c5ccb46c9182634c6ed102",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 32,
"path": "/photo_sort/photo_sort.py",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# noqa: D100\n#\n# generated by wxGlade 0.9.3 on Sun Jul 14 22:10:15 2019\n#\n\n# This is an automatically generated file.\n# Manual changes will be overwritten without warning!\n\n# 3rd party\nimport wx # type: ignore # nodep\n\n# this package\nfrom photo_sort.launcher import Launcher\n\n__all__ = [\"photo_sort\"]\n\n\nclass PhotoSort(wx.App):\n\n\tdef OnInit(self):\n\t\tself.photo_sort = Launcher(None, wx.ID_ANY, '')\n\t\tself.SetTopWindow(self.photo_sort)\n\t\tself.photo_sort.Show()\n\t\treturn True\n\n\n# end of class photo_sort\n\nif __name__ == \"__main__\":\n\tphoto_sort = PhotoSort(0)\n\tphoto_sort.MainLoop()\n"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.4444444477558136,
"avg_line_length": 11.600000381469727,
"blob_id": "8c62bc485ba4999d9d8978f53c4f330a59cee7f1",
"content_id": "e28d35ada1a721d49045282f2ecf698a9d911219",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 63,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 5,
"path": "/doc-source/api/photo-sort.rst",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "===========\nphoto_sort\n===========\n\n.. automodule:: photo_sort\n"
},
{
"alpha_fraction": 0.5845070481300354,
"alphanum_fraction": 0.5845070481300354,
"avg_line_length": 13.199999809265137,
"blob_id": "5d9b47ce86c1720e9899aefa62e617c8c3d91f78",
"content_id": "fa054079bbe3f64678b35b9ecdb1678521c58944",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 10,
"path": "/doc-source/license.rst",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "=========\nLicense\n=========\n\n``photo-sort`` is licensed under the :choosealicense:`MIT`\n\n.. license-info:: MIT\n\n.. license::\n\t:py: photo-sort\n"
},
{
"alpha_fraction": 0.6518672108650208,
"alphanum_fraction": 0.6593360900878906,
"avg_line_length": 16.59123992919922,
"blob_id": "d3eff4f5640a7e0e86214a2c2f4742b4d87b17c6",
"content_id": "c58ecc27e3300dd237d8b493781ba98a236e44a6",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2410,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 137,
"path": "/doc-source/index.rst",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "###########\nphoto-sort\n###########\n\n.. start short_desc\n\n.. documentation-summary::\n\t:meta:\n\n.. end short_desc\n\n.. start shields\n\n.. only:: html\n\n\t.. list-table::\n\t\t:stub-columns: 1\n\t\t:widths: 10 90\n\n\t\t* - Docs\n\t\t - |docs| |docs_check|\n\t\t* - Tests\n\t\t - |actions_linux| |actions_windows| |actions_macos|\n\t\t* - Activity\n\t\t - |commits-latest| |commits-since| |maintained|\n\t\t* - QA\n\t\t - |codefactor| |actions_flake8| |actions_mypy|\n\t\t* - Other\n\t\t - |license| |language| |requires|\n\n\t.. |docs| rtfd-shield::\n\t\t:project: photo-sort\n\t\t:alt: Documentation Build Status\n\n\t.. |docs_check| actions-shield::\n\t\t:workflow: Docs Check\n\t\t:alt: Docs Check Status\n\n\t.. |actions_linux| actions-shield::\n\t\t:workflow: Linux\n\t\t:alt: Linux Test Status\n\n\t.. |actions_windows| actions-shield::\n\t\t:workflow: Windows\n\t\t:alt: Windows Test Status\n\n\t.. |actions_macos| actions-shield::\n\t\t:workflow: macOS\n\t\t:alt: macOS Test Status\n\n\t.. |actions_flake8| actions-shield::\n\t\t:workflow: Flake8\n\t\t:alt: Flake8 Status\n\n\t.. |actions_mypy| actions-shield::\n\t\t:workflow: mypy\n\t\t:alt: mypy status\n\n\t.. |requires| image:: https://dependency-dash.repo-helper.uk/github/domdfcoding/photo-sort/badge.svg\n\t\t:target: https://dependency-dash.repo-helper.uk/github/domdfcoding/photo-sort/\n\t\t:alt: Requirements Status\n\n\t.. |codefactor| codefactor-shield::\n\t\t:alt: CodeFactor Grade\n\n\t.. |license| github-shield::\n\t\t:license:\n\t\t:alt: License\n\n\t.. |language| github-shield::\n\t\t:top-language:\n\t\t:alt: GitHub top language\n\n\t.. |commits-since| github-shield::\n\t\t:commits-since: v0.0.0\n\t\t:alt: GitHub commits since tagged version\n\n\t.. |commits-latest| github-shield::\n\t\t:last-commit:\n\t\t:alt: GitHub last commit\n\n\t.. |maintained| maintained-shield:: 2022\n\t\t:alt: Maintenance\n\n.. end shields\n\nInstallation\n---------------\n\n.. start installation\n\n.. installation:: photo-sort\n\t:github:\n\n.. end installation\n\nYou'll also need ``exiftool`` installed. On Debian/Ubuntu:\n\n.. code-block:: bash\n\n\t$ sudo apt install exiftool\n\n\n.. toctree::\n\t:hidden:\n\n\tHome<self>\n\n.. toctree::\n\t:maxdepth: 3\n\t:caption: API Reference\n\t:glob:\n\n\tapi/*\n\n.. toctree::\n\t:maxdepth: 3\n\t:caption: Contributing\n\n\tcontributing\n\tSource\n\n.. sidebar-links::\n\t:caption: Links\n\t:github:\n\n\n\n.. start links\n\n.. only:: html\n\n\tView the :ref:`Function Index <genindex>` or browse the `Source Code <_modules/index.html>`__.\n\n\t:github:repo:`Browse the GitHub Repository <domdfcoding/photo-sort>`\n\n.. end links\n"
},
{
"alpha_fraction": 0.689878523349762,
"alphanum_fraction": 0.71659916639328,
"avg_line_length": 25.276596069335938,
"blob_id": "6bec58a5bf1c692ba8209c9c42086ac68c14b126",
"content_id": "1321804d0b2b7d13f8508b7fc59dc2e48f9607b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1235,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 47,
"path": "/PhotoSorter.py",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#\n# photo_sort.py\n#\n# Copyright (c) 2019 Dominic Davis-Foster <[email protected]>\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n\n#\n# generated by wxGlade 0.9.3 on Sun Jul 14 21:24:46 2019\n#\n\n# 3rd party\nimport wx # nodep\n\n# this package\nfrom photo_sort.launcher import Launcher\n\n\nclass PhotoSort(wx.App):\n\n\tdef OnInit(self):\n\t\tself.photo_sort = Launcher(None, wx.ID_ANY, '')\n\t\tself.SetTopWindow(self.photo_sort)\n\t\tself.photo_sort.Show()\n\t\treturn True\n\n\n# end of class photo_sort\n\nif __name__ == \"__main__\":\n\tphoto_sort = PhotoSort(0)\n\tphoto_sort.MainLoop()\n"
},
{
"alpha_fraction": 0.43291139602661133,
"alphanum_fraction": 0.43291139602661133,
"avg_line_length": 18.75,
"blob_id": "aa0039d5c435a7a3035c6a72dc4f79a660f188b6",
"content_id": "69657fe8de42d3d4ce50d34c80bc88dbb7078fd6",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 395,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 20,
"path": "/doc-source/docs.rst",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "**************************\n:mod:`package_name`\n**************************\n\n.. contents:: Table of Contents\n\n.. automodule:: package_name\n :members:\n :private-members:\n :special-members:\n\n===================================\n:mod:`package_name.__init__`\n===================================\n\n\n.. automodule:: package_name.__init__\n :members:\n :private-members:\n :special-members:\n"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 18.799999237060547,
"blob_id": "6425106fb2896ce7ae5dc9a6d379ac8a4533928e",
"content_id": "e880905258ea0f8cc8339b0a6f0ce7a0fb04a91e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "domdfcoding/Photo-Sort",
"src_encoding": "UTF-8",
"text": "# exiftool\ndomdf-python-tools>=1.5.0\ndomdf-wxpython-tools>=0.2.5\nexifread>=2.3.2\npyexiftool>=0.1.1\n"
}
] | 11 |
Beim/shmodel | https://github.com/Beim/shmodel | ae47f11439c56fceceaa29cdea3e1bbb205eafd3 | 2931d17c8bfda754b9fec9302f262b4876fe19c7 | fdeb4c543c3b606145622dbec899de312d608ab8 | refs/heads/master | 2022-12-25T05:09:06.197554 | 2020-06-16T04:43:07 | 2020-06-16T04:43:07 | 225,151,669 | 0 | 0 | null | 2019-12-01T11:41:05 | 2020-06-16T04:43:26 | 2022-12-08T03:20:12 | Python | [
{
"alpha_fraction": 0.5748624205589294,
"alphanum_fraction": 0.5766000747680664,
"avg_line_length": 41.121952056884766,
"blob_id": "3c202903ebf574ba5e7277e2f21a537ea81f74b1",
"content_id": "f9f7be999f7e3a65e7edcac8d955909571bd7fcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3511,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 82,
"path": "/sh/TrainJobQueueReceiver.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import pika\nimport json\nfrom pika.adapters.blocking_connection import BlockingChannel, BlockingConnection\n\nfrom sh.TrainJob import TrainJob\nfrom config.config_loader import config_loader\n\n\nclass TrainJobQueueReceiver:\n connection = None\n channel = None\n\n def __init__(self, host: str, port: str, username: str, password: str,\n queue_name: str, durable: bool, auto_ack: bool, prefetch_count: int):\n self.connection, self.channel = self.create_connection(\n host, port, username, password, queue_name, durable, auto_ack, prefetch_count)\n self.channel.start_consuming()\n\n def create_connection(self, host: str, port: str, username: str, password: str,\n queue_name: str, durable: bool, auto_ack: bool, prefetch_count: bool) -> (\n BlockingConnection, BlockingChannel):\n \"\"\"\n 建立连接\n :param host:\n :param port:\n :param username:\n :param password:\n :param queue_name:\n :param durable: 持久化\n :param auto_ack: 自动确认\n :param prefetch_count: 每个worker 最多接受的消息数量\n :return: connection, channel\n \"\"\"\n credential = pika.PlainCredentials(username, password)\n connection_params = pika.ConnectionParameters(host=host, credentials=credential, port=port)\n connection = pika.BlockingConnection(connection_params)\n channel = connection.channel()\n channel.queue_declare(queue=queue_name, durable=durable)\n channel.basic_qos(prefetch_count=prefetch_count)\n channel.basic_consume(queue=queue_name, on_message_callback=self.receive_callback, auto_ack=auto_ack)\n print('[pika] connection established, waiting for messages...')\n return connection, channel\n\n def receive_callback(self, ch: BlockingChannel, method, properties, body):\n \"\"\"\n 接受消息的回调\n :param ch:\n :param method:\n :param properties:\n :param body:\n :return:\n \"\"\"\n # {'trainTriples': [[2, 0, 'like'], [2, 1, 'like']], 'modelName': 'TransE', 'gid': 1}\n info = json.loads(body, encoding='utf-8')\n print({\n 'modelName': info['modelName'],\n 'gid': info['gid'],\n 'trainTriplesLen': len(info['trainTriples'])\n })\n train_triples = info['trainTriples']\n model_name = info['modelName']\n gspace_id = info['gid']\n uuid = info['uuid']\n try:\n TrainJob(train_triples, model_name, gspace_id, uuid, use_gpu=config_loader.get_config()['gpu']).run()\n ch.basic_ack(delivery_tag=method.delivery_tag)\n print('ack %s %d' % (model_name, gspace_id))\n except Exception as e:\n print(e)\n ch.basic_nack(delivery_tag=method.delivery_tag)\n print('nack %s %d' % (model_name, gspace_id))\n\nif __name__ == '__main__':\n rabbitmq_config = config_loader.get_config()['rabbitmq']\n receiver = TrainJobQueueReceiver(rabbitmq_config['host'],\n rabbitmq_config['port'],\n rabbitmq_config['username'],\n rabbitmq_config['password'],\n rabbitmq_config['queue_name'],\n rabbitmq_config['durable'],\n rabbitmq_config['auto_ack'],\n rabbitmq_config['prefetch_count'])"
},
{
"alpha_fraction": 0.5475040078163147,
"alphanum_fraction": 0.5491143465042114,
"avg_line_length": 26.30769157409668,
"blob_id": "403000a3aedcd4cbe2fd102ef0b3ddf77766c5e2",
"content_id": "053309f3b7f77bcfb3b832da28f57c61c4b50fca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2564,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 91,
"path": "/sh/ServiceMonitor.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request\nfrom kazoo.client import KazooClient\nimport json\nfrom config.config_loader import config_loader\nfrom Utils import mysql_utils\n\napp = Flask(__name__)\nconfig = config_loader.get_config()\n\n\nclass ServiceMonitor:\n\n def __init__(self, zkhost: str):\n zk = KazooClient(zkhost)\n zk.start()\n service_monitor_path = config['service_monitor']['zkpath']\n zk.ensure_path(service_monitor_path)\n data = {\n 'host': config['service_monitor']['host'],\n 'port': config['service_monitor']['port'],\n }\n zk.create(service_monitor_path + \"/service\", str.encode(json.dumps(data)), ephemeral=True, sequence=True)\n\n def record(self, uid: int, service: str, timestamp: str, duration: str, info: str):\n \"\"\"\n 记录调用信息\n :param uid: 用户id\n :param service: 服务名\n :param timestamp: 调用时间戳 (s)\n :param duration: 调用时长 (s)\n :return:\n \"\"\"\n sql = 'insert into servicemonitorlog (uid, service, timestamp, duration, info) values (%s, %s, %s, %s, %s)'\n mysql_utils.execute(sql, [uid, service, timestamp, duration, info])\n\n def query(self, uid: int):\n \"\"\"\n 查询调用信息\n :param uid: 用户id\n :return:\n \"\"\"\n sql = 'select * from servicemonitorlog where uid=%s'\n response = mysql_utils.query(sql, [uid])\n result = []\n for item in response:\n data = {\n 'uid': item['uid'],\n 'service': item['service'],\n 'timestamp': item['timestamp'],\n 'duration': item['duration'],\n 'info': item['info']\n }\n result.append(data)\n return result\n\nsm = ServiceMonitor(zkhost=config['zkhost'])\n\n\[email protected](\"/report\", methods=['POST'])\ndef report():\n \"\"\"\n 报告调用信息\n body: {\n uid(long),\n service(str),\n timestamp(str),\n duration(str)\n }\n :return:\n \"\"\"\n args = json.loads(request.data)\n sm.record(args['uid'], args['service'], args['timestamp'], args['duration'], args['info'])\n return json.dumps({'succ': True})\n\n\[email protected](\"/query\", methods=['POST'])\ndef query():\n \"\"\"\n 查询调用信息\n body: {\n uid(long),\n }\n :return:\n \"\"\"\n args = json.loads(request.data)\n result = sm.query(args['uid'])\n return json.dumps({'succ': True, 'data': result})\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', config['service_monitor']['port'])"
},
{
"alpha_fraction": 0.606065571308136,
"alphanum_fraction": 0.614918053150177,
"avg_line_length": 33.258426666259766,
"blob_id": "ed054c5f393ca3c4167d87b8a94a385822738844",
"content_id": "a2bd2635990ee9c6f2e7abef82a1ec905de97596",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6100,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 178,
"path": "/sh/ModelControllers.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom openke.config import Trainer, Tester\nfrom openke.module.model import TransE, TransH, TransD\nfrom openke.module.loss import MarginLoss\nfrom openke.module.strategy import NegativeSampling\nfrom openke.data import TrainDataLoader, TestDataLoader\nfrom config.config_loader import config_loader\n\n\nclass BaseModelController:\n\n def __init__(self, benchmark_dir: str, checkpoint_dir: str, use_gpu: bool = True):\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n # shutil.rmtree(checkpoint_dir)\n self.checkpoint_path = '%s/%s.ckpt' % (checkpoint_dir, self.model_name)\n self.parameters_path = '%s/%s.param' % (checkpoint_dir, self.model_name)\n self.use_gpu = use_gpu\n\n def train(self) -> None:\n raise NotImplementedError\n\n def test(self) -> None:\n raise NotImplementedError\n\n\nclass TranseController(BaseModelController):\n\n model_name = 'transe'\n\n def __init__(self, benchmark_dir: str, checkpoint_dir: str, use_gpu: bool = True):\n super(TranseController, self).__init__(benchmark_dir, checkpoint_dir, use_gpu)\n\n self.train_dataloader = TrainDataLoader(\n in_path=benchmark_dir + '/',\n nbatches=100,\n threads=8,\n sampling_mode=\"normal\",\n bern_flag=1,\n filter_flag=1,\n neg_ent=25,\n neg_rel=0)\n\n self.test_dataloader = TestDataLoader(benchmark_dir + '/', \"link\")\n\n self.ent_tot = self.train_dataloader.get_ent_tot()\n self.rel_tot = self.train_dataloader.get_rel_tot()\n\n self.transx = TransE(\n ent_tot = self.ent_tot,\n rel_tot = self.rel_tot,\n p_norm = 1,\n norm_flag = True)\n\n self.model = NegativeSampling(\n model = self.transx,\n loss = MarginLoss(margin = 5.0),\n batch_size = self.train_dataloader.get_batch_size()\n )\n\n def train(self) -> None:\n trainer = Trainer(model=self.model, data_loader=self.train_dataloader, train_times=1000, alpha=1.0, use_gpu=self.use_gpu)\n trainer.run()\n self.transx.save_checkpoint(self.checkpoint_path)\n self.transx.save_parameters(self.parameters_path)\n print(\"save check param\")\n\n def test(self) -> None:\n self.transx.load_checkpoint(self.checkpoint_path)\n tester = Tester(model = self.transx, data_loader = self.test_dataloader, use_gpu=self.use_gpu)\n tester.run_link_prediction(type_constrain = False)\n\n\nclass TranshController(BaseModelController):\n\n model_name = 'transh'\n\n def __init__(self, benchmark_dir: str, checkpoint_dir: str, use_gpu: bool = True):\n super(TranshController, self).__init__(benchmark_dir, checkpoint_dir, use_gpu)\n\n self.train_dataloader = TrainDataLoader(\n in_path=benchmark_dir + '/',\n nbatches=100,\n threads=8,\n sampling_mode=\"normal\",\n bern_flag=1,\n filter_flag=1,\n neg_ent=25,\n neg_rel=0)\n\n self.test_dataloader = TestDataLoader(benchmark_dir + '/', \"link\")\n\n self.ent_tot = self.train_dataloader.get_ent_tot()\n self.rel_tot = self.train_dataloader.get_rel_tot()\n\n self.transx = TransH(\n ent_tot = self.ent_tot,\n rel_tot = self.rel_tot,\n p_norm = 1,\n norm_flag = True)\n\n self.model = NegativeSampling(\n model = self.transx,\n loss = MarginLoss(margin = 4.0),\n batch_size = self.train_dataloader.get_batch_size()\n )\n\n def train(self) -> None:\n trainer = Trainer(model=self.model, data_loader=self.train_dataloader, train_times=1000, alpha=0.5, use_gpu=self.use_gpu)\n trainer.run()\n self.transx.save_checkpoint(self.checkpoint_path)\n self.transx.save_parameters(self.parameters_path)\n\n def test(self) -> None:\n self.transx.load_checkpoint(self.checkpoint_path)\n tester = Tester(model = self.transx, data_loader = self.test_dataloader, 
use_gpu=self.use_gpu)\n tester.run_link_prediction(type_constrain = False)\n\n\nclass TransdController(BaseModelController):\n\n model_name = 'transd'\n\n def __init__(self, benchmark_dir: str, checkpoint_dir: str, use_gpu: bool = True):\n super(TransdController, self).__init__(benchmark_dir, checkpoint_dir, use_gpu)\n\n self.train_dataloader = TrainDataLoader(\n in_path=benchmark_dir + '/',\n nbatches=100,\n threads=8,\n sampling_mode=\"normal\",\n bern_flag=1,\n filter_flag=1,\n neg_ent=25,\n neg_rel=0)\n\n self.test_dataloader = TestDataLoader(benchmark_dir + '/', \"link\")\n\n self.ent_tot = self.train_dataloader.get_ent_tot()\n self.rel_tot = self.train_dataloader.get_rel_tot()\n\n self.transx = TransD(\n ent_tot = self.ent_tot,\n rel_tot = self.rel_tot,\n p_norm = 1,\n norm_flag = True)\n\n self.model = NegativeSampling(\n model = self.transx,\n loss = MarginLoss(margin = 4.0),\n batch_size = self.train_dataloader.get_batch_size()\n )\n\n def train(self) -> None:\n trainer = Trainer(model=self.model, data_loader=self.train_dataloader, train_times=1000, alpha=1.0, use_gpu=self.use_gpu)\n trainer.run()\n self.transx.save_checkpoint(self.checkpoint_path)\n self.transx.save_parameters(self.parameters_path)\n\n def test(self) -> None:\n self.transx.load_checkpoint(self.checkpoint_path)\n tester = Tester(model = self.transx, data_loader = self.test_dataloader, use_gpu=self.use_gpu)\n tester.run_link_prediction(type_constrain = False)\n\n\nmodel_controllers = {\n 'transe': TranseController,\n 'transh': TranshController,\n 'transd': TransdController,\n}\n\n\ndef model_constructor(model_name: str) -> BaseModelController:\n if model_name in model_controllers:\n return model_controllers[model_name]\n else:\n raise Exception(\"model %s not implemented\" % model_name)\n\n\n"
},
{
"alpha_fraction": 0.6135922074317932,
"alphanum_fraction": 0.6165048480033875,
"avg_line_length": 31.21875,
"blob_id": "ed41c31aa997a0512dfa9bceb9ca31cf1af7b97c",
"content_id": "6109c08a08929b1060c5d72f6f857e7ce7728d52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1030,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 32,
"path": "/sh/ServiceReporter.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "from kazoo.client import KazooClient\nfrom config.config_loader import config_loader\nimport json\nimport requests\n\nconfig = config_loader.get_config()\n\n\nclass ServiceReporter:\n\n monitors = []\n\n def __init__(self, zkhost: str):\n zk = KazooClient(zkhost)\n zk.start()\n service_monitor_path = config['service_monitor']['zkpath']\n children = zk.get_children(service_monitor_path)\n for node in children:\n data, stat = zk.get(\"%s/%s\" % (service_monitor_path, str(node)))\n data = json.loads(data.decode('utf-8'))\n self.monitors.append(data)\n print('monitors: ' + json.dumps(self.monitors))\n\n def report(self, data: dict):\n monitor_info = self.monitors[0]\n url = '%s://%s:%s/%s' % ('http', monitor_info['host'], monitor_info['port'], 'report')\n data_str = json.dumps(data, ensure_ascii=False).encode('utf-8')\n return requests.post(url, data=data_str)\n\n\nif __name__ == '__main__':\n sr = ServiceReporter(zkhost=config['zkhost'])"
},
{
"alpha_fraction": 0.43446603417396545,
"alphanum_fraction": 0.6699029207229614,
"avg_line_length": 14.259259223937988,
"blob_id": "9f2affa63b4c9b8176aee2c9c1e38f0299f856cc",
"content_id": "7941ca67a7fa3f6d565299fb69ec1289827d64da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 412,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 27,
"path": "/requirements.txt",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "certifi==2019.11.28\ncffi==1.13.2\nchardet==3.0.4\ngrpcio==1.26.0\ngrpcio-tools==1.26.0\nidna==2.8\njoblib==0.14.1\nmkl-fft==1.0.15\nmkl-random==1.1.0\nmkl-service==2.3.0\nnumpy==1.17.4\nolefile==0.46\npika==1.1.0\nPillow==6.2.1\nprotobuf==3.11.2\npycparser==2.19\nPyMySQL==0.9.3\nrequests==2.22.0\nscikit-learn==0.22\nscipy==1.4.1\nsix==1.13.0\ntorch==1.3.1\ntorchvision==0.4.2\ntqdm==4.41.0\nurllib3==1.25.7\nkazoo==2.6.1\nflask==1.0.2\n"
},
{
"alpha_fraction": 0.5481916666030884,
"alphanum_fraction": 0.554800808429718,
"avg_line_length": 40.57251739501953,
"blob_id": "a2dd9301dade5f9458fe9d2b3ba0550086fcc32f",
"content_id": "d2d2ddb4ce8252466eb8be328b52a0d5e4bcd0f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5609,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 131,
"path": "/sh/ModelLoader.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom config.config_loader import config_loader\nfrom Utils import mysql_utils\nfrom sh.ModelPredictors import ModelPredictor\n\ncurr_dir = os.path.split(os.path.abspath(__file__))[0]\n\n\nclass ModelLoader:\n\n def __init__(self, use_gpu: bool = True):\n self.use_gpu = use_gpu\n self.models_dir = '%s/../%s' % (curr_dir, config_loader.get_config()['path']['trainedmodels'])\n if not os.path.exists(self.models_dir):\n os.makedirs(self.models_dir)\n self.model_map = self.load()\n\n\n def get_model(self, gid: int, model_name: str) -> ModelPredictor:\n return self.model_map[(str(gid), model_name)]\n\n def check_update(self) -> bool:\n \"\"\"\n 检查模型是否更新\n :return:\n \"\"\"\n local_embed_info = self._get_embed_infos()\n remote_embed_info = self._fetch_embed_infos()\n to_download_embed = remote_embed_info - local_embed_info\n return len(to_download_embed) > 0\n\n def load(self) -> dict:\n \"\"\"\n {\n gspaceId: Long,\n modelName: String,\n lastModified: int,\n modelRelativePath: String\n }\n 返回 {\n (<gid>, <model_name>): <ModelPredictor>,\n ...\n }\n 本地存储embed param 的规则为 {modelId}_{modelName}_{lastModified}.param\n S1 找到本地所有embed param 信息\n S2 获取server embed 信息\n S3 diff 记录server 上有,本地没有的embed param\n S5 下载缺少的embed param\n # S6 删除无用的embed param\n S7 load predictor\n :return:\n \"\"\"\n local_embed_info = self._get_embed_infos()\n remote_embed_info = self._fetch_embed_infos()\n to_download_embed = list(remote_embed_info - local_embed_info)\n # to_delete_embed = list(local_embed_info - remote_embed_info)\n self._download_embed(to_download_embed)\n load_map = {}\n for (gid, modelname, updated) in remote_embed_info:\n paramters_path = '%s/%s' % (self.models_dir, self._make_param_file_name(gid, modelname, updated))\n entity2id_path = '%s/%s' % (self.models_dir, self._make_entity2id_file_name(gid, modelname, updated))\n relation2id_path = '%s/%s' % (self.models_dir, self._make_relation2id_file_name(gid, modelname, updated))\n load_map[(gid, modelname)] = ModelPredictor(modelname,\n paramters_path,\n entity2id_path,\n relation2id_path,\n self.use_gpu)\n return load_map\n\n\n def _download_embed(self, to_download_embed: list) -> None:\n \"\"\"\n 下载模型参数\n 保存的文件格式为\n param: <gid>_<modelname>_<updated>.param\n entity2id: <gid>_<modelname>_<updated>.entity2id.txt\n relation2id: <gid>_<modelname>_<updated>.relation2id.txt\n :param to_download_embed:\n :return: None\n \"\"\"\n for (gid, modelname, updated) in to_download_embed:\n response = mysql_utils.query('select params, entity2id, relation2id from gspacemodelparam where gid=%s and modelname=%s',\n [gid, modelname])[0]\n paramters_path = '%s/%s' % (self.models_dir, self._make_param_file_name(gid, modelname, updated))\n entity2id_path = '%s/%s' % (self.models_dir, self._make_entity2id_file_name(gid, modelname, updated))\n relation2id_path = '%s/%s' % (self.models_dir, self._make_relation2id_file_name(gid, modelname, updated))\n if response['params'] != None and response['entity2id'] != None and response['relation2id'] != None:\n with open(paramters_path, 'w') as f:\n f.write(response['params'])\n with open(entity2id_path, 'w') as f:\n f.write(response['entity2id'])\n with open(relation2id_path, 'w') as f:\n f.write(response['relation2id'])\n\n def _get_embed_infos(self) -> set:\n \"\"\"\n 遍历本地embed 目录,获取embed param 信息\n ((<gid>, <modelname>, <updated>), ...)\n :return:\n \"\"\"\n result = set()\n files = os.listdir(self.models_dir)\n for file in files:\n result.add(self._parse_param_file_name(file))\n return result\n\n def 
_fetch_embed_infos(self) -> set:\n        \"\"\"\n        Fetch the embed info from the remote server\n        ((<gid>, <modelname>, <updated>), ...)\n        :return:\n        \"\"\"\n        response = mysql_utils.query('select gid, modelname, updated from gspacemodelparam where available=true')\n        result = set()\n        for item in response:\n            result.add( (str(item['gid']), item['modelname'], str(int(item['updated'].timestamp()))) )\n        return result\n\n    def _parse_param_file_name(self, filename: str) -> (str, str, str):\n        [gid, model_name, updated] = filename.split('.')[0].split('_')\n        return gid, model_name, updated\n\n    def _make_param_file_name(self, gid: str, model_name: str, updated: str) -> str:\n        return '%s_%s_%s.param' % (gid, model_name, updated)\n\n    def _make_entity2id_file_name(self, gid: str, model_name: str, updated: str) -> str:\n        return '%s_%s_%s.entity2id.txt' % (gid, model_name, updated)\n\n    def _make_relation2id_file_name(self, gid: str, model_name: str, updated: str) -> str:\n        return '%s_%s_%s.relation2id.txt' % (gid, model_name, updated)\n\n"
},
{
"alpha_fraction": 0.5784165263175964,
"alphanum_fraction": 0.5975794196128845,
"avg_line_length": 16.539823532104492,
"blob_id": "5a9db2749dd00dc8f524ff55e86306b1f0ec6c69",
"content_id": "dfe87c918201872b3c7c7f2db895f67232633b46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2161,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 113,
"path": "/README.md",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "\n# 简介\n\nTrans 模型在线训练&部署\n\n# 环境要求\n\n- python3\n- pytorch\n- rabbitmq 3.8\n- mysql 5.7\n\n# 使用\n\n1、安装pytorch\n- 无gpu:`conda install pytorch torchvision cpuonly -c pytorch`\n- 有gpu:https://pytorch.org/get-started/locally/\n\n2、安装其他依赖\n```bash\npip install -r requirements.txt\n# 或\npip install protobuf grpcio grpcio-tools pika numpy tqdm scikit-learn requests PyMySQL\n```\n\n3、编译C++ 文件\n```bash\ncd shmodel/openke\nbash make.sh\n```\n\n4、运行\n```bash\n# 运行服务端(训练模型)\nbash bin/train_server.sh\n# 运行服务端(部署模型)\nbash bin/predict_server.sh\n```\n\n# 配置\n\n```bash\ncd config\ncp config-example.json config-prod.json\n```\n\n修改`config/config.json`\n```json\n{\n \"env\": \"prod\"\n}\n```\n\n修改`config/config-prod.json`\n- server.port # 对应[sh4j](https://github.com/Beim/sh4j) 的server.port 配置\n- grpc.port # grpc server 监听的端口\n- rabbitmq.host\n- rabbitmq.port\n- rabbitmq.username\n- rabbitmq.password\n- mysql.host\n- mysql.port\n- mysql.username\n- mysql.password\n- gpu # 是否使用gpu\n\n```json\n{\n \"server\": {\n \"host\": \"\",\n \"port\": 18080,\n \"protocol\": \"http\"\n },\n \"grpc\": {\n \"port\": 8000\n },\n \"rabbitmq\": {\n \"queue_name\": \"trainJobQueue\",\n \"host\": \"localhost\",\n \"username\": \"root\",\n \"password\": \"123456\",\n \"port\": \"5672\",\n \"durable\": true,\n \"auto_ack\": false,\n \"prefetch_count\": 1\n },\n \"mysql\": {\n \"host\": \"localhost\",\n \"port\": 3306,\n \"database\": \"servicehouse\",\n \"username\": \"root\",\n \"password\": \"123456\"\n },\n \"path\": {\n \"trainedmodels\": \"trainedmodels\",\n \"benchmarks\": \"benchmarks/gspace\",\n \"checkpoint\": \"checkpoints\"\n },\n \"update_interval\": 5,\n \"gpu\": false\n}\n```\n\n# 其他\n\n训练模型使用[OpenKE-PyTorch](https://github.com/thunlp/OpenKE)\n```\n @inproceedings{han2018openke,\n title={OpenKE: An Open Toolkit for Knowledge Embedding},\n author={Han, Xu and Cao, Shulin and Lv Xin and Lin, Yankai and Liu, Zhiyuan and Sun, Maosong and Li, Juanzi},\n booktitle={Proceedings of EMNLP},\n year={2018}\n }\n```\n"
},
{
"alpha_fraction": 0.5830085873603821,
"alphanum_fraction": 0.5865159630775452,
"avg_line_length": 28.779069900512695,
"blob_id": "eb6c7aea9d4d2a2dc9b204230dfa865f8f6aca36",
"content_id": "7dd664c09ac8526c4bd5b75490aaa2ad38584581",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2566,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 86,
"path": "/sh/TrainingServer.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "from kazoo.client import KazooClient\nfrom config.config_loader import config_loader\nimport json\nimport time\nfrom concurrent import futures\nfrom flask import Flask, request\nfrom sh.TrainJob import TrainJob\nfrom sh.ServiceReporter import ServiceReporter\n\napp = Flask(__name__)\nconfig = config_loader.get_config()\nexecutor = futures.ThreadPoolExecutor(max_workers=1)\n\n\nclass TrainJobRegister:\n\n zk = None # zookeeper client\n node_path = None # the real path of the service node\n\n def __init__(self, host: str):\n self.zk = KazooClient(host)\n self.zk.start()\n\n def register(self):\n train_service_path = config['train_service_path']\n zk = self.zk\n zk.ensure_path(train_service_path)\n path = train_service_path + \"/service\"\n data = {\n 'host': config['host'],\n 'port': config['port'],\n 'gpu': config['gpu'],\n 'available': True\n }\n self.node_path = zk.create(path, str.encode(json.dumps(data)), ephemeral=True, sequence=True)\n\n def set_availability(self, state: bool):\n zk = self.zk\n data = json.loads(bytes.decode(zk.get(self.node_path)[0]))\n data['available'] = state\n zk.set(self.node_path, str.encode(json.dumps(data)))\n\n\ntjr = TrainJobRegister(host=config['zkhost'])\n\n\[email protected](\"/train\", methods=['POST'])\ndef train_job_run():\n def run(args: dict):\n train_triples = args['trainTriples']\n model_name = args['modelName']\n gspace_id = args['gid']\n uuid = args['uuid']\n uid = args['uid']\n tjr.set_availability(False)\n start_time = time.time()\n TrainJob(train_triples, model_name, gspace_id, uuid, use_gpu=config_loader.get_config()['gpu']).run()\n # time.sleep(10)\n end_time = time.time()\n tjr.set_availability(True)\n\n sr = ServiceReporter(zkhost=config['zkhost'])\n report_data = {\n 'uid': uid,\n 'service': config['train_service_path'],\n 'timestamp': str(start_time),\n 'duration': str(round(end_time - start_time, 2)),\n 'info': json.dumps({\n 'gpu': config['gpu']\n })\n }\n sr.report(report_data)\n\n args = json.loads(request.data)\n print({\n 'modelName': args['modelName'],\n 'gid': args['gid'],\n 'trainTriplesLen': len(args['trainTriples'])\n })\n executor.submit(run, args)\n return json.dumps({'succ': True})\n\n\nif __name__ == '__main__':\n tjr.register()\n app.run('0.0.0.0', config['port'])\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.559789776802063,
"alphanum_fraction": 0.5637319087982178,
"avg_line_length": 25.275861740112305,
"blob_id": "a8a20d8c1f7bb5f29e3835c4d1ae37495bc112c7",
"content_id": "4792aa564b95968dbc9e41ce0c9acebbf6516c52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 761,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 29,
"path": "/config/config_loader.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import os\nimport json\n\ncurr_dir = os.path.split(os.path.abspath(__file__))[0]\n\n\nclass ConfigLoader:\n\n def __init__(self):\n with open('%s/config.json' % curr_dir, 'r', encoding='utf-8') as f:\n cfg = json.load(f)\n env_activate = cfg['env']\n with open('%s/config-%s.json' % (curr_dir, env_activate), encoding='utf-8') as f:\n env_cfg = json.load(f)\n self.cfg = env_cfg\n\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, '_instance'):\n cls._instance = super(ConfigLoader, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def get_config(self):\n return self.cfg\n\n\nconfig_loader = ConfigLoader()\n\nif __name__ == '__main__':\n print(config_loader.get_config())"
},
{
"alpha_fraction": 0.552740752696991,
"alphanum_fraction": 0.562044620513916,
"avg_line_length": 37.960697174072266,
"blob_id": "7f2a380e00f3d1db879f3f0d4d8e85fbaee73d48",
"content_id": "5216b11862359464ffb2c04f73805512c92822d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8921,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 229,
"path": "/sh/ModelPredictors.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport torch\nimport json\n\nfrom openke.module.model import Model, TransE, TransH, TransD, TransR\n\n\nclass ModelPredictor:\n\n def __init__(self, model_name: str, paramters_path: str, entity2id_path: str, relation2id_path, use_gpu: bool = True):\n self.use_gpu = use_gpu\n self.entity2id_map, self.id2entity_map, self.relation2id_map, self.id2relation_map \\\n = self._get_ent_rel_map(entity2id_path, relation2id_path)\n self.ent_tot = len(self.entity2id_map)\n self.rel_tot = len(self.relation2id_map)\n self.ent_embeddings, self.rel_embeddings = self._get_ent_rel_embedding(paramters_path)\n\n constructor = self._get_model_constructor(model_name)\n self.model = constructor(\n ent_tot=self.ent_tot,\n rel_tot=self.rel_tot,\n p_norm=1,\n norm_flag=True)\n if use_gpu:\n self.model.cuda()\n self.model.load_parameters(paramters_path)\n\n def predict_head_entity(self, t: str, r: str, k: int) -> list:\n \"\"\"\n This method predicts the top k head entities given tail entity and relation.\n :param t: tail entity name\n :param r: relation type\n :param k: top k head entities\n :return: k possible entity names\n \"\"\"\n t = self.entity2id_map[t]\n r = self.relation2id_map[r]\n res = self._predict_head_entity(t, r, k)\n for idx in range(len(res)):\n res[idx] = self.id2entity_map[res[idx]]\n return res\n\n def predict_tail_entity(self, h: str, r: str, k: int) -> list:\n \"\"\"\n This method predicts the top k tail entities given head entity and relation.\n :param h: head entity name\n :param r: relation type\n :param k: top k tail entities\n :return: k possible entity names\n \"\"\"\n h = self.entity2id_map[h]\n r = self.relation2id_map[r]\n res = self._predict_tail_entity(h, r, k)\n for idx in range(len(res)):\n res[idx] = self.id2entity_map[res[idx]]\n return res\n\n def predict_relation(self, h: str, t: str, k: int) -> list:\n \"\"\"\n This methods predict the relation id given head entity and tail entity.\n :param h: head entity name\n :param t: tail entity name\n :param k: top k relations\n :return: k possible relation types\n \"\"\"\n h = self.entity2id_map[h]\n t = self.entity2id_map[t]\n res = self._predict_relation(h, t, k)\n for idx in range(len(res)):\n res[idx] = self.id2relation_map[res[idx]]\n return res\n\n def predict_triple(self, h: str, t: str, r: str, thresh: float) -> bool:\n \"\"\"\n This method tells you whether the given triple (h, t, r) is correct of wrong\n :param h: head entity name\n :param t: tail entity name\n :param r: relation type\n :param thresh: threshold for the triple\n :return:\n \"\"\"\n h = self.entity2id_map[h]\n t = self.entity2id_map[t]\n r = self.relation2id_map[r]\n return self._predict_triple(h, t, r, thresh)\n\n def get_ent_embedding(self, ent: str):\n return self.ent_embeddings[self.entity2id_map[ent]]\n\n def get_rel_embedding(self, rel: str):\n return self.rel_embeddings[self.relation2id_map[rel]]\n\n def _predict_head_entity(self, t: int, r: int, k: int) -> list:\n \"\"\"\n This method predicts the top k head entities given tail entity and relation.\n :param t: tail entity id\n :param r: relation id\n :param k: top k head entities\n :return: k possible entity ids\n \"\"\"\n test_h = self._to_cuda(torch.LongTensor(range(self.ent_tot)), self.use_gpu)\n test_t = self._to_cuda(torch.LongTensor([t] * self.ent_tot), self.use_gpu)\n test_r = self._to_cuda(torch.LongTensor([r] * self.ent_tot), self.use_gpu)\n res = self._predict(test_h, test_t, test_r).reshape(-1).argsort()[:k]\n return list(res)\n\n def _predict_tail_entity(self, h: 
int, r: int, k: int) -> list:\n        \"\"\"\n        This method predicts the top k tail entities given head entity and relation.\n        :param h: head entity id\n        :param r: relation id\n        :param k: top k tail entities\n        :return: k possible entity ids\n        \"\"\"\n        test_h = self._to_cuda(torch.LongTensor([h] * self.ent_tot), self.use_gpu)\n        test_t = self._to_cuda(torch.LongTensor(range(self.ent_tot)), self.use_gpu)\n        test_r = self._to_cuda(torch.LongTensor([r] * self.ent_tot), self.use_gpu)\n        res = self._predict(test_h, test_t, test_r).reshape(-1).argsort()[:k]\n        return list(res)\n\n    def _predict_relation(self, h: int, t: int, k: int) -> list:\n        \"\"\"\n        This method predicts the top k relations given head entity and tail entity.\n        :param h: head entity id\n        :param t: tail entity id\n        :param k: top k relations\n        :return: k possible relation ids\n        \"\"\"\n        test_h = self._to_cuda(torch.LongTensor([h] * self.rel_tot), self.use_gpu)\n        test_t = self._to_cuda(torch.LongTensor([t] * self.rel_tot), self.use_gpu)\n        test_r = self._to_cuda(torch.LongTensor(range(self.rel_tot)), self.use_gpu)\n        res = self._predict(test_h, test_t, test_r).reshape(-1).argsort()[:k]\n        return list(res)\n\n    def _predict_triple(self, h: int, t: int, r: int, thresh: float) -> bool:\n        \"\"\"\n        This method tells you whether the given triple (h, t, r) is correct or wrong\n        :param h: head entity id\n        :param t: tail entity id\n        :param r: relation id\n        :param thresh: threshold for the triple\n        :return:\n        \"\"\"\n        test_h = self._to_cuda(torch.LongTensor([h]), self.use_gpu)\n        test_t = self._to_cuda(torch.LongTensor([t]), self.use_gpu)\n        test_r = self._to_cuda(torch.LongTensor([r]), self.use_gpu)\n        res = self._predict(test_h, test_t, test_r)[0]\n        return res < thresh\n\n    def _to_cuda(self, t: torch.Tensor, use_gpu: bool) -> torch.Tensor:\n        if use_gpu:\n            return t.cuda()\n        else:\n            return t\n\n    def _predict(self, batch_h: torch.LongTensor, batch_t: torch.LongTensor, batch_r: torch.LongTensor, mode: str = None) -> np.ndarray:\n        \"\"\"\n        :param batch_h: head entity ids\n        :param batch_t: tail entity ids\n        :param batch_r: relation ids\n        :param mode:\n        :return: scores\n        \"\"\"\n        data = {\n            'batch_h': batch_h,\n            'batch_t': batch_t,\n            'batch_r': batch_r,\n            'mode': mode\n        }\n        return self.model.predict(data)\n\n    def _get_ent_rel_map(self, entity2id_path: str, relation2id_path: str) -> (dict, dict, dict, dict):\n        entity2id_map = {}\n        id2entity_map = {}\n        relation2id_map = {}\n        id2relation_map = {}\n        with open(entity2id_path, 'r') as f:\n            rows = f.read().split('\\n')[1:]\n            for row in rows:\n                items = row.split('\\t')\n                if len(items) != 2:\n                    continue\n                entity = str(items[0])\n                id = int(items[1])\n                entity2id_map[entity] = id\n                id2entity_map[id] = entity\n        with open(relation2id_path, 'r') as f:\n            rows = f.read().split('\\n')[1:]\n            for row in rows:\n                items = row.split('\\t')\n                if len(items) != 2:\n                    continue\n                relation = str(items[0])\n                id = int(items[1])\n                relation2id_map[relation] = id\n                id2relation_map[id] = relation\n        return entity2id_map, id2entity_map, relation2id_map, id2relation_map\n\n    def _get_model_constructor(self, model_name: str) -> Model:\n        models = {\n            'transe': TransE,\n            'transh': TransH,\n            'transd': TransD,\n        }\n        if model_name in models:\n            return models[model_name]\n        else:\n            raise NotImplementedError\n\n    def _get_ent_rel_embedding(self, paramters_path: str):\n        with open(paramters_path, 'r') as f:\n            params = json.load(f)\n        ent_embedding = params['ent_embeddings.weight']\n        rel_embedding = params['rel_embeddings.weight']\n        return ent_embedding, rel_embedding\n\nif 
__name__ == '__main__':\n import os\n\n curr_dir = os.path.split(os.path.abspath(__file__))[0]\n predictor = ModelPredictor('transe',\n '%s/../checkpoint/gspace/1/transe.param' % curr_dir,\n '%s/../benchmarks/gspace/1/entity2id.txt' % curr_dir,\n '%s/../benchmarks/gspace/1/relation2id.txt' % curr_dir,\n False)\n res = predictor.predict_head_entity(440, 13, 10)\n # res = predictor.predict_tail_entity(439, 13, 10)\n # res = predictor.predict_triple(439, 440, 13, 10)\n print(res)"
},
{
"alpha_fraction": 0.6922303438186646,
"alphanum_fraction": 0.7005280256271362,
"avg_line_length": 44.68965530395508,
"blob_id": "2f34ad5bb8dcee1b910c8a1a45fdfe0092fc6081",
"content_id": "f376559b043355cfbe5c47b45ca1afec9bfee5a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3977,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 87,
"path": "/sh/EmbeddingServer.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "from concurrent import futures\nfrom google.protobuf import wrappers_pb2 as wrappers\nimport grpc\nimport time\n\nfrom protos import embedding_pb2_grpc as embedding_pb2_grpc, embedding_pb2 as embedding_pb2\nfrom config.config_loader import config_loader\nfrom sh.ModelLoader import ModelLoader\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\nclass EmbeddingServicer(embedding_pb2_grpc.GraphEmbeddingServiceServicer):\n\n def __init__(self, model_loader: ModelLoader):\n self.model_loader = model_loader\n\n def predictHead(self, request: embedding_pb2.PredictHeadRequest, context) -> embedding_pb2.PredictPartResponse:\n print('[%s] predictHead\\n' % time.time(), request)\n model = self.model_loader.get_model(request.gid, request.modelName)\n res = model.predict_head_entity(request.tail, request.relation, request.k)\n return embedding_pb2.PredictPartResponse(val=res)\n\n def predictTail(self, request: embedding_pb2.PredictTailRequest, context) -> embedding_pb2.PredictPartResponse:\n print('[%s] predictTail\\n' % time.time(), request)\n model = self.model_loader.get_model(request.gid, request.modelName)\n res = model.predict_tail_entity(request.head, request.relation, request.k)\n return embedding_pb2.PredictPartResponse(val=res)\n\n def predictRelation(self, request: embedding_pb2.PredictRelationRequest, context) -> embedding_pb2.PredictPartResponse:\n print('[%s] predictRelation\\n' % time.time(), request)\n model = self.model_loader.get_model(request.gid, request.modelName)\n res = model.predict_relation(request.head, request.tail, request.k)\n return embedding_pb2.PredictPartResponse(val=res)\n\n def predictTriple(self, request: embedding_pb2.PredictTripleRequest, context) -> wrappers.BoolValue:\n print('[%s] predictTriple\\n' % time.time(), request)\n model = self.model_loader.get_model(request.gid, request.modelName)\n res = model.predict_triple(request.head, request.tail, request.relation, request.thresh)\n return wrappers.BoolValue(value=res)\n\n def getEntityEmbedding(self, request: embedding_pb2.GetEmbeddingRequest, context) -> embedding_pb2.GetEmbeddingResponse:\n print('[%s] getEntityEmbedding\\n' % time.time(), request)\n model = self.model_loader.get_model(request.gid, request.modelName)\n res = model.get_ent_embedding(request.val)\n return embedding_pb2.GetEmbeddingResponse(val=res)\n\n def getRelationEmbedding(self, request: embedding_pb2.GetEmbeddingRequest, context) -> embedding_pb2.GetEmbeddingResponse:\n print('[%s] getRelationEmbedding\\n' % time.time(), request)\n model = self.model_loader.get_model(request.gid, request.modelName)\n res = model.get_rel_embedding(request.val)\n return embedding_pb2.GetEmbeddingResponse(val=res)\n\n\ndef serve(model_loader: ModelLoader):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n embedding_pb2_grpc.add_GraphEmbeddingServiceServicer_to_server(\n EmbeddingServicer(model_loader), server\n )\n port = config_loader.get_config()['grpc']['port']\n server.add_insecure_port('[::]:%d' % port)\n server.start()\n print('start serve on [::]:%d' % port)\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n print('stop serve...')\n server.stop(0)\n\n\ndef update_model(model_loader: ModelLoader):\n try:\n while True:\n time.sleep(config_loader.get_config()['update_interval'])\n if model_loader.check_update():\n print('[%s] updating model...' 
% time.time())\n                model_loader.model_map = model_loader.load()\n    except KeyboardInterrupt:\n        print('stop update model')\n\n\nif __name__ == '__main__':\n    model_loader = ModelLoader(config_loader.get_config()['gpu'])\n    executor = futures.ThreadPoolExecutor(max_workers=2)\n    executor.submit(serve, model_loader)\n    update_model(model_loader)\n\n\n"
},
{
"alpha_fraction": 0.5583809018135071,
"alphanum_fraction": 0.5656460523605347,
"avg_line_length": 34.05454635620117,
"blob_id": "a21f3a31773e76d063e694fef69329ba862a4aa6",
"content_id": "0514eef8b395341b8357982c6919f664c31baee3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1927,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 55,
"path": "/Utils.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\nimport pymysql\n\nfrom config.config_loader import config_loader\n\n\nclass RequestUtils:\n\n def __init__(self):\n server_config = config_loader.get_config()['server']\n self.protocol = server_config['protocol']\n self.host = server_config['host']\n self.port = server_config['port']\n\n def post(self, path, data, cookies=None):\n url = '%s://%s:%s/%s' % (self.protocol, self.host, self.port, path)\n data_str = json.dumps(data, ensure_ascii=False).encode('utf-8')\n res = requests.post(url, data=data_str, cookies=cookies)\n return res\n\n def get(self, path, params, cookies=None):\n url = '%s://%s:%s/%s' % (self.protocol, self.host, self.port, path)\n res = requests.get(url, params=params, cookies=cookies)\n return res\n\n\nclass MysqlUtils:\n\n def __init__(self):\n mysql_config = config_loader.get_config()['mysql']\n self.db = pymysql.connect(host=mysql_config['host'],\n port=mysql_config['port'],\n database=mysql_config['database'],\n user=mysql_config['username'],\n password=mysql_config['password'],\n max_allowed_packet=1024*1024*1024,\n cursorclass=pymysql.cursors.DictCursor)\n\n def execute(self, sql: str, args: list = None, expect_rows: int = 1) -> bool:\n with self.db.cursor() as cursor:\n affected_rows = cursor.execute(sql, args)\n self.db.commit()\n return affected_rows == expect_rows\n\n def query(self, sql: str, args: list = None) -> list:\n with self.db.cursor() as cursor:\n cursor.execute(sql, args)\n result = list(cursor.fetchall())\n self.db.commit()\n return result\n\n\nrequest_utils = RequestUtils()\nmysql_utils = MysqlUtils()"
},
{
"alpha_fraction": 0.47941964864730835,
"alphanum_fraction": 0.4980286955833435,
"avg_line_length": 35.128204345703125,
"blob_id": "ae4aec289e8aee997b15d5fb31b020cb234722b4",
"content_id": "e42b1353af4b6e2486d03674f3ef62ec90bb9dee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12824,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 351,
"path": "/sh/TrainJob.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport shutil\nimport json\n\nfrom sh.ModelControllers import model_constructor\nfrom Utils import mysql_utils\nfrom config.config_loader import config_loader\n\ncurr_dir = os.path.split(os.path.abspath(__file__))[0]\n\n\nclass TrainJob:\n\n def __init__(self, triples: list, model_name: str, gspace_id: int, uuid: str, use_gpu: bool = True):\n self.triples = triples\n self.model_name = model_name\n self.gspace_id = gspace_id\n self.uuid = uuid\n self.use_gpu = use_gpu\n\n benchmarks = config_loader.get_config()['path']['benchmarks']\n checkpoint = config_loader.get_config()['path']['checkpoint']\n\n self.BENCHMARK_DIRPATH = '%s/../%s/%s' % (curr_dir, benchmarks, gspace_id)\n self.ENTITY2ID_PATH = '%s/entity2id.txt' % self.BENCHMARK_DIRPATH\n self.RELATION2ID_PATH = '%s/relation2id.txt' % self.BENCHMARK_DIRPATH\n self.TRAIN2ID_PATH = '%s/train2id.txt' % self.BENCHMARK_DIRPATH\n self.VALID2ID_PATH = '%s/valid2id.txt' % self.BENCHMARK_DIRPATH\n self.TEST2ID_PATH = '%s/test2id.txt' % self.BENCHMARK_DIRPATH\n self.CHECKPOINT_DIRPATH = '%s/../%s/gspace/%s' % (curr_dir, checkpoint, gspace_id)\n \n self.model_constructor = model_constructor(model_name)\n\n def run(self):\n \"\"\"\n S1 准备数据\n S2 训练\n S3 测试\n S4 上传结果\n :return:\n \"\"\"\n \"\"\"\n {'trainTriples': [], 'modelName': 'transe', 'gid': 4, 'uuid': '2f6963fc-59c9-4b2b-933c-1b9714c6120f'}\n train num 1, test num 1\n #train: 0, #valid: 0, #test: 0\n Input Files Path : /root/shmodel_copy/sh/../benchmarks/gspace/4/\n The toolkit is importing datasets.\n The total of relations is 0.\n The total of entities is 0.\n The total of train triples is 1.\n Input Files Path : /root/shmodel_copy/sh/../benchmarks/gspace/4/\n The total of test triples is 0.\n The total of valid triples is 0.\n bin/train_server.sh: line 2: 38132 Segmentation fault PYTHONPATH=. 
python sh/TrainJobQueueReceiver.py\n        \"\"\"\n        print('in train job')\n        if len(self.triples) == 0:\n            print('no training data')\n            return\n        self._prepare_data(self.triples, self.gspace_id)\n        model = self.model_constructor(self.BENCHMARK_DIRPATH, self.CHECKPOINT_DIRPATH, self.use_gpu)\n        model.train()\n        model.test()\n        self._upload_param(model.parameters_path)\n        print('finish train job')\n        return\n\n    def _upload_param(self, param_path: str) -> None:\n        # TODO switch to a file-upload approach; the param payload passed to mysql must not be too large\n\n        print('prepare upload param...')\n        with open(param_path, 'r') as f:\n            params = f.read()\n        with open(self.ENTITY2ID_PATH, 'r') as f:\n            entity2id = f.read()\n        with open(self.RELATION2ID_PATH, 'r') as f:\n            relation2id = f.read()\n\n        print('params len = %d' % len(params))\n        mysql_utils.execute(\n            'update gspacemodelparam set params=%s where gid=%s and modelname=%s',\n            [params, self.gspace_id, self.model_name])\n        print('upload_param gid[%d] model_name[%s]' % (self.gspace_id, self.model_name))\n\n        mysql_utils.execute(\n            'update gspacemodelparam set entity2id=%s where gid=%s and modelname=%s',\n            [entity2id, self.gspace_id, self.model_name])\n        print('update entity2id')\n        mysql_utils.execute(\n            'update gspacemodelparam set relation2id=%s where gid=%s and modelname=%s',\n            [relation2id, self.gspace_id, self.model_name])\n        print('update relation2id')\n\n\n        mysql_utils.execute(\n            'update gspacemodelparam set available=true where gid=%s and modelname=%s',\n            [self.gspace_id, self.model_name])\n        print('update available=true')\n        # request_path = 'embed/gspace/%d/model/%s' % (self.gspace_id, self.model_name)\n        # data = {\n        #     'uuid': self.uuid,\n        #     'param': param\n        # }\n        # response = request_utils.post(request_path, data)\n        # print('upload_param: ', response)\n\n    def _prepare_data(self, triples: list, gspace_id: int):\n        \"\"\"\n        Prepare the training and test data.\n        Generates the following files under the benchmarks/gspace/<gspace_id> directory:\n        entity2id.txt, relation2id.txt, train2id.txt, valid2id.txt, test2id.txt\n        type_constrain.txt, 1-1.txt, 1-n.txt, n-1.txt, n-n.txt, test2id_all.txt\n        :param triples: triples [[head, tail, relType], ...]\n        :param gspace_id: graph space id\n        :return:\n        \"\"\"\n        TRAIN_RATIO = 0.8\n        TEST_RATIO = 0.1\n        TRAIN_NUM = max(int(len(triples) * TRAIN_RATIO), 1)\n        TEST_NUM = max(int(len(triples) * TEST_RATIO), 1)\n        if os.path.exists(self.BENCHMARK_DIRPATH):\n            shutil.rmtree(self.BENCHMARK_DIRPATH)\n        os.makedirs(self.BENCHMARK_DIRPATH)\n\n        entities = set()\n        rels = set()\n        for [head, tail, rel] in triples:\n            entities.add(head)\n            entities.add(tail)\n            rels.add(rel)\n        entities = list(entities)\n        rels = list(rels)\n\n        entity2idmap = {}\n        rel2idmap = {}\n        with open(self.ENTITY2ID_PATH, 'w') as f:\n            f.write('%d\\n' % len(entities))\n            for idx in range(len(entities)):\n                entity = entities[idx]\n                entity2idmap[entity] = idx\n                f.write('%s\\t%d\\n' % (entity, idx))\n        del entities\n        with open(self.RELATION2ID_PATH, 'w') as f:\n            f.write('%d\\n' % len(rels))\n            for idx in range(len(rels)):\n                rel = rels[idx]\n                rel2idmap[rel] = idx\n                f.write('%s\\t%d\\n' % (rel, idx))\n        del rels\n\n        for idx in range(len(triples)):\n            [head, tail, rel] = triples[idx]\n            triples[idx] = [\n                entity2idmap[head],\n                entity2idmap[tail],\n                rel2idmap[rel]\n            ]\n        triples = np.random.permutation(triples)\n        print('train num %d, test num %d' % (TRAIN_NUM, TEST_NUM))\n        train_triples = triples[: TRAIN_NUM]\n        test_triples = triples[TRAIN_NUM : TRAIN_NUM + TEST_NUM]\n        valid_triples = triples[-TEST_NUM:]\n        print('#train: %d, #valid: %d, #test: %d' % (len(train_triples), len(valid_triples), len(test_triples)))\n        del triples\n\n        with 
open(self.TRAIN2ID_PATH, 'w') as f:\n            f.write('%d\\n' % len(train_triples))\n            for [headid, tailid, relid] in train_triples:\n                f.write('%d %d %d\\n' % (headid, tailid, relid))\n        with open(self.VALID2ID_PATH, 'w') as f:\n            f.write('%d\\n' % len(valid_triples))\n            for [headid, tailid, relid] in valid_triples:\n                f.write('%d %d %d\\n' % (headid, tailid, relid))\n        with open(self.TEST2ID_PATH, 'w') as f:\n            f.write('%d\\n' % len(test_triples))\n            for [headid, tailid, relid] in test_triples:\n                f.write('%d %d %d\\n' % (headid, tailid, relid))\n        del train_triples, valid_triples, test_triples\n\n        self._nn_prepare_data(self.BENCHMARK_DIRPATH)\n\n    @staticmethod\n    def _nn_prepare_data(benchmark_gspace_dir_path: str):\n        \"\"\"\n        Prepare the test and validation data.\n        Assumes train2id.txt, valid2id.txt and test2id.txt already exist.\n        Generates type_constrain.txt, 1-1.txt, 1-n.txt, n-1.txt, n-n.txt, test2id_all.txt\n        :param benchmark_gspace_dir_path: directory that holds the files above\n        :return:\n        \"\"\"\n        lef = {}\n        rig = {}\n        rellef = {}\n        relrig = {}\n\n        triple = open(\"%s/train2id.txt\" % benchmark_gspace_dir_path, \"r\")\n        valid = open(\"%s/valid2id.txt\" % benchmark_gspace_dir_path, \"r\")\n        test = open(\"%s/test2id.txt\" % benchmark_gspace_dir_path, \"r\")\n\n        tot = (int)(triple.readline())\n        for i in range(tot):\n            content = triple.readline()\n            h, t, r = content.strip().split()\n            if not (h, r) in lef:\n                lef[(h, r)] = []\n            if not (r, t) in rig:\n                rig[(r, t)] = []\n            lef[(h, r)].append(t)\n            rig[(r, t)].append(h)\n            if not r in rellef:\n                rellef[r] = {}\n            if not r in relrig:\n                relrig[r] = {}\n            rellef[r][h] = 1\n            relrig[r][t] = 1\n\n        tot = (int)(valid.readline())\n        for i in range(tot):\n            content = valid.readline()\n            h, t, r = content.strip().split()\n            if not (h, r) in lef:\n                lef[(h, r)] = []\n            if not (r, t) in rig:\n                rig[(r, t)] = []\n            lef[(h, r)].append(t)\n            rig[(r, t)].append(h)\n            if not r in rellef:\n                rellef[r] = {}\n            if not r in relrig:\n                relrig[r] = {}\n            rellef[r][h] = 1\n            relrig[r][t] = 1\n\n        tot = (int)(test.readline())\n        for i in range(tot):\n            content = test.readline()\n            h, t, r = content.strip().split()\n            if not (h, r) in lef:\n                lef[(h, r)] = []\n            if not (r, t) in rig:\n                rig[(r, t)] = []\n            lef[(h, r)].append(t)\n            rig[(r, t)].append(h)\n            if not r in rellef:\n                rellef[r] = {}\n            if not r in relrig:\n                relrig[r] = {}\n            rellef[r][h] = 1\n            relrig[r][t] = 1\n\n        test.close()\n        valid.close()\n        triple.close()\n\n        f = open(\"%s/type_constrain.txt\" % benchmark_gspace_dir_path, \"w\")\n        f.write(\"%d\\n\" % (len(rellef)))\n        for i in rellef:\n            f.write(\"%s\\t%d\" % (i, len(rellef[i])))\n            for j in rellef[i]:\n                f.write(\"\\t%s\" % (j))\n            f.write(\"\\n\")\n            f.write(\"%s\\t%d\" % (i, len(relrig[i])))\n            for j in relrig[i]:\n                f.write(\"\\t%s\" % (j))\n            f.write(\"\\n\")\n        f.close()\n\n        rellef = {}\n        totlef = {}\n        relrig = {}\n        totrig = {}\n        # lef: (h, r)\n        # rig: (r, t)\n        for i in lef:\n            if not i[1] in rellef:\n                rellef[i[1]] = 0\n                totlef[i[1]] = 0\n            rellef[i[1]] += len(lef[i])\n            totlef[i[1]] += 1.0\n\n        for i in rig:\n            if not i[0] in relrig:\n                relrig[i[0]] = 0\n                totrig[i[0]] = 0\n            relrig[i[0]] += len(rig[i])\n            totrig[i[0]] += 1.0\n\n        s11 = 0\n        s1n = 0\n        sn1 = 0\n        snn = 0\n        f = open(\"%s/test2id.txt\" % benchmark_gspace_dir_path, \"r\")\n        tot = (int)(f.readline())\n        for i in range(tot):\n            content = f.readline()\n            h, t, r = content.strip().split()\n            rign = rellef[r] / totlef[r]\n            lefn = relrig[r] / totrig[r]\n            if (rign < 1.5 and lefn < 1.5):\n                s11 += 1\n            if (rign >= 1.5 and lefn < 1.5):\n                s1n += 1\n            if (rign < 1.5 and lefn >= 1.5):\n                sn1 += 1\n            if (rign >= 1.5 and lefn >= 1.5):\n                snn += 1\n        f.close()\n\n        f 
= open(\"%s/test2id.txt\" % benchmark_gspace_dir_path, \"r\")\n f11 = open(\"%s/1-1.txt\" % benchmark_gspace_dir_path, \"w\")\n f1n = open(\"%s/1-n.txt\" % benchmark_gspace_dir_path, \"w\")\n fn1 = open(\"%s/n-1.txt\" % benchmark_gspace_dir_path, \"w\")\n fnn = open(\"%s/n-n.txt\" % benchmark_gspace_dir_path, \"w\")\n fall = open(\"%s/test2id_all.txt\" % benchmark_gspace_dir_path, \"w\")\n tot = (int)(f.readline())\n fall.write(\"%d\\n\" % (tot))\n f11.write(\"%d\\n\" % (s11))\n f1n.write(\"%d\\n\" % (s1n))\n fn1.write(\"%d\\n\" % (sn1))\n fnn.write(\"%d\\n\" % (snn))\n for i in range(tot):\n content = f.readline()\n h, t, r = content.strip().split()\n rign = rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if (rign < 1.5 and lefn < 1.5):\n f11.write(content)\n fall.write(\"0\" + \"\\t\" + content)\n if (rign >= 1.5 and lefn < 1.5):\n f1n.write(content)\n fall.write(\"1\" + \"\\t\" + content)\n if (rign < 1.5 and lefn >= 1.5):\n fn1.write(content)\n fall.write(\"2\" + \"\\t\" + content)\n if (rign >= 1.5 and lefn >= 1.5):\n fnn.write(content)\n fall.write(\"3\" + \"\\t\" + content)\n fall.close()\n f.close()\n f11.close()\n f1n.close()\n fn1.close()\n fnn.close()\n\nif __name__ == '__main__':\n l = []\n for i in range(1000):\n l.append([i, i + 1, 'like%d' % (i % 20)])\n job = TrainJob(l, 'transe', 1, 'f35a7da8-49e4-43ec-aa75-e67e6d935f69')\n job.run()\n\n"
},
{
"alpha_fraction": 0.7833333611488342,
"alphanum_fraction": 0.7833333611488342,
"avg_line_length": 29.5,
"blob_id": "ace09ec8845bc4eea2d8422a7efb71edcb7eff3a",
"content_id": "d62f2a5107cb2c3c10bad50e711d420bed4af830",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 2,
"path": "/bin/monitor.sh",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nPYTHONPATH=. python sh/ServiceMonitor.py"
},
{
"alpha_fraction": 0.7286324501037598,
"alphanum_fraction": 0.7336182594299316,
"avg_line_length": 41.54545593261719,
"blob_id": "4d34012bfcfa29bfbb428b9d0dbc58e51385b7a7",
"content_id": "c43586d25ec96c598a25d57dfb5639e44cbe0bf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5682,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 132,
"path": "/protos/embedding_pb2_grpc.py",
"repo_name": "Beim/shmodel",
"src_encoding": "UTF-8",
"text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom protos import embedding_pb2 as embedding__pb2\nfrom google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2\n\n\nclass GraphEmbeddingServiceStub(object):\n # missing associated documentation comment in .proto file\n pass\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.predictHead = channel.unary_unary(\n '/com.ices.sh.embedding.rpc.GraphEmbeddingService/predictHead',\n request_serializer=embedding__pb2.PredictHeadRequest.SerializeToString,\n response_deserializer=embedding__pb2.PredictPartResponse.FromString,\n )\n self.predictTail = channel.unary_unary(\n '/com.ices.sh.embedding.rpc.GraphEmbeddingService/predictTail',\n request_serializer=embedding__pb2.PredictTailRequest.SerializeToString,\n response_deserializer=embedding__pb2.PredictPartResponse.FromString,\n )\n self.predictRelation = channel.unary_unary(\n '/com.ices.sh.embedding.rpc.GraphEmbeddingService/predictRelation',\n request_serializer=embedding__pb2.PredictRelationRequest.SerializeToString,\n response_deserializer=embedding__pb2.PredictPartResponse.FromString,\n )\n self.predictTriple = channel.unary_unary(\n '/com.ices.sh.embedding.rpc.GraphEmbeddingService/predictTriple',\n request_serializer=embedding__pb2.PredictTripleRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BoolValue.FromString,\n )\n self.getEntityEmbedding = channel.unary_unary(\n '/com.ices.sh.embedding.rpc.GraphEmbeddingService/getEntityEmbedding',\n request_serializer=embedding__pb2.GetEmbeddingRequest.SerializeToString,\n response_deserializer=embedding__pb2.GetEmbeddingResponse.FromString,\n )\n self.getRelationEmbedding = channel.unary_unary(\n '/com.ices.sh.embedding.rpc.GraphEmbeddingService/getRelationEmbedding',\n request_serializer=embedding__pb2.GetEmbeddingRequest.SerializeToString,\n response_deserializer=embedding__pb2.GetEmbeddingResponse.FromString,\n )\n\n\nclass GraphEmbeddingServiceServicer(object):\n # missing associated documentation comment in .proto file\n pass\n\n def predictHead(self, request, context):\n \"\"\"预测头实体\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def predictTail(self, request, context):\n \"\"\"预测尾实体\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def predictRelation(self, request, context):\n \"\"\"预测关系类型\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def predictTriple(self, request, context):\n \"\"\"预测三元组是否正确\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def getEntityEmbedding(self, request, context):\n \"\"\"获取实体embedding\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def getRelationEmbedding(self, request, context):\n \"\"\"获取关系embedding\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef 
add_GraphEmbeddingServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'predictHead': grpc.unary_unary_rpc_method_handler(\n servicer.predictHead,\n request_deserializer=embedding__pb2.PredictHeadRequest.FromString,\n response_serializer=embedding__pb2.PredictPartResponse.SerializeToString,\n ),\n 'predictTail': grpc.unary_unary_rpc_method_handler(\n servicer.predictTail,\n request_deserializer=embedding__pb2.PredictTailRequest.FromString,\n response_serializer=embedding__pb2.PredictPartResponse.SerializeToString,\n ),\n 'predictRelation': grpc.unary_unary_rpc_method_handler(\n servicer.predictRelation,\n request_deserializer=embedding__pb2.PredictRelationRequest.FromString,\n response_serializer=embedding__pb2.PredictPartResponse.SerializeToString,\n ),\n 'predictTriple': grpc.unary_unary_rpc_method_handler(\n servicer.predictTriple,\n request_deserializer=embedding__pb2.PredictTripleRequest.FromString,\n response_serializer=google_dot_protobuf_dot_wrappers__pb2.BoolValue.SerializeToString,\n ),\n 'getEntityEmbedding': grpc.unary_unary_rpc_method_handler(\n servicer.getEntityEmbedding,\n request_deserializer=embedding__pb2.GetEmbeddingRequest.FromString,\n response_serializer=embedding__pb2.GetEmbeddingResponse.SerializeToString,\n ),\n 'getRelationEmbedding': grpc.unary_unary_rpc_method_handler(\n servicer.getRelationEmbedding,\n request_deserializer=embedding__pb2.GetEmbeddingRequest.FromString,\n response_serializer=embedding__pb2.GetEmbeddingResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'com.ices.sh.embedding.rpc.GraphEmbeddingService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n"
}
] | 15 |
psalmankhan/ar_enterprises | https://github.com/psalmankhan/ar_enterprises | 259c9150a83a3b2c0ca3a808b14637e5a885f599 | 0b79e1bf1f8915f2f5466afef5b4b67e034d0552 | b7459adf63e0ad8eccfb75d52e119f7c5ce7c542 | refs/heads/main | 2023-04-19T00:10:51.539829 | 2021-05-11T09:41:00 | 2021-05-11T09:41:00 | 365,925,202 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4866666793823242,
"alphanum_fraction": 0.5081481337547302,
"avg_line_length": 32.75,
"blob_id": "465eb56c0704bc529b0f9aea0f5585d1d8d91823",
"content_id": "6ffc621d0432a430aff44d632c3874ce095cf6a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1350,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 40,
"path": "/core/migrations/0001_initial.py",
"repo_name": "psalmankhan/ar_enterprises",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2 on 2021-05-09 18:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('email', models.EmailField(max_length=50)),\n ('message', models.TextField()),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('email', models.EmailField(max_length=50)),\n ('phone', models.CharField(max_length=10)),\n ('total', models.CharField(max_length=4)),\n ('brand', models.CharField(choices=[('JSW', 'JSW'), ('Dalmia', 'Dalmia'), ('Ultratech', 'Ultratech')], default='JSW', max_length=30)),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.61328125,
"alphanum_fraction": 0.6263020634651184,
"avg_line_length": 20.94285774230957,
"blob_id": "c8fbbd763341ec0f0fc2545dae769c2f9a438b17",
"content_id": "7953a5554288f6f8bc9e7b896e73a69c6d355dee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 768,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 35,
"path": "/core/models.py",
"repo_name": "psalmankhan/ar_enterprises",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\nclass CommonInfo(models.Model):\n name = models.CharField(max_length=100)\n email = models.EmailField(max_length=50)\n class Meta:\n abstract = True\n\nclass Order(CommonInfo):\n\n JSW = 'JSW'\n Dalmia = 'Dalmia'\n Ultratech = 'Ultratech'\n\n CEMENT_CHOICES = [\n (JSW, 'JSW'),\n (Dalmia, 'Dalmia'),\n (Ultratech, 'Ultratech'),\n ]\n\n phone = models.CharField(max_length=10)\n total = models.CharField(max_length=4)\n brand = models.CharField(max_length=30, choices=CEMENT_CHOICES, default=JSW,)\n\n def __unicode__(self):\n return self.name\n\nclass Contact(CommonInfo):\n\n message = models.TextField()\n\n def __unicode__(self):\n return self.name\n"
},
{
"alpha_fraction": 0.6527272462844849,
"alphanum_fraction": 0.6600000262260437,
"avg_line_length": 31.352941513061523,
"blob_id": "0c6e5f540751f366be5a57835cc7af91a105f99a",
"content_id": "c656d8673c416381aed6ac1e70f1acc8ce1d2e8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 17,
"path": "/core/admin.py",
"repo_name": "psalmankhan/ar_enterprises",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Order, Contact\n\n# Register your models here.\[email protected](Order)\nclass OrderAdmin(admin.ModelAdmin):\n list_display = ('name', 'email', 'phone', 'total', 'brand')\n list_display_links = ('name', 'email')\n search_fields = ('name', 'email', 'brand')\n list_per_page = 25\n\[email protected](Contact)\nclass ContactAdmin(admin.ModelAdmin):\n list_display = ('name', 'email', 'message')\n list_display_links = ('name', 'email')\n search_fields = ('name', 'email')\n list_per_page = 25\n"
},
{
"alpha_fraction": 0.6249409317970276,
"alphanum_fraction": 0.6249409317970276,
"avg_line_length": 33.704917907714844,
"blob_id": "3b2d5463fab3624b5277cd9832c6db830b9a60fa",
"content_id": "5b41093e9bfa502fa5fc8886acb8f2f2a7b66951",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2117,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 61,
"path": "/core/views.py",
"repo_name": "psalmankhan/ar_enterprises",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect\nfrom .models import Order, Contact\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\n\ndef home(request):\n return render(request, 'home.html')\n\ndef about(request):\n return render(request, 'about.html')\n\ndef order(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email = request.POST.get('email')\n phone = request.POST.get('phone')\n total = request.POST.get('total')\n brand = request.POST.get('brand')\n\n order_form = Order(name=name, email=email, phone=phone,\n total=total, brand=brand)\n\n # admin_info = User.objects.get(is_superuser=True)\n # admin_email = admin_info.email\n\n # send_mail(\n # subject='Order Received',\n # message='You received an order. Please check admin section',\n # from_email=settings.EMAIL_HOST_USER,\n # recipient_list=[admin_email],\n # fail_silently=False,\n # )\n order_form.save()\n messages.success(request, 'Your order request has been submitted successfully. We will get back you shortly')\n return render(request, 'order.html')\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email = request.POST.get('email')\n message = request.POST.get('message')\n\n contact_form = Contact(name=name, email=email, message=message)\n\n # admin_info = User.objects.get(is_superuser=True)\n # admin_email = admin_info.email\n\n # send_mail(\n # subject='Enquiry',\n # message='You received a message. Please check admin section',\n # from_email=settings.EMAIL_HOST_USER,\n # recipient_list=[admin_email],\n # fail_silently=False,\n # )\n\n contact_form.save()\n\n messages.success(request, 'Your message has been submitted successfully. We will get back you shortly')\n return render(request, 'contact.html')\n"
}
] | 4 |
seatgeek/dd-trace-py | https://github.com/seatgeek/dd-trace-py | 718a6cb7725f38733c04e5a45a08dfc15e116f69 | 1b5c37b5e006e9c71ea3cf0e3a349478dc73617d | d71af3a5d888fac12dabd5345daf60d2a419c20c | refs/heads/master | 2023-08-31T19:06:17.726120 | 2020-06-29T15:07:43 | 2020-06-29T15:07:43 | 246,694,817 | 1 | 0 | NOASSERTION | 2020-03-11T22:45:07 | 2020-06-29T20:33:15 | 2020-06-29T21:06:06 | Python | [
{
"alpha_fraction": 0.6007882356643677,
"alphanum_fraction": 0.6021018624305725,
"avg_line_length": 31.469194412231445,
"blob_id": "cc88cc1d389f7e9816b39f35b1a2c3d12889475a",
"content_id": "e1fece5be3dc4ed42350298bb2074bc5494ca17a",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6856,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 211,
"path": "/ddtrace/profiling/_periodic.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\nimport sys\nimport threading\n\nfrom ddtrace.profiling import _service\nfrom ddtrace.vendor import attr\nfrom ddtrace.vendor import six\n\n\nPERIODIC_THREAD_IDS = set()\n\n\nclass PeriodicThread(threading.Thread):\n \"\"\"Periodic thread.\n\n This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval`\n seconds.\n\n \"\"\"\n\n def __init__(self, interval, target, name=None, on_shutdown=None):\n \"\"\"Create a periodic thread.\n\n :param interval: The interval in seconds to wait between execution of the periodic function.\n :param target: The periodic function to execute every interval.\n :param name: The name of the thread.\n :param on_shutdown: The function to call when the thread shuts down.\n \"\"\"\n super(PeriodicThread, self).__init__(name=name)\n self._target = target\n self._on_shutdown = on_shutdown\n self.interval = interval\n self.quit = threading.Event()\n self.daemon = True\n\n def stop(self):\n \"\"\"Stop the thread.\"\"\"\n self.quit.set()\n\n def run(self):\n \"\"\"Run the target function periodically.\"\"\"\n PERIODIC_THREAD_IDS.add(self.ident)\n\n try:\n while not self.quit.wait(self.interval):\n self._target()\n if self._on_shutdown is not None:\n self._on_shutdown()\n finally:\n PERIODIC_THREAD_IDS.remove(self.ident)\n\n\nclass _GeventPeriodicThread(PeriodicThread):\n \"\"\"Periodic thread.\n\n This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval`\n seconds.\n\n \"\"\"\n\n # That's the value Python 2 uses in its `threading` module\n SLEEP_INTERVAL = 0.005\n\n def __init__(self, interval, target, name=None, on_shutdown=None):\n \"\"\"Create a periodic thread.\n\n :param interval: The interval in seconds to wait between execution of the periodic function.\n :param target: The periodic function to execute every interval.\n :param name: The name of the thread.\n :param on_shutdown: The function to call when the thread shuts down.\n \"\"\"\n super(_GeventPeriodicThread, self).__init__(interval, target, name, on_shutdown)\n import gevent.monkey\n\n self._sleep = gevent.monkey.get_original(\"time\", \"sleep\")\n try:\n # Python ≥ 3.8\n self._get_native_id = gevent.monkey.get_original(\"threading\", \"get_native_id\")\n except AttributeError:\n self._get_native_id = None\n self._tident = None\n\n @property\n def ident(self):\n return self._tident\n\n def start(self):\n \"\"\"Start the thread.\"\"\"\n import gevent.monkey\n\n start_new_thread = gevent.monkey.get_original(six.moves._thread.__name__, \"start_new_thread\")\n\n self.quit = False\n self.has_quit = False\n threading._limbo[self] = self\n try:\n self._tident = start_new_thread(self.run, tuple())\n except Exception:\n del threading._limbo[self]\n if self._get_native_id:\n self._native_id = self._get_native_id()\n\n def join(self, timeout=None):\n # FIXME: handle the timeout argument\n while not self.has_quit:\n self._sleep(self.SLEEP_INTERVAL)\n\n def stop(self):\n \"\"\"Stop the thread.\"\"\"\n self.quit = True\n\n def run(self):\n \"\"\"Run the target function periodically.\"\"\"\n PERIODIC_THREAD_IDS.add(self._tident)\n\n with threading._active_limbo_lock:\n threading._active[self._tident] = self\n del threading._limbo[self]\n try:\n while self.quit is False:\n self._target()\n slept = 0\n while self.quit is False and slept < self.interval:\n self._sleep(self.SLEEP_INTERVAL)\n slept += self.SLEEP_INTERVAL\n if self._on_shutdown is not None:\n self._on_shutdown()\n except 
Exception:\n # Exceptions might happen during interpreter shutdown.\n # We're mimicking what `threading.Thread` does in daemon mode, we ignore them.\n # See `threading.Thread._bootstrap` for details.\n if sys is not None:\n raise\n finally:\n try:\n self.has_quit = True\n del threading._active[self._tident]\n PERIODIC_THREAD_IDS.remove(self._tident)\n except Exception:\n # Exceptions might happen during interpreter shutdown.\n # We're mimicking what `threading.Thread` does in daemon mode, we ignore them.\n # See `threading.Thread._bootstrap` for details.\n if sys is not None:\n raise\n\n\ndef PeriodicRealThread(*args, **kwargs):\n \"\"\"Create a PeriodicRealThread based on the underlying thread implementation (native, gevent, etc).\n\n This is exactly like PeriodicThread, except that it runs on a *real* OS thread. Be aware that this might be tricky\n in e.g. the gevent case, where Lock object must not be shared with the MainThread (otherwise it'd dead lock).\n\n \"\"\"\n if \"gevent\" in sys.modules:\n import gevent.monkey\n\n if gevent.monkey.is_module_patched(\"threading\"):\n return _GeventPeriodicThread(*args, **kwargs)\n return PeriodicThread(*args, **kwargs)\n\n\[email protected]\nclass PeriodicService(_service.Service):\n \"\"\"A service that runs periodically.\"\"\"\n\n _interval = attr.ib()\n _worker = attr.ib(default=None, init=False, repr=False)\n\n _real_thread = False\n \"Class variable to override if the service should run in a real OS thread.\"\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, value):\n self._interval = value\n # Update the interval of the PeriodicThread based on ours\n if self._worker:\n self._worker.interval = value\n\n def start(self):\n \"\"\"Start the periodic service.\"\"\"\n super(PeriodicService, self).start()\n periodic_thread_class = PeriodicRealThread if self._real_thread else PeriodicThread\n self._worker = periodic_thread_class(\n self.interval,\n target=self.periodic,\n name=\"%s:%s\" % (self.__class__.__module__, self.__class__.__name__),\n on_shutdown=self.on_shutdown,\n )\n self._worker.start()\n\n def join(self, timeout=None):\n if self._worker:\n self._worker.join(timeout)\n\n def stop(self):\n \"\"\"Stop the periodic collector.\"\"\"\n if self._worker:\n self._worker.stop()\n super(PeriodicService, self).stop()\n\n @staticmethod\n def on_shutdown():\n pass\n\n @staticmethod\n def periodic():\n pass\n"
},
{
"alpha_fraction": 0.7328881621360779,
"alphanum_fraction": 0.7562604546546936,
"avg_line_length": 38.93333435058594,
"blob_id": "61db55fc08cd8bab8f349c0ae2ed22bd84a69078",
"content_id": "2b5fadf05c2491d376e22acb4c6f2051c3e1c3e5",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1198,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 30,
"path": "/.circleci/scripts/test_build.sh",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nset -eux -o pipefail\n\nif [[ \"$OSTYPE\" == \"msys\" ]]; then\n # Install python version and create a virtuallenv\n nuget install python -Version $PYTHON_VERSION -ExcludeVersion -OutputDirectory .\n ./python/tools/python.exe --version\n ./python/tools/python.exe -m pip install virtualenv\n ./python/tools/python.exe -m virtualenv env\n # When running script under Windows executor we need to activate the venv\n # created for the specific Python version\n source env/Scripts/activate\nfi\n\n# Install required dependencies\n# DEV: `pyopenssl` needed until the following PR is released\n# https://github.com/pypa/twine/pull/447\n# DEV: `wheel` is needed to run `bdist_wheel`\npip install twine readme_renderer[md] pyopenssl wheel cython\n# Ensure we didn't cache from previous runs\nrm -rf build/ dist/\n# Manually build any extensions to ensure they succeed\npython setup.py build_ext --force\n# Ensure source package will build\npython setup.py sdist\n# Ensure wheel will build\npython setup.py bdist_wheel\n# Ensure package long description is valid and will render\n# https://github.com/pypa/twine/tree/6c4d5ecf2596c72b89b969ccc37b82c160645df8#twine-check\ntwine check dist/*\n"
},
{
"alpha_fraction": 0.5478547811508179,
"alphanum_fraction": 0.5907590985298157,
"avg_line_length": 36.875,
"blob_id": "0de7f8420b95e3a7aade5968ab1523110d6178ce",
"content_id": "ec60a39369e679d48b68b0a602eb2729994f81ab",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 8,
"path": "/tests/unit/test_ext.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "from ddtrace.ext import aws\n\n\ndef test_flatten_dict():\n \"\"\"Ensure that flattening of a nested dict results in a normalized, 1-level dict\"\"\"\n d = dict(A=1, B=2, C=dict(A=3, B=4, C=dict(A=5, B=6)))\n e = dict(A=1, B=2, C_A=3, C_B=4, C_C_A=5, C_C_B=6)\n assert aws._flatten_dict(d, sep=\"_\") == e\n"
},
{
"alpha_fraction": 0.6841391921043396,
"alphanum_fraction": 0.6845910549163818,
"avg_line_length": 29.73611068725586,
"blob_id": "ee9e9643dc8135c8a55209e78ca2b36ed42194cc",
"content_id": "0b902cbea7ad04c3fb0a64ce181b964342c88e53",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2213,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 72,
"path": "/ddtrace/contrib/logging/patch.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport ddtrace\n\nfrom ...utils.wrappers import unwrap as _u\nfrom ...vendor.wrapt import wrap_function_wrapper as _w\n\nRECORD_ATTR_TRACE_ID = \"dd.trace_id\"\nRECORD_ATTR_SPAN_ID = \"dd.span_id\"\nRECORD_ATTR_ENV = \"dd.env\"\nRECORD_ATTR_VERSION = \"dd.version\"\nRECORD_ATTR_SERVICE = \"dd.service\"\nRECORD_ATTR_VALUE_ZERO = 0\nRECORD_ATTR_VALUE_EMPTY = \"\"\n\nddtrace.config._add(\"logging\", dict(tracer=None,)) # by default, override here for custom tracer\n\n\ndef _get_current_span(tracer=None):\n \"\"\"Helper to get the currently active span\"\"\"\n if not tracer:\n tracer = ddtrace.tracer\n\n if not tracer.enabled:\n return None\n\n return tracer.current_span()\n\n\ndef _w_makeRecord(func, instance, args, kwargs):\n # Get the LogRecord instance for this log\n record = func(*args, **kwargs)\n\n setattr(record, RECORD_ATTR_VERSION, ddtrace.config.version or \"\")\n setattr(record, RECORD_ATTR_ENV, ddtrace.config.env or \"\")\n setattr(record, RECORD_ATTR_SERVICE, ddtrace.config.service or \"\")\n\n # logs from internal logger may explicitly pass the current span to\n # avoid deadlocks in getting the current span while already in locked code.\n span_from_log = getattr(record, ddtrace.constants.LOG_SPAN_KEY, None)\n if isinstance(span_from_log, ddtrace.Span):\n span = span_from_log\n else:\n span = _get_current_span(tracer=ddtrace.config.logging.tracer)\n\n if span:\n setattr(record, RECORD_ATTR_TRACE_ID, span.trace_id)\n setattr(record, RECORD_ATTR_SPAN_ID, span.span_id)\n else:\n setattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_ZERO)\n setattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_ZERO)\n\n return record\n\n\ndef patch():\n \"\"\"\n Patch ``logging`` module in the Python Standard Library for injection of\n tracer information by wrapping the base factory method ``Logger.makeRecord``\n \"\"\"\n if getattr(logging, \"_datadog_patch\", False):\n return\n setattr(logging, \"_datadog_patch\", True)\n\n _w(logging.Logger, \"makeRecord\", _w_makeRecord)\n\n\ndef unpatch():\n if getattr(logging, \"_datadog_patch\", False):\n setattr(logging, \"_datadog_patch\", False)\n\n _u(logging.Logger, \"makeRecord\")\n"
},
{
"alpha_fraction": 0.5974895358085632,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 18.590164184570312,
"blob_id": "83d0f5f0ed82626a94282dc563a3cf99bdc349d1",
"content_id": "8a81e1061b9061ea067d3d2dc73113e53bb997e6",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1195,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 61,
"path": "/ddtrace/internal/runtime/__init__.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import os\nimport uuid\n\nfrom .runtime_metrics import (\n RuntimeTags,\n RuntimeMetrics,\n RuntimeWorker,\n)\n\n\n__all__ = [\n \"RuntimeTags\",\n \"RuntimeMetrics\",\n \"RuntimeWorker\",\n \"get_runtime_id\",\n]\n\n\ndef _generate_runtime_id():\n return uuid.uuid4().hex\n\n\n_RUNTIME_ID = _generate_runtime_id()\n\n\nif hasattr(os, \"register_at_fork\"):\n\n def _set_runtime_id():\n global _RUNTIME_ID\n _RUNTIME_ID = _generate_runtime_id()\n\n os.register_at_fork(after_in_child=_set_runtime_id)\n\n def get_runtime_id():\n return _RUNTIME_ID\n\n\nelse:\n # Non-POSIX systems or Python < 3.7\n import threading\n\n _RUNTIME_PID = os.getpid()\n\n def _set_runtime_id():\n global _RUNTIME_ID, _RUNTIME_PID\n _RUNTIME_ID = _generate_runtime_id()\n _RUNTIME_PID = os.getpid()\n\n _RUNTIME_LOCK = threading.Lock()\n\n def get_runtime_id():\n with _RUNTIME_LOCK:\n pid = os.getpid()\n if pid != _RUNTIME_PID:\n _set_runtime_id()\n return _RUNTIME_ID\n\n\nget_runtime_id.__doc__ = \"\"\"Return a unique string identifier for this runtime.\n\nDo not store this identifier as it can change when, e.g., the process forks.\"\"\"\n"
},
{
"alpha_fraction": 0.6390041708946228,
"alphanum_fraction": 0.6514523029327393,
"avg_line_length": 31.133333206176758,
"blob_id": "4dd25402e8ebd93b72b7b8ad9f80daf81097cbbd",
"content_id": "e4c814d4617f15ebbcd02ec9a05ffd45b794b607",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 15,
"path": "/tests/profiling/test_compat.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "\"\"\"Test compatibility with old ddtrace.profile name.\"\"\"\nimport subprocess\nimport os\n\n\ndef test_call_script():\n subp = subprocess.Popen(\n [\"python\", os.path.join(os.path.dirname(__file__), \"compat_program.py\")], stdout=subprocess.PIPE\n )\n stdout, stderr = subp.communicate()\n assert subp.wait() == 42\n hello, interval, stacks = stdout.decode().strip().split(\"\\n\")\n assert hello == \"hello world\"\n assert float(interval) >= 0.01\n assert int(stacks) >= 1\n"
},
{
"alpha_fraction": 0.7213930487632751,
"alphanum_fraction": 0.7230514287948608,
"avg_line_length": 27.714284896850586,
"blob_id": "3f5385b3e59d41161051b92177a4da149df9a203",
"content_id": "618c477c76371f25fc7ac909103abed9bef2da7f",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 603,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 21,
"path": "/ddtrace/profiling/bootstrap/sitecustomize.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\n\"\"\"Bootstrapping code that is run when using the `pyddprofile`.\"\"\"\nimport os\n\nfrom ddtrace.profiling import bootstrap\nfrom ddtrace.profiling import profiler\n\n\ndef start_profiler():\n if hasattr(bootstrap, \"profiler\"):\n bootstrap.profiler.stop()\n # Export the profiler so we can introspect it if needed\n bootstrap.profiler = profiler.Profiler()\n bootstrap.profiler.start()\n\n\nstart_profiler()\n# When forking, all threads are stop in the child.\n# Restart a new profiler.\nif hasattr(os, \"register_at_fork\"):\n os.register_at_fork(after_in_child=start_profiler)\n"
},
{
"alpha_fraction": 0.7451368570327759,
"alphanum_fraction": 0.7533794641494751,
"avg_line_length": 32.70000076293945,
"blob_id": "e7a26e4b438c3ef547a06c2219fd77c09d4abdfd",
"content_id": "b759cef779542d2d61b56b776ec6a209f4cd2d92",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3033,
"license_type": "permissive",
"max_line_length": 160,
"num_lines": 90,
"path": "/README.md",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "# dd-trace-py\n\n[](https://circleci.com/gh/DataDog/dd-trace-py/tree/master)\n[](https://pypi.org/project/ddtrace/)\n[](https://pypi.org/project/ddtrace/)\n[](http://pypi.datadoghq.com/trace/docs/installation_quickstart.html#opentracing)\n\n`ddtrace` is Datadog's tracing library for Python. It is used to trace requests\nas they flow across web servers, databases and microservices so that developers\nhave great visibility into bottlenecks and troublesome requests.\n\n## Getting Started\n\nFor a basic product overview, installation and quick start, check out our\n[setup documentation][setup docs].\n\nFor more advanced usage and configuration, check out our [API\ndocumentation][pypi docs].\n\nFor descriptions of terminology used in APM, take a look at the [official\ndocumentation][visualization docs].\n\n[setup docs]: https://docs.datadoghq.com/tracing/setup/python/\n[pypi docs]: http://pypi.datadoghq.com/trace/docs/\n[visualization docs]: https://docs.datadoghq.com/tracing/visualization/\n\n\n## Development\n\n### Contributing\n\nSee [docs/contributing.rst](docs/contributing.rst).\n\n### Testing\n\n\n#### Environment\n\nThe test suite requires many backing services such as PostgreSQL, MySQL, Redis\nand more. We use ``docker`` and ``docker-compose`` to run the services in our CI\nand for development. To run the test matrix, please [install docker][docker] and\n[docker-compose][docker-compose] using the instructions provided by your platform. Then\nlaunch them through:\n\n $ docker-compose up -d\n\n\n[docker]: https://www.docker.com/products/docker\n[docker-compose]: https://www.docker.com/products/docker-compose\n\n\n#### Running Tests in docker\n\nOnce your docker-compose environment is running, you can run the test runner image:\n\n $ docker-compose run --rm testrunner\n\nNow you are in a bash shell. You can now run tests as you would do in your local environment:\n\n $ tox -e '{py35,py36}-redis{210}'\n\nWe also provide a shell script to execute commands in the provided container.\n\nFor example to run the tests for `redis-py` 2.10 on Python 3.5 and 3.6:\n\n $ ./scripts/ddtest tox -e '{py35,py36}-redis{210}'\n\nIf you want to run a list of tox environment (as CircleCI does) based on a\npattern, you can use the following command:\n\n $ scripts/ddtest scripts/run-tox-scenario '^futures_contrib-'\n\n### Continuous Integration\n\nWe use CircleCI 2.0 for our continuous integration.\n\n\n#### Configuration\n\nThe CI tests are configured through [config.yml](.circleci/config.yml).\n\n\n#### Running Locally\n\nThe CI tests can be run locally using the `circleci` CLI. More information about\nthe CLI can be found at https://circleci.com/docs/2.0/local-cli/.\n\nAfter installing the `circleci` CLI, you can run jobs by name. For example:\n\n $ circleci build --job django\n"
},
{
"alpha_fraction": 0.7707509994506836,
"alphanum_fraction": 0.7707509994506836,
"avg_line_length": 27.11111068725586,
"blob_id": "b759dfdc7f15a37c086c618aa827c92a1cfab69b",
"content_id": "a42f1c61b11011d6d35df19dd969889da68ec300",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 9,
"path": "/ddtrace/profile/collector/__init__.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import importlib\nimport sys\n\nfrom ddtrace.utils import deprecation\n\n\ndeprecation.deprecation(\"ddtrace.profile\", \"Use ddtrace.profiling instead.\")\n\nsys.modules[__name__] = importlib.import_module(__name__.replace(\"ddtrace.profile\", \"ddtrace.profiling\"))\n"
},
{
"alpha_fraction": 0.8098159432411194,
"alphanum_fraction": 0.8098159432411194,
"avg_line_length": 39.75,
"blob_id": "589733aca8fd2d174dc3565685ee58cf728acfd8",
"content_id": "9535eb4cba2156e47e6a06813e9d981b41dd6145",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 163,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 4,
"path": "/ddtrace/profiling/auto.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "\"\"\"Automatically starts a collector when imported.\"\"\"\nfrom ddtrace.profiling.bootstrap import sitecustomize # noqa\n\nstart_profiler = sitecustomize.start_profiler\n"
},
{
"alpha_fraction": 0.6133463978767395,
"alphanum_fraction": 0.6208701133728027,
"avg_line_length": 22.335878372192383,
"blob_id": "f7033ff7c75d20f266bbce0f3df35d7ddda6e11b",
"content_id": "57d9b754bcff1e5864a90acc809d88b465d45c20",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3057,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 131,
"path": "/tests/profiling/test_periodic.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import os\nimport threading\n\nimport pytest\n\nfrom ddtrace.profiling import _periodic\nfrom ddtrace.profiling import _service\n\n\nif os.getenv(\"DD_PROFILE_TEST_GEVENT\", False):\n import gevent\n\n class Event(object):\n \"\"\"\n We can't use gevent Events here[0], nor can we use native threading\n events (because gevent is not multi-threaded).\n\n So for gevent, since it's not multi-threaded and will not run greenlets\n in parallel (for our usage here, anyway) we can write a dummy Event\n class which just does a simple busy wait on a shared variable.\n\n [0] https://github.com/gevent/gevent/issues/891\n \"\"\"\n\n state = False\n\n def wait(self):\n while not self.state:\n gevent.sleep(0.001)\n\n def set(self):\n self.state = True\n\n\nelse:\n Event = threading.Event\n\n\ndef test_periodic():\n x = {\"OK\": False}\n\n thread_started = Event()\n thread_continue = Event()\n\n def _run_periodic():\n thread_started.set()\n x[\"OK\"] = True\n thread_continue.wait()\n\n def _on_shutdown():\n x[\"DOWN\"] = True\n\n t = _periodic.PeriodicRealThread(0.001, _run_periodic, on_shutdown=_on_shutdown)\n t.start()\n thread_started.wait()\n assert t.ident in _periodic.PERIODIC_THREAD_IDS\n thread_continue.set()\n t.stop()\n t.join()\n assert x[\"OK\"]\n assert x[\"DOWN\"]\n assert t.ident not in _periodic.PERIODIC_THREAD_IDS\n if hasattr(threading, \"get_native_id\"):\n assert t.native_id is not None\n\n\ndef test_periodic_error():\n x = {\"OK\": False}\n\n thread_started = Event()\n thread_continue = Event()\n\n def _run_periodic():\n thread_started.set()\n thread_continue.wait()\n raise ValueError\n\n def _on_shutdown():\n x[\"DOWN\"] = True\n\n t = _periodic.PeriodicRealThread(0.001, _run_periodic, on_shutdown=_on_shutdown)\n t.start()\n thread_started.wait()\n assert t.ident in _periodic.PERIODIC_THREAD_IDS\n thread_continue.set()\n t.stop()\n t.join()\n assert \"DOWN\" not in x\n assert t.ident not in _periodic.PERIODIC_THREAD_IDS\n\n\ndef test_gevent_class():\n if os.getenv(\"DD_PROFILE_TEST_GEVENT\", False):\n assert isinstance(_periodic.PeriodicRealThread(1, sum), _periodic._GeventPeriodicThread)\n else:\n assert isinstance(_periodic.PeriodicRealThread(1, sum), _periodic.PeriodicThread)\n\n\ndef test_periodic_real_thread_name():\n def do_nothing():\n pass\n\n t = _periodic.PeriodicRealThread(interval=1, target=do_nothing)\n t.start()\n assert t in threading.enumerate()\n t.stop()\n t.join()\n\n\ndef test_periodic_service_start_stop():\n t = _periodic.PeriodicService(1)\n t.start()\n with pytest.raises(_service.ServiceAlreadyRunning):\n t.start()\n t.stop()\n t.join()\n t.stop()\n t.stop()\n t.join()\n t.join()\n\n\ndef test_periodic_join_stop_no_start():\n t = _periodic.PeriodicService(1)\n t.join()\n t.stop()\n t.join()\n t = _periodic.PeriodicService(1)\n t.stop()\n t.join()\n t.stop()\n"
},
{
"alpha_fraction": 0.6296610236167908,
"alphanum_fraction": 0.6371186375617981,
"avg_line_length": 31.240436553955078,
"blob_id": "45a78c5875b7182a33a084f03991f4bb110e5341",
"content_id": "1addd99e2e87656ab7a8f2ddfcdafaf6d898f32a",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5905,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 183,
"path": "/ddtrace/profiling/profiler.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\nimport atexit\nimport logging\nimport os\n\nfrom ddtrace.profiling import recorder\nfrom ddtrace.profiling import scheduler\nfrom ddtrace.utils import deprecation\nfrom ddtrace.vendor import attr\nfrom ddtrace.profiling.collector import exceptions\nfrom ddtrace.profiling.collector import memory\nfrom ddtrace.profiling.collector import stack\nfrom ddtrace.profiling.collector import threading\nfrom ddtrace.profiling.exporter import file\nfrom ddtrace.profiling.exporter import http\n\n\nLOG = logging.getLogger(__name__)\n\n\nENDPOINT_TEMPLATE = \"https://intake.profile.{}/v1/input\"\n\n\ndef _get_endpoint():\n legacy = os.environ.get(\"DD_PROFILING_API_URL\")\n if legacy:\n deprecation.deprecation(\"DD_PROFILING_API_URL\", \"Use DD_SITE\")\n return legacy\n site = os.environ.get(\"DD_SITE\", \"datadoghq.com\")\n return ENDPOINT_TEMPLATE.format(site)\n\n\ndef _get_api_key():\n legacy = os.environ.get(\"DD_PROFILING_API_KEY\")\n if legacy:\n deprecation.deprecation(\"DD_PROFILING_API_KEY\", \"Use DD_API_KEY\")\n return legacy\n return os.environ.get(\"DD_API_KEY\")\n\n\ndef _build_default_exporters(service, env, version):\n _OUTPUT_PPROF = os.environ.get(\"DD_PROFILING_OUTPUT_PPROF\")\n if _OUTPUT_PPROF:\n return [\n file.PprofFileExporter(_OUTPUT_PPROF),\n ]\n\n api_key = _get_api_key()\n if api_key:\n # Agentless mode\n endpoint = _get_endpoint()\n else:\n hostname = os.environ.get(\"DD_AGENT_HOST\", os.environ.get(\"DATADOG_TRACE_AGENT_HOSTNAME\", \"localhost\"))\n port = int(os.environ.get(\"DD_TRACE_AGENT_PORT\", 8126))\n endpoint = os.environ.get(\"DD_TRACE_AGENT_URL\", \"http://%s:%d\" % (hostname, port)) + \"/profiling/v1/input\"\n\n return [\n http.PprofHTTPExporter(service=service, env=env, version=version, api_key=api_key, endpoint=endpoint),\n ]\n\n\ndef _get_service_name():\n for service_name_var in (\"DD_SERVICE\", \"DD_SERVICE_NAME\", \"DATADOG_SERVICE_NAME\"):\n service_name = os.environ.get(service_name_var)\n if service_name is not None:\n return service_name\n\n\n# This ought to use `enum.Enum`, but since it's not available in Python 2, we just use a dumb class.\[email protected](repr=False)\nclass ProfilerStatus(object):\n \"\"\"A Profiler status.\"\"\"\n\n status = attr.ib()\n\n def __repr__(self):\n return self.status.upper()\n\n\nProfilerStatus.STOPPED = ProfilerStatus(\"stopped\")\nProfilerStatus.RUNNING = ProfilerStatus(\"running\")\n\n\[email protected]\nclass Profiler(object):\n \"\"\"Run profiling while code is executed.\n\n Note that the whole Python process is profiled, not only the code executed. 
Data from all running threads are\n caught.\n\n If no collectors are provided, default ones are created.\n If no exporters are provided, default ones are created.\n\n \"\"\"\n\n service = attr.ib(factory=_get_service_name)\n env = attr.ib(factory=lambda: os.environ.get(\"DD_ENV\"))\n version = attr.ib(factory=lambda: os.environ.get(\"DD_VERSION\"))\n tracer = attr.ib(default=None)\n collectors = attr.ib(default=None)\n exporters = attr.ib(default=None)\n _schedulers = attr.ib(init=False, factory=list)\n status = attr.ib(init=False, type=ProfilerStatus, default=ProfilerStatus.STOPPED)\n\n @staticmethod\n def _build_default_collectors(tracer):\n r = recorder.Recorder(\n max_events={\n # Allow to store up to 10 threads for 60 seconds at 100 Hz\n stack.StackSampleEvent: 10 * 60 * 100,\n stack.StackExceptionSampleEvent: 10 * 60 * 100,\n # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.\n # = (60 seconds / 0.1 seconds)\n memory.MemorySampleEvent: int(60 / 0.1),\n },\n default_max_events=int(os.environ.get(\"DD_PROFILING_MAX_EVENTS\", recorder.Recorder._DEFAULT_MAX_EVENTS)),\n )\n return [\n stack.StackCollector(r, tracer=tracer),\n memory.MemoryCollector(r),\n exceptions.UncaughtExceptionCollector(r),\n threading.LockCollector(r),\n ]\n\n def __attrs_post_init__(self):\n if self.collectors is None:\n self.collectors = self._build_default_collectors(self.tracer)\n\n if self.exporters is None:\n self.exporters = _build_default_exporters(self.service, self.env, self.version)\n\n if self.exporters:\n for rec in self.recorders:\n self._schedulers.append(scheduler.Scheduler(recorder=rec, exporters=self.exporters))\n\n @property\n def recorders(self):\n return set(c.recorder for c in self.collectors)\n\n def start(self, stop_on_exit=True):\n \"\"\"Start the profiler.\n\n :param stop_on_exit: Whether to stop the profiler and flush the profile on exit.\n \"\"\"\n for col in self.collectors:\n try:\n col.start()\n except RuntimeError:\n # `tracemalloc` is unavailable?\n pass\n\n for s in self._schedulers:\n s.start()\n\n self.status = ProfilerStatus.RUNNING\n\n if stop_on_exit:\n atexit.register(self.stop)\n\n def stop(self, flush=True):\n \"\"\"Stop the profiler.\n\n :param flush: Wait for the flush of the remaining events before stopping.\n \"\"\"\n for col in reversed(self.collectors):\n col.stop()\n\n for col in reversed(self.collectors):\n col.join()\n\n for s in reversed(self._schedulers):\n s.stop()\n\n if flush:\n for s in reversed(self._schedulers):\n s.join()\n\n self.status = ProfilerStatus.STOPPED\n\n # Python 2 does not have unregister\n if hasattr(atexit, \"unregister\"):\n # You can unregister a method that was not registered, so no need to do any other check\n atexit.unregister(self.stop)\n"
},
{
"alpha_fraction": 0.6808411478996277,
"alphanum_fraction": 0.681775689125061,
"avg_line_length": 27.53333282470703,
"blob_id": "bfc11a7d8900e90cdc713a121b87b006bba5374c",
"content_id": "24db44ec7b9aabc5adbc30a3f9aeb2e78c75e9bc",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2140,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 75,
"path": "/ddtrace/contrib/logging/__init__.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDatadog APM traces can be integrated with the logs product by:\n\n1. Having ``ddtrace`` patch the ``logging`` module. This will add trace\nattributes to the log record.\n\n2. Updating the log formatter used by the application. In order to inject\ntracing information into a log the formatter must be updated to include the\ntracing attributes from the log record. ``ddtrace-run`` will do this\nautomatically for you by specifying a format. For more detail or instructions\nfor how to do this manually see the manual section below.\n\nWith these in place the trace information will be injected into a log entry\nwhich can be used to correlate the log and trace in Datadog.\n\n\nddtrace-run\n-----------\n\nWhen using ``ddtrace-run``, enable patching by setting the environment variable\n``DD_LOGS_INJECTION=true``. The logger by default will have a format that\nincludes trace information::\n\n import logging\n from ddtrace import tracer\n\n log = logging.getLogger()\n log.level = logging.INFO\n\n\n @tracer.wrap()\n def hello():\n log.info('Hello, World!')\n\n hello()\n\nManual Instrumentation\n----------------------\n\nIf you prefer to instrument manually, patch the logging library then update the\nlog formatter as in the following example\n\nMake sure that your log format exactly matches the following::\n\n from ddtrace import patch_all; patch_all(logging=True)\n import logging\n from ddtrace import tracer\n\n FORMAT = ('%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] '\n '[dd.service=%(dd.service)s dd.env=%(dd.env)s '\n 'dd.version=%(dd.version)s '\n 'dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s]'\n '- %(message)s')\n logging.basicConfig(format=FORMAT)\n log = logging.getLogger()\n log.level = logging.INFO\n\n\n @tracer.wrap()\n def hello():\n log.info('Hello, World!')\n\n hello()\n\"\"\"\n\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = [\"logging\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch, unpatch\n\n __all__ = [\"patch\", \"unpatch\"]\n"
},
{
"alpha_fraction": 0.5620549321174622,
"alphanum_fraction": 0.5701932907104492,
"avg_line_length": 33.49122619628906,
"blob_id": "6a20111f9149a58e4ad6a1c66e9e391689e57dcc",
"content_id": "2c472b2358e5c1d46523114806215c244a4fc888",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1966,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 57,
"path": "/tests/contrib/tornado/test_config.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "from ddtrace.filters import FilterRequestsOnUrl\n\nfrom .utils import TornadoTestCase\n\n\nclass TestTornadoSettings(TornadoTestCase):\n \"\"\"\n Ensure that Tornado web application properly configures the given tracer.\n \"\"\"\n def get_settings(self):\n # update tracer settings\n return {\n 'datadog_trace': {\n 'default_service': 'custom-tornado',\n 'tags': {'env': 'production', 'debug': 'false'},\n 'enabled': False,\n 'agent_hostname': 'dd-agent.service.consul',\n 'agent_port': 8126,\n 'settings': {\n 'FILTERS': [\n FilterRequestsOnUrl(r'http://test\\.example\\.com'),\n ],\n },\n },\n }\n\n def test_tracer_is_properly_configured(self):\n # the tracer must be properly configured\n assert self.tracer.tags == {'env': 'production', 'debug': 'false'}\n assert self.tracer.enabled is False\n assert self.tracer.writer.api.hostname == 'dd-agent.service.consul'\n assert self.tracer.writer.api.port == 8126\n # settings are properly passed\n assert self.tracer.writer._filters is not None\n assert len(self.tracer.writer._filters) == 1\n assert isinstance(self.tracer.writer._filters[0], FilterRequestsOnUrl)\n\n\nclass TestTornadoSettingsEnabled(TornadoTestCase):\n def get_settings(self):\n return {\n 'datadog_trace': {\n 'default_service': 'custom-tornado',\n 'enabled': True,\n },\n }\n\n def test_service(self):\n \"\"\"Ensure that the default service for a Tornado web application is configured.\"\"\"\n response = self.fetch('/success/')\n assert 200 == response.code\n\n spans = self.get_spans()\n assert 1 == len(spans)\n\n assert 'custom-tornado' == spans[0].service\n assert 'tornado.request' == spans[0].name\n"
},
{
"alpha_fraction": 0.6235954761505127,
"alphanum_fraction": 0.6235954761505127,
"avg_line_length": 34.599998474121094,
"blob_id": "2e9c611c813e47cfff958142b6478adfc9573e3a",
"content_id": "14425ea86006a3b9b1879d38076d822b4c0ab62d",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 178,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 5,
"path": "/tests/commands/ddtrace_run_service.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import os\n\nif __name__ == \"__main__\":\n assert os.getenv(\"DATADOG_SERVICE_NAME\") == \"my_test_service\" or os.getenv(\"DD_SERVICE\") == \"my_test_service\"\n print(\"Test success\")\n"
},
{
"alpha_fraction": 0.6650246381759644,
"alphanum_fraction": 0.6779880523681641,
"avg_line_length": 30.104839324951172,
"blob_id": "a000b3997eb5edde7707dc28161c95e085d7b2b0",
"content_id": "c3c52410f8fa340e401bb6b9aff5d4ed79eee29e",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3857,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 124,
"path": "/tests/profiling/collector/test_threading.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import threading\nimport time\n\nimport pytest\n\nfrom ddtrace.vendor.six.moves import _thread\n\nfrom ddtrace.profiling import recorder\nfrom ddtrace.profiling.collector import threading as collector_threading\n\nfrom . import test_collector\n\n\ndef test_repr():\n test_collector._test_repr(\n collector_threading.LockCollector,\n \"LockCollector(status=<ServiceStatus.STOPPED: 'stopped'>, \"\n \"recorder=Recorder(default_max_events=32768, max_events={}), capture_pct=5.0, nframes=64)\",\n )\n\n\ndef test_wrapper():\n r = recorder.Recorder()\n collector = collector_threading.LockCollector(r)\n with collector:\n\n class Foobar(object):\n lock_class = threading.Lock\n\n def __init__(self):\n lock = self.lock_class()\n assert lock.acquire()\n lock.release()\n\n # Try to access the attribute\n lock = Foobar.lock_class()\n assert lock.acquire()\n lock.release()\n\n # Try this way too\n Foobar()\n\n\ndef test_patch():\n r = recorder.Recorder()\n lock = threading.Lock\n collector = collector_threading.LockCollector(r)\n collector.start()\n assert lock == collector.original\n # wrapt makes this true\n assert lock == threading.Lock\n collector.stop()\n assert lock == threading.Lock\n assert collector.original == threading.Lock\n\n\ndef test_lock_acquire_events():\n r = recorder.Recorder()\n with collector_threading.LockCollector(r, capture_pct=100):\n lock = threading.Lock()\n lock.acquire()\n assert len(r.events[collector_threading.LockAcquireEvent]) == 1\n assert len(r.events[collector_threading.LockReleaseEvent]) == 0\n event = r.events[collector_threading.LockAcquireEvent][0]\n assert event.lock_name == \"test_threading.py:60\"\n assert event.thread_id == _thread.get_ident()\n assert event.wait_time_ns > 0\n # It's called through pytest so I'm sure it's gonna be that long, right?\n assert len(event.frames) > 3\n assert event.nframes > 3\n assert event.frames[0] == (__file__, 61, \"test_lock_acquire_events\")\n assert event.sampling_pct == 100\n\n\ndef test_lock_release_events():\n r = recorder.Recorder()\n with collector_threading.LockCollector(r, capture_pct=100):\n lock = threading.Lock()\n lock.acquire()\n time.sleep(0.1)\n lock.release()\n assert len(r.events[collector_threading.LockAcquireEvent]) == 1\n assert len(r.events[collector_threading.LockReleaseEvent]) == 1\n event = r.events[collector_threading.LockReleaseEvent][0]\n assert event.lock_name == \"test_threading.py:78\"\n assert event.thread_id == _thread.get_ident()\n assert event.locked_for_ns >= 0.1\n # It's called through pytest so I'm sure it's gonna be that long, right?\n assert len(event.frames) > 3\n assert event.nframes > 3\n assert event.frames[0] == (__file__, 81, \"test_lock_release_events\")\n assert event.sampling_pct == 100\n\n\[email protected](group=\"threading-lock-create\",)\ndef test_lock_create_speed_patched(benchmark):\n r = recorder.Recorder()\n with collector_threading.LockCollector(r):\n benchmark(threading.Lock)\n\n\[email protected](group=\"threading-lock-create\",)\ndef test_lock_create_speed(benchmark):\n benchmark(threading.Lock)\n\n\ndef _lock_acquire_release(lock):\n lock.acquire()\n lock.release()\n\n\[email protected](group=\"threading-lock-acquire-release\",)\[email protected](\n \"pct\", range(5, 61, 5),\n)\ndef test_lock_acquire_release_speed_patched(benchmark, pct):\n r = recorder.Recorder()\n with collector_threading.LockCollector(r, capture_pct=pct):\n benchmark(_lock_acquire_release, threading.Lock())\n\n\[email protected](group=\"threading-lock-acquire-release\",)\ndef 
test_lock_acquire_release_speed(benchmark):\n benchmark(_lock_acquire_release, threading.Lock())\n"
},
{
"alpha_fraction": 0.6318770051002502,
"alphanum_fraction": 0.6326860785484314,
"avg_line_length": 25.29787254333496,
"blob_id": "74dde1d262b9f656fb95476aa6292ef352d81ba4",
"content_id": "1a0173598a74e98fde6e698897881d8c59d97613",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1236,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 47,
"path": "/ddtrace/profiling/_service.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import enum\nimport threading\n\nfrom ddtrace.vendor import attr\n\n\nclass ServiceStatus(enum.Enum):\n \"\"\"A Service status.\"\"\"\n\n STOPPED = \"stopped\"\n RUNNING = \"running\"\n\n\nclass ServiceAlreadyRunning(RuntimeError):\n pass\n\n\[email protected]\nclass Service(object):\n \"\"\"A service that can be started or stopped.\"\"\"\n\n status = attr.ib(default=ServiceStatus.STOPPED, type=ServiceStatus, init=False)\n _service_lock = attr.ib(factory=threading.Lock, repr=False, init=False)\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n return self.stop()\n\n def start(self):\n \"\"\"Start the service.\"\"\"\n # Use a lock so we're sure that if 2 threads try to start the service at the same time, one of them will raise\n # an error.\n with self._service_lock:\n if self.status == ServiceStatus.RUNNING:\n raise ServiceAlreadyRunning(\"%s is already running\" % self.__class__.__name__)\n self.status = ServiceStatus.RUNNING\n\n def stop(self):\n \"\"\"Stop the service.\"\"\"\n self.status = ServiceStatus.STOPPED\n\n @staticmethod\n def join(timeout=None):\n \"\"\"Join the service once stopped.\"\"\"\n"
},
{
"alpha_fraction": 0.6921241283416748,
"alphanum_fraction": 0.6921241283416748,
"avg_line_length": 28.928571701049805,
"blob_id": "00fe936ffec033e62a088cb5902afe3591c0b210",
"content_id": "893469904780b3d5cba68b87139c8af63f635eb5",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 14,
"path": "/tests/profiling/collector/test_service.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "from ddtrace.profiling import _service\n\n\ndef test_service_status():\n s = _service.Service()\n assert s.status == _service.ServiceStatus.STOPPED\n s.start()\n assert s.status == _service.ServiceStatus.RUNNING\n s.stop()\n assert s.status == _service.ServiceStatus.STOPPED\n s.start()\n assert s.status == _service.ServiceStatus.RUNNING\n s.stop()\n assert s.status == _service.ServiceStatus.STOPPED\n"
},
{
"alpha_fraction": 0.6418918967247009,
"alphanum_fraction": 0.64987713098526,
"avg_line_length": 30.50967788696289,
"blob_id": "17b6d68aead01391a566e458955b1490b86ea03d",
"content_id": "e080669cc2918209c04601fdcc6304914bef10e6",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4884,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 155,
"path": "/tests/profiling/test_profiler.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "import pytest\n\nimport ddtrace\nfrom ddtrace.profiling import profiler\nfrom ddtrace.profiling.collector import stack\nfrom ddtrace.profiling.exporter import http\n\n\ndef test_status():\n p = profiler.Profiler()\n assert repr(p.status) == \"STOPPED\"\n p.start()\n assert repr(p.status) == \"RUNNING\"\n p.stop()\n assert repr(p.status) == \"STOPPED\"\n\n\ndef test_restart():\n p = profiler.Profiler()\n p.start()\n p.stop(flush=False)\n p.start()\n p.stop(flush=False)\n\n\ndef test_multiple_stop():\n \"\"\"Check that the profiler can be stopped twice.\n\n This is useful since the atexit.unregister call might not exist on Python 2,\n therefore the profiler can be stopped twice (once per the user, once at exit).\n \"\"\"\n p = profiler.Profiler()\n p.start()\n p.stop()\n p.stop()\n\n\[email protected](\n \"service_name_var\", (\"DD_SERVICE\", \"DD_SERVICE_NAME\", \"DATADOG_SERVICE_NAME\"),\n)\ndef test_default_from_env(service_name_var, monkeypatch):\n monkeypatch.setenv(\"DD_API_KEY\", \"foobar\")\n monkeypatch.setenv(service_name_var, \"foobar\")\n prof = profiler.Profiler()\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.service == \"foobar\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\ndef test_service_api(monkeypatch):\n monkeypatch.setenv(\"DD_API_KEY\", \"foobar\")\n prof = profiler.Profiler(service=\"foobar\")\n assert prof.service == \"foobar\"\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.service == \"foobar\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\ndef test_tracer_api(monkeypatch):\n monkeypatch.setenv(\"DD_API_KEY\", \"foobar\")\n prof = profiler.Profiler(tracer=ddtrace.tracer)\n assert prof.tracer == ddtrace.tracer\n for collector in prof.collectors:\n if isinstance(collector, stack.StackCollector):\n assert collector.tracer == ddtrace.tracer\n break\n else:\n pytest.fail(\"Unable to find stack collector\")\n\n\ndef test_env_default(monkeypatch):\n monkeypatch.setenv(\"DD_API_KEY\", \"foobar\")\n monkeypatch.setenv(\"DD_ENV\", \"staging\")\n monkeypatch.setenv(\"DD_VERSION\", \"123\")\n prof = profiler.Profiler()\n assert prof.env == \"staging\"\n assert prof.version == \"123\"\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.env == \"staging\"\n assert exporter.version == \"123\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\ndef test_env_api():\n prof = profiler.Profiler(env=\"staging\", version=\"123\")\n assert prof.env == \"staging\"\n assert prof.version == \"123\"\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.env == \"staging\"\n assert exporter.version == \"123\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\[email protected](\n \"name_var\", (\"DD_API_KEY\", \"DD_PROFILING_API_KEY\"),\n)\ndef test_env_api_key(name_var, monkeypatch):\n monkeypatch.setenv(name_var, \"foobar\")\n prof = profiler.Profiler()\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.api_key == \"foobar\"\n assert exporter.endpoint == \"https://intake.profile.datadoghq.com/v1/input\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\ndef test_env_no_api_key():\n prof = profiler.Profiler()\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.api_key is None\n 
assert exporter.endpoint == \"http://localhost:8126/profiling/v1/input\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\ndef test_env_endpoint_url(monkeypatch):\n monkeypatch.setenv(\"DD_AGENT_HOST\", \"foobar\")\n monkeypatch.setenv(\"DD_TRACE_AGENT_PORT\", \"123\")\n prof = profiler.Profiler()\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.api_key is None\n assert exporter.endpoint == \"http://foobar:123/profiling/v1/input\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n\n\ndef test_env_endpoint_url_no_agent(monkeypatch):\n monkeypatch.setenv(\"DD_SITE\", \"datadoghq.eu\")\n monkeypatch.setenv(\"DD_API_KEY\", \"123\")\n prof = profiler.Profiler()\n for exporter in prof.exporters:\n if isinstance(exporter, http.PprofHTTPExporter):\n assert exporter.api_key == \"123\"\n assert exporter.endpoint == \"https://intake.profile.datadoghq.eu/v1/input\"\n break\n else:\n pytest.fail(\"Unable to find HTTP exporter\")\n"
},
{
"alpha_fraction": 0.6723163723945618,
"alphanum_fraction": 0.6760828495025635,
"avg_line_length": 23.136363983154297,
"blob_id": "b5bc826f957495ccb2e8f6d643c758654aa2699e",
"content_id": "c179ff1bac516655d2d1f722637e242df44ddf59",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1062,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 44,
"path": "/ddtrace/contrib/redis/__init__.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTraces redis client queries.\n\nIf you are not autoinstrumenting with ``ddtrace-run`` then install the redis\ninstrumentation with::\n\n from ddtrace import patch\n patch(redis=True)\n\n\nGlobal Configuration\n~~~~~~~~~~~~~~~~~~~~\n\n.. py:data:: ddtrace.config.redis[\"service\"]\n\n The service name reported by default for your redis instances.\n\n Default: ``\"redis\"``\n\n\nInstance Configuration\n~~~~~~~~~~~~~~~~~~~~~~\n\n from ddtrace import Pin\n import redis\n\n # Override service name for this instance\n Pin.override(client, service=\"redis-queue\")\n\n # This will report a span with the default settings\n client = redis.StrictRedis(host=\"localhost\", port=6379)\n client.get(\"my-key\")\n\"\"\"\n\nfrom ...utils.importlib import require_modules\n\nrequired_modules = [\"redis\", \"redis.client\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .tracers import get_traced_redis, get_traced_redis_from\n\n __all__ = [\"get_traced_redis\", \"get_traced_redis_from\", \"patch\"]\n"
},
{
"alpha_fraction": 0.5723393559455872,
"alphanum_fraction": 0.5890142321586609,
"avg_line_length": 30.859375,
"blob_id": "ed03222745518f536195b2b7ab1c9fb725a21654",
"content_id": "4e0b96f07bafab2a4edd0fa5d62834b0704306e2",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4078,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 128,
"path": "/docs/configuration.rst",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": ".. _Configuration:\n\n===============\n Configuration\n===============\n\n`ddtrace` can be configured using environment variable. They are listed\nbelow:\n\n.. list-table::\n :widths: 3 1 1 4\n :header-rows: 1\n\n * - Variable Name\n - Type\n - Default value\n - Description\n * - ``DD_ENV``\n - String\n -\n - Set an application's environment e.g. ``prod``, ``pre-prod``, ``stage``. Added in ``v0.36.0``.\n * - ``DATADOG_ENV``\n - String\n -\n - Deprecated: use ``DD_ENV``\n * - ``DD_SERVICE``\n - String\n - (autodetected)\n - Set the service name to be used for this application. A default is\n provided for these integrations: :ref:`bottle`, :ref:`flask`, :ref:`grpc`,\n :ref:`pyramid`, :ref:`pylons`, :ref:`tornado`, :ref:`celery`, :ref:`django` and\n :ref:`falcon`. Added in ``v0.36.0``.\n * - ``DD_SERVICE_NAME`` or ``DATADOG_SERVICE_NAME``\n - String\n -\n - Deprecated: use ``DD_SERVICE``.\n * - ``DD_TAGS``\n - String\n -\n - Set global tags to be attached to every span. e.g. ``key1:value1,key2,value2``. Added in ``v0.38.0``.\n * - ``DD_VERSION``\n - String\n -\n - Set an application's version in traces and logs e.g. ``1.2.3``,\n ``6c44da20``, ``2020.02.13``. Added in ``v0.36.0``.\n * - ``DD_SITE``\n - String\n - datadoghq.com\n - Specify which site to use for uploading profiles. Set to\n ``datadoghq.eu`` to use EU site.\n * - ``DATADOG_TRACE_ENABLED``\n - Boolean\n - True\n - Enable web framework and library instrumentation. When false, your\n application code will not generate any traces.\n * - ``DATADOG_TRACE_DEBUG``\n - Boolean\n - False\n - Enable debug logging in the tracer\n * - ``DATADOG_PATCH_MODULES``\n - String\n -\n - Override the modules patched for this execution of the program. Must be\n a list in the ``module1:boolean,module2:boolean`` format. For example,\n ``boto:true,redis:false``.\n * - ``DATADOG_PRIORITY_SAMPLING``\n - Boolean\n - True\n - Enables :ref:`Priority Sampling`.\n * - ``DD_LOGS_INJECTION``\n - Boolean\n - True\n - Enables :ref:`Logs Injection`.\n * - ``DD_TRACE_AGENT_URL``\n - URL\n - ``http://localhost:8126``\n - The URL to use to connect the Datadog agent. The url can starts with\n ``http://`` to connect using HTTP or with ``unix://`` to use a Unix\n Domain Socket.\n * - ``DATADOG_TRACE_AGENT_HOSTNAME``\n - String\n -\n - Deprecated: use ``DD_TRACE_AGENT_URL``\n * - ``DATADOG_TRACE_AGENT_PORT``\n - Integer\n -\n - Deprecated: use ``DD_TRACE_AGENT_URL``\n * - ``DD_PROFILING_API_TIMEOUT``\n - Float\n - 10\n - The timeout in seconds before dropping events if the HTTP API does not\n reply.\n * - ``DD_API_KEY``\n - String\n -\n - The Datadog API key to use when uploading profiles.\n * - ``DD_PROFILING_API_URL``\n - URL\n - ``https://intake.profile.datadoghq.com/v1/input``\n - The Datadog API HTTP endpoint to use when uploading events.\n * - ``DD_PROFILING_MAX_TIME_USAGE_PCT``\n - Float\n - 2\n - The percentage of maximum time the stack profiler can use when computing\n statistics. Must be greather than 0 and lesser or equal to 100.\n * - ``DD_PROFILING_MAX_FRAMES``\n - Integer\n - 64\n - The maximum number of frames to capture in stack execution tracing.\n * - ``DD_PROFILING_CAPTURE_PCT``\n - Float\n - 10\n - The percentage of events that should be captured (e.g. memory\n allocation). Greater values reduce the program execution speed. 
Must be\n greater than 0 lesser or equal to 100.\n * - ``DD_PROFILING_UPLOAD_INTERVAL``\n - Float\n - 60\n - The interval in seconds to wait before flushing out recorded events.\n * - ``DD_PROFILING_IGNORE_PROFILER``\n - Boolean\n - True\n - Whether to ignore the profiler in the generated data.\n * - ``DD_PROFILING_TAGS``\n - String\n -\n - The tags to apply to uploaded profile. Must be a list in the\n ``key1:value,key2:value2`` format.\n"
},
{
"alpha_fraction": 0.6016577482223511,
"alphanum_fraction": 0.6036080121994019,
"avg_line_length": 33.18333435058594,
"blob_id": "944d08493e43452cf5fb2b25385a59257d53a3b3",
"content_id": "a72861e9ef02c7c784b90ac6f5a37b14d5b210f9",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2051,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 60,
"path": "/ddtrace/profiling/scheduler.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\nimport logging\n\nfrom ddtrace import compat\nfrom ddtrace.profiling import _attr\nfrom ddtrace.profiling import _periodic\nfrom ddtrace.profiling import _traceback\nfrom ddtrace.profiling import exporter\nfrom ddtrace.vendor import attr\n\nLOG = logging.getLogger(__name__)\n\n\[email protected]\nclass Scheduler(_periodic.PeriodicService):\n \"\"\"Schedule export of recorded data.\"\"\"\n\n recorder = attr.ib()\n exporters = attr.ib()\n _interval = attr.ib(factory=_attr.from_env(\"DD_PROFILING_UPLOAD_INTERVAL\", 60, float))\n _configured_interval = attr.ib(init=False)\n _last_export = attr.ib(init=False, default=None)\n\n def __attrs_post_init__(self):\n # Copy the value to use it later since we're going to adjust the real interval\n self._configured_interval = self.interval\n\n def start(self):\n \"\"\"Start the scheduler.\"\"\"\n LOG.debug(\"Starting scheduler\")\n super(Scheduler, self).start()\n self._last_export = compat.time_ns()\n LOG.debug(\"Scheduler started\")\n\n def flush(self):\n \"\"\"Flush events from recorder to exporters.\"\"\"\n LOG.debug(\"Flushing events\")\n if self.exporters:\n events = self.recorder.reset()\n start = self._last_export\n self._last_export = compat.time_ns()\n for exp in self.exporters:\n try:\n exp.export(events, start, self._last_export)\n except exporter.ExportError as e:\n LOG.error(\"%s. Ignoring.\", _traceback.format_exception(e))\n except Exception:\n LOG.exception(\n \"Unexpected error while exporting events. \"\n \"Please report this bug to https://github.com/DataDog/dd-trace-py/issues\"\n )\n\n def periodic(self):\n start_time = compat.monotonic()\n try:\n self.flush()\n finally:\n self.interval = max(0, self._configured_interval - (compat.monotonic() - start_time))\n\n on_shutdown = flush\n"
},
{
"alpha_fraction": 0.740031898021698,
"alphanum_fraction": 0.7496013045310974,
"avg_line_length": 30.350000381469727,
"blob_id": "c7e3a6fb104b5fce3f52ad832d668d9abe256dce",
"content_id": "de812fd58d3f879cc456251c3688b26101cefde6",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 627,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 20,
"path": "/ddtrace/contrib/redis/tracers.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "from redis import StrictRedis\n\nfrom ...utils.deprecation import deprecated\n\n\nDEFAULT_SERVICE = \"redis\"\n\n\n@deprecated(message=\"Use patching instead (see the docs).\", version=\"1.0.0\")\ndef get_traced_redis(ddtracer, service=DEFAULT_SERVICE, meta=None):\n return _get_traced_redis(ddtracer, StrictRedis, service, meta)\n\n\n@deprecated(message=\"Use patching instead (see the docs).\", version=\"1.0.0\")\ndef get_traced_redis_from(ddtracer, baseclass, service=DEFAULT_SERVICE, meta=None):\n return _get_traced_redis(ddtracer, baseclass, service, meta)\n\n\ndef _get_traced_redis(ddtracer, baseclass, service, meta):\n return baseclass\n"
},
{
"alpha_fraction": 0.6204208731651306,
"alphanum_fraction": 0.6313328146934509,
"avg_line_length": 30.292682647705078,
"blob_id": "5d3b0f33c2dc0907c1d9cea1747b745da10277e9",
"content_id": "e8e43b681b45f537705845181a2ff45f45d87136",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1283,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 41,
"path": "/ddtrace/ext/aws.py",
"repo_name": "seatgeek/dd-trace-py",
"src_encoding": "UTF-8",
"text": "BLACKLIST_ENDPOINT = [\"kms\", \"sts\"]\nBLACKLIST_ENDPOINT_TAGS = {\n \"s3\": [\"params.Body\"],\n}\n\n\ndef _flatten_dict(d, sep=\".\", prefix=\"\"):\n \"\"\"\n Returns a normalized dict of depth 1 with keys in order of embedding\n\n \"\"\"\n # adapted from https://stackoverflow.com/a/19647596\n return (\n {prefix + sep + k if prefix else k: v for kk, vv in d.items() for k, v in _flatten_dict(vv, sep, kk).items()}\n if isinstance(d, dict)\n else {prefix: d}\n )\n\n\ndef truncate_arg_value(value, max_len=1024):\n \"\"\"Truncate values which are bytes and greater than `max_len`.\n Useful for parameters like 'Body' in `put_object` operations.\n \"\"\"\n if isinstance(value, bytes) and len(value) > max_len:\n return b\"...\"\n\n return value\n\n\ndef add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):\n if endpoint_name not in BLACKLIST_ENDPOINT:\n blacklisted = BLACKLIST_ENDPOINT_TAGS.get(endpoint_name, [])\n tags = dict((name, value) for (name, value) in zip(args_names, args) if name in args_traced)\n tags = _flatten_dict(tags)\n tags = {k: truncate_arg_value(v) for k, v in tags.items() if k not in blacklisted}\n span.set_tags(tags)\n\n\nREGION = \"aws.region\"\nAGENT = \"aws.agent\"\nOPERATION = \"aws.operation\"\n"
}
] | 24 |
iskdrews/exploring-python | https://github.com/iskdrews/exploring-python | d6bc2e6dc2e67da5cbc191cda24eeb591d95bcc1 | 7c876247efbe9b92fb1363c7e40eef06ee454938 | f337c9a5efbc00985027c352e13e8f27df0b52ab | refs/heads/master | 2022-11-16T04:05:17.475827 | 2020-07-09T23:00:17 | 2020-07-09T23:00:17 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7903226017951965,
"alphanum_fraction": 0.7903226017951965,
"avg_line_length": 62,
"blob_id": "a6bac7eb3d9387ec4347f5bb1fda423edc9cadeb",
"content_id": "aa7e46302bdeb47ce2786792a1d566fbb2902063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 1,
"path": "/Courses/Udemy - Complete Python Bootcamp/README.md",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "# Udemy- Complete Python Bootcamp: From Zero to Hero in Python"
},
{
"alpha_fraction": 0.4406130313873291,
"alphanum_fraction": 0.5517241358757019,
"avg_line_length": 19.153846740722656,
"blob_id": "2b25cf73a865e61a9d1b78d9733db7f09b7e2dd7",
"content_id": "eac5f15ffe50bf82a8c4f4db052cc6464a678691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 13,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-dictionaries.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "my_dict = { 'Key1': 'value', 'key2': 'value2' }\nmy_dict['key2']\n\nprices_lookup = {'apples': 2.99, 'oranges': 1.99}\nprices_lookup['apples']\n\nd = {'k1': 123, 'k2': [0,1,2], 'k3': {'insideKey':100}}\nd['k2']\nd['k3']['insideKey']\nd['k4'] = 1000\nd\nd.keys()\nd.values()"
},
{
"alpha_fraction": 0.4765625,
"alphanum_fraction": 0.6171875,
"avg_line_length": 13.333333015441895,
"blob_id": "a88faf64151f4b03edc763945a9f98cb1c29df47",
"content_id": "be05c224a9b8bed7e6571cb968da40cd2459e8eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 9,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-sets.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "mySet = set()\nmySet.add(1)\nmySet\nmySet.add(2)\nmySet.add(2)\nmySet.add('d')\n\nmyList = [1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,]\nset(myList)"
},
{
"alpha_fraction": 0.6138888597488403,
"alphanum_fraction": 0.6583333611488342,
"avg_line_length": 14.69565200805664,
"blob_id": "8d55188dd060f1bdccd74d10b08fe1bdebf9458f",
"content_id": "d98669fd87c6b510486984fe537c45c8d8a84083",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 23,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-lists.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "my_list = [1,2,3]\nmy_list = ['String', 100, 10.11]\nlen(my_list)\nmy_list[0]\nmy_list[1:]\nanother_list = ['Four', 'Five']\nmy_list + another_list\nmy_list.append('Fuck')\nmy_list.pop()\n\nnum_list = [4,2,1,8]\nnum_list.sort()\ntype(num_list)\n\nmy_sorted_list = num_list.sort()\nmy_sorted_list\ntype(my_sorted_list) # NoneType \n\n# None type\nNone\n\nnum_list.reverse()\nnum_list"
},
{
"alpha_fraction": 0.5817757248878479,
"alphanum_fraction": 0.6121495366096497,
"avg_line_length": 34.66666793823242,
"blob_id": "aaa4b7ad7ddaef027e69e2edfd4cc10a9d29a2d9",
"content_id": "b177220f9f311cbf6ec0cad941ee1ba43071b34a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 12,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-prrint.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "print('This is a {} string'.format('Inserted'))\nprint('The {} {} {}'.format('fox', 'brown', 'quick'))\nprint('The {2} {1} {0}'.format('fox', 'brown', 'quick'))\nprint('The {f} {f} {f}'.format(f='fox', b='brown', q='quick'))\n\nresult = 100/777\nprint('The result was {r}'.format(r=result))\nprint('The result was {r:1.3f}'.format(r=result))\n\n# f-string and this new to python 3.6\nname = 'iskander'\nprint(f'Hello, his name is {name}')\n"
},
{
"alpha_fraction": 0.5961002707481384,
"alphanum_fraction": 0.6629526615142822,
"avg_line_length": 13.399999618530273,
"blob_id": "a89fee8732cfa990ac84b92a91e08d339384fa8b",
"content_id": "d4c32ee1c22d2973220d7b2c08970f80818e532e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 359,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 25,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-string.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "'Hello'\n\"Hello\"\n'This is also a stirng'\n\"I'm going\"\nprint(\"Hello\")\nlen('Iskander Andrews')\n\nmyString = \"Hello World\"\nmyString[0]\nmyString[-1]\nmyString[-7] \n\nmyString[2:]\nmyString[:3]\nmyString[3:6]\nmyString[::4]\nmyString[::-1] # Inverse Array\nmyString[0:4:2]\n\nname = \"Sam\"\n# name[0] = \"H\"\nlast_letters = name[1:]\nlast_letters\n'P' + last_letters\nlast_letters * 1000000000"
},
{
"alpha_fraction": 0.5166666507720947,
"alphanum_fraction": 0.5666666626930237,
"avg_line_length": 9,
"blob_id": "1f9bac53a666886476e990ca94aec049afa3a56a",
"content_id": "76fb43a2f08b63d3c8e012c4daa34d9f3d04a64d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 6,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-variables.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "a = 5 \nprint(a)\nprint(a + a)\na = 11\nprint(a)\nprint(type(a))\n"
},
{
"alpha_fraction": 0.5069980025291443,
"alphanum_fraction": 0.5312767624855042,
"avg_line_length": 35.0927848815918,
"blob_id": "7572dcb0caa9c0e1c76dcd046976357f38b06672",
"content_id": "2aa7793884d6da108ae72ece78482c9004da42b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3501,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 97,
"path": "/Machine Learning/Email Span Detection/pycharm/app.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "import tkinter as Tk\nfrom mail_checker import *\nfrom tkinter import messagebox\n# ntlk.download('punkt')\n\nclass App(object):\n t = Tk.Text\n\n def __init__(self, parent):\n self.root = parent\n self.root.title(\"Email SPAM or HAM detector\")\n self.frame = Tk.Frame(parent)\n self.frame.pack()\n btn = Tk.Button(self.frame, text=\"About Team\", command=self.open_About_Team_Frame, bg='black', font=(\"\", 20),\n fg='green', width=20)\n btn.pack()\n btn = Tk.Button(self.frame, text=\"Start\", command=self.open_Program_Frame, bg='black', font=(\"\", 20), fg='red',\n width=20,)\n btn.pack()\n def hide(self):\n self.root.withdraw()\n\n def open_About_Team_Frame(self):\n self.hide()\n about_team_Frame = Tk.Toplevel()\n about_team_Frame.geometry(\"600x250\")\n about_team_Frame.title(\"otherFrame\")\n andrew = Tk.Label(about_team_Frame, text=\"Andrew Amir 20150153\", bg='black', fg=\"blue\",\n font=(\"Helvetica\", 20))\n andrew.pack()\n yassen = Tk.Label(about_team_Frame, text=\"Yassen Hatem 20150633\", fg=\"blue\", font=(\"Helvetica\", 20))\n yassen.pack()\n marc = Tk.Label(about_team_Frame, text=\"Marc Essam 20150398\", fg=\"blue\", font=(\"Helvetica\", 20))\n marc.pack()\n eyad = Tk.Label(about_team_Frame, text=\"Eyad Mohamed 20150156\", fg=\"blue\", font=(\"Helvetica\", 20))\n eyad.pack()\n mina = Tk.Label(about_team_Frame, text=\"Mina Mofreh 20150667\", fg=\"blue\", font=(\"Helvetica\", 20))\n mina.pack()\n handler = lambda: self.onCloseOtherFrame(about_team_Frame)\n btn = Tk.Button(about_team_Frame, text=\"Back\", command=handler)\n btn.pack()\n\n def onCloseOtherFrame(self, otherFrame):\n otherFrame.destroy()\n self.show()\n\n def show(self):\n self.root.update()\n self.root.deiconify()\n\n def format_text(self):\n text = self.t.get(\"1.0\", 'end-1c')\n email_text = ''\n for i in text:\n if i != '\\n':\n email_text += i\n else:\n email_text += ' '\n self.fina_result(email_text)\n\n def fina_result(self, text):\n result = ''\n text_color = ''\n if len(text) == 0:\n result = \"it seems that you entered nothing!\"\n text_color = \"blue\"\n else:\n if self.check_mail(text):\n result = \" SPAM EMAIL \"\n else:\n result = \" HAM EMAIL \"\n\n messagebox.showinfo(\"Information\", result)\n\n #################################################\n def check_mail(self, str):\n pr_message = process_message(str)\n if sc_tf_idf.classify(pr_message):\n return True\n else:\n return False\n #################################################\n\n def open_Program_Frame(self):\n self.hide()\n otherFrame = Tk.Toplevel()\n otherFrame.geometry(\"500x400\")\n otherFrame.title(\"Email SPAM or HAM detector\")\n L1 = Tk.Label(otherFrame, text=\"Enter Email Text\")\n L1.pack()\n self.t = Tk.Text(otherFrame, height=20, width=40, bd=5)\n self.t.pack()\n handler = lambda: self.onCloseOtherFrame(otherFrame)\n btn = Tk.Button(otherFrame, text=\"Back\", width=15, command=handler)\n btn.pack()\n btn = Tk.Button(otherFrame, text=\"Check\", fg='green', width=15, command=self.format_text)\n btn.pack()\n"
},
{
"alpha_fraction": 0.5370370149612427,
"alphanum_fraction": 0.5987654328346252,
"avg_line_length": 11.461538314819336,
"blob_id": "0c4467d6e4cb8f0d9e3dfd4ad993cc4d11c03991",
"content_id": "412a2a0a51e10ccabfcece5c5ba0e7c4d63f7e2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 13,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-tuples.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "myTuple = (1,2,3)\nmylist = [1,2,3]\n\ntype(myTuple)\ntype(mylist)\n\nmyTuple2 = ('one', 100)\n\nmyTuple = ('a', 'a', 'b')\nmyTuple.count('a')\nmyTuple.index('a')\n\nmyTuple\n"
},
{
"alpha_fraction": 0.5227272510528564,
"alphanum_fraction": 0.6136363744735718,
"avg_line_length": 3.5,
"blob_id": "f50eae8d09610f29343492a3085041a75be13453",
"content_id": "47ce392972c63f0e82e74dc04eee5c1a18e11914",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 10,
"path": "/Courses/Udemy - Complete Python Bootcamp/python-objects-data-structures/python-boolean.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "True\nFalse\n\ntype(True)\n1>2\n\n1==2\n\nb = None\nb"
},
{
"alpha_fraction": 0.6478180885314941,
"alphanum_fraction": 0.6653349995613098,
"avg_line_length": 35.11627960205078,
"blob_id": "4d7cb8b184eb27a2612464d28ba555d476113a42",
"content_id": "467fa443834913d8dacbb7c850ffde7460e647bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3254,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 86,
"path": "/Machine Learning/Email Span Detection/spam.py",
"repo_name": "iskdrews/exploring-python",
"src_encoding": "UTF-8",
"text": "import numpy as np \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt\r\nimport string\r\nfrom nltk.stem import SnowballStemmer\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\ndata = pd.read_csv(\"spam.csv\",encoding='latin-1')\r\ndata.head()\r\ndata=data.drop([\"Unnamed: 2\", \"Unnamed: 3\", \"Unnamed: 4\"], axis=1)\r\ndata=data.rename(columns={\"v1\":\"class\", \"v2\":\"text\"})\r\ndata.head()\r\ndata['length']=data['text'].apply(len)\r\ndata.head()\r\ncount_class=pd.value_counts(data['class'], sort=True)\r\ncount_class.plot(kind= 'bar', color= [\"blue\", \"orange\"])\r\nplt.title('Bar chart')\r\nplt.show()\r\ncount_class.plot(kind = 'pie', autopct='%1.0f%%')\r\nplt.title('Pie chart')\r\nplt.ylabel('')\r\nplt.show()\r\nimport seaborn as sns\r\nham =data[data['class'] == 'ham']['text'].str.len()\r\nsns.distplot(ham, label='Ham')\r\nspam = data[data['class'] == 'spam']['text'].str.len()\r\nsns.distplot(spam, label='Spam')\r\nplt.title('Distribution by Length')\r\nplt.legend()\r\nfrom collections import Counter\r\nount1 = Counter(\" \".join(data[data['class']=='ham'][\"text\"]).split()).most_common(30)\r\ndata1 = pd.DataFrame.from_dict(count1)\r\ndata1 = data1.rename(columns={0: \"words of ham\", 1 : \"count\"})\r\ncount2 = Counter(\" \".join(data[data['class']=='spam'][\"text\"]).split()).most_common(30)\r\ndata2 = pd.DataFrame.from_dict(count2)\r\ndata2 = data2.rename(columns={0: \"words of spam\", 1 : \"count_\"})\r\n\r\ndata1.plot.bar(legend = False, color = 'purple',figsize = (20,15))\r\ny_pos = np.arange(len(data1[\"words of ham\"]))\r\nplt.xticks(y_pos, data1[\"words of ham\"])\r\nplt.title('Top 30 words of ham')\r\nplt.xlabel('words')\r\nplt.ylabel('number')\r\nplt.show()\r\n\r\ndata2.plot.bar(legend = False, color = 'green', figsize = (20,17))\r\ny_pos = np.arange(len(data2[\"words of spam\"]))\r\nplt.xticks(y_pos, data2[\"words of spam\"])\r\nplt.title('Top 30 words of spam')\r\nplt.xlabel('words')\r\nplt.ylabel('number')\r\nplt.show()\r\nimport matplotlib as mpl\r\nmpl.rcParams['patch.force_edgecolor'] = True\r\nplt.style.use('seaborn-bright')\r\ndata.hist(column='length', by='text', bins=50,figsize=(11,5))\r\ndef pre_process(text):\r\n \r\n text = text.translate(str.maketrans('', '', string.punctuation))\r\n text = [word for word in text.split() if word.lower() not in stopwords.words('english')]\r\n words = \"\"\r\n for i in text:\r\n stemmer = SnowballStemmer(\"english\")\r\n words += (stemmer.stem(i))+\" \"\r\n return words\r\n\r\nimport nltk\r\n\r\nnltk.download('stopwords')\r\n\r\ntextFeatures = data['text'].copy()\r\ntextFeatures = textFeatures.apply(pre_process)\r\nvectorizer = TfidfVectorizer(\"english\")\r\nfeatures = vectorizer.fit_transform(textFeatures)\r\nfeatures_train, features_test, labels_train, labels_test = train_test_split(features, data['class'], test_size=0.3, random_state=111)\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.svm import SVC\r\n\r\n\r\nfor x in range (1, 7):\r\n svc = SVC(kernel='sigmoid', gamma=x)\r\n svc.fit(features_train, labels_train)\r\n prediction = svc.predict(features_test)\r\n print(\"Accuracy Test\", accuracy_score(labels_test,prediction))\r\n print(\"Accuracy Traning\", svc.score(features_train, labels_train))\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
}
] | 11 |
kr-neerav/euler | https://github.com/kr-neerav/euler | 168b5343d0fab8012786418f22e3a1885c961f8f | 4ce4125a94fe51fb5fa11e18b6e1e7e6ae51a0d5 | b95d82880097b6041f7a58b62f8bd98f766bd213 | refs/heads/master | 2020-03-29T23:17:25.954866 | 2019-09-06T04:15:53 | 2019-09-06T04:15:57 | 150,465,561 | 0 | 0 | null | 2018-09-26T17:38:14 | 2018-09-26T23:42:33 | 2018-09-27T00:00:19 | Python | [
{
"alpha_fraction": 0.5685997009277344,
"alphanum_fraction": 0.602545976638794,
"avg_line_length": 24.25,
"blob_id": "6fd9568ca4d1209c14c9c0f9fb21878c0a507065",
"content_id": "5dc3d907ccb0658ac87134ddbffdd4144bdf2988",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 28,
"path": "/problem4/attempt4.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef is_large(a:int, b:int) -> bool:\n \"\"\"check if a is greater than or equal to b\"\"\"\n return a>=b\n\ndef get_remainder(n:int, d:int) -> int:\n \"\"\"return the remainder when n is divided by d\"\"\"\n return n%d\n\ndef is_palindrome(num:int) -> bool:\n \"\"\"check if a number is palindrome by reversing the number\"\"\"\n rev = 0\n temp = num\n while temp!= 0:\n rev = rev*10 + get_remainder(temp, 10)\n temp = int(temp/10)\n return num == rev\n\nnum1 = 999\nnum2 = 999\nmax_palindrome = 0\nfor i in range(num1,99,-1):\n for j in range(num2,i-1,-1):\n if is_large(i*j, max_palindrome) and is_palindrome(i*j):\n max_palindrome = i*j\n\nprint(max_palindrome)\n"
},
{
"alpha_fraction": 0.5704697966575623,
"alphanum_fraction": 0.6107382774353027,
"avg_line_length": 18.866666793823242,
"blob_id": "046143ff5f34dcbad1b3e310756f0baad9f4aa03",
"content_id": "d3d447c8957f7d3efb5b044b715ce645cd8594ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/problem1/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\ndef is_divisible(i:int, j:int) -> bool:\n \"\"\"return if i is divisible by j\"\"\"\n return i%j == 0\n\nupper_limit = 1000\nnums = []\ndiv1 = 3\ndiv2 = 5\n\nfor i in range(upper_limit):\n if is_divisible(i, div1) or is_divisible(i, div2):\n nums.append(i)\nprint(sum(nums))\n"
},
{
"alpha_fraction": 0.5317460298538208,
"alphanum_fraction": 0.5555555820465088,
"avg_line_length": 24.200000762939453,
"blob_id": "b1651511f14daf52f3b23248041570abf8474747",
"content_id": "7342d69a5e555f56e0ac500a9b411f1134bd8d6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 20,
"path": "/problem9/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef check_sum(a:int, b:int, c:int, limit:int) -> bool:\n \"\"\"check if sum of a, b, c is less than limit\"\"\"\n return a + b + c == limit\n\ndef find_square(a:int) -> int:\n \"\"\"find the square of a given a\"\"\"\n return a*a\n\nfound = False\nlimit = 1000\nfor i in range(1,limit,1):\n for j in range(i+1,limit,1):\n if (find_square(i) + find_square(j) == find_square(limit - i - j)):\n found = True\n break\n if found:\n break\nprint(i*j*(1000-i-j))\n"
},
{
"alpha_fraction": 0.5588942170143127,
"alphanum_fraction": 0.5865384340286255,
"avg_line_length": 20.894737243652344,
"blob_id": "128fe68a776fe6234aeed32bb831084977497771",
"content_id": "627eaa0203e566a4bb1db2d772a740c09f3ba6a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 38,
"path": "/problem12/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\ndef add(a:int, b:int) -> int:\n \"\"\"return the result of addition of a and b\"\"\"\n return a + b\n\n\ndef get_triangle_number(n:int) -> int:\n \"\"\"return the n th triangle number i.e. 1 + 2 + 3 + ... + n\"\"\"\n return int(n*(n+1)/2)\n\ndef is_divisor(n:int, i:int) -> bool:\n \"\"\"check if n is divisible by i\"\"\"\n return n%i == 0\n\ndef get_divisors(n:int) -> int:\n \"\"\"get all divisors of number n\"\"\"\n divisors = 0\n for i in range(1,int(n/2)+1):\n if is_divisor(n,i):\n divisors = divisors + 1\n return divisors + 1\n\ndivisors = 0\nn = 0\ntriangle_number = 1\nwhile(divisors < 500):\n n = n + 1\n triangle_number = get_triangle_number(n)\n divisors = get_divisors(triangle_number)\n if is_divisor(n, 100):\n print(n)\n\n\n\nprint(triangle_number)\nprint(n)\nprint(len(divisors))\n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 71,
"blob_id": "54e7ced7c317dac9dade2f98122067a3fcc34d46",
"content_id": "9d99d8722dca425d531b38883ef07739712b4583",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 1,
"path": "/README.md",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "This project will be used to create solutions for Project Euler program\n"
},
{
"alpha_fraction": 0.552293598651886,
"alphanum_fraction": 0.5614678859710693,
"avg_line_length": 18.464284896850586,
"blob_id": "5adf4ed6174be8d8a335214aaa4c33f1f51f905f",
"content_id": "348a2405048ea69e3844b8d5feee2e67446fc39f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 545,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 28,
"path": "/problem5/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\ndef is_small(a:int, b:int) -> bool:\n \"\"\"check if a is smaller than or equal to b\"\"\"\n return a <= b\n\ndef get_remainder(n:int, d:int) -> int:\n \"\"\"return the remainder when n is divided by d\"\"\"\n return n%d\n\ndef hcf(a:int, b:int) -> int:\n \"\"\"find the hcf of a and b\"\"\"\n if b == 0:\n return a\n return hcf(b, get_remainder(a,b))\n\ndef lcm(a:int, b:int) -> int:\n \"\"\"find the lcm of a and b\"\"\"\n return int(a*b/hcf(a,b))\n\nnum_list = range(3,21)\n\na = 2\nfor b in num_list:\n a = lcm(a,b)\n\nprint(a)\n"
},
{
"alpha_fraction": 0.6148359775543213,
"alphanum_fraction": 0.6452686786651611,
"avg_line_length": 35.877193450927734,
"blob_id": "2df0ded4b92b8adc3d309e516a7da2b39d360ecf",
"content_id": "717b45a9b64e5a45cb3a7b4043b30bc1bbe37ddc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2103,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 57,
"path": "/problem3/attempt3.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport math\n\ndef is_divisible(inpt:int, i:int) -> bool:\n \"\"\"check if inpt is divisible by i\"\"\"\n return inpt%i == 0\n\ndef get_probable_prime(n: int) -> [int]:\n \"\"\"prime numbers always take the form of 6n-1 or 6n+1. This function generates the \n probable prime numbers closet to n\"\"\"\n return [6*n-1, 6*n+1]\n\ndef is_small(a:int, b:int) -> bool:\n \"\"\"check if a is smaller than or equal to b\"\"\"\n return a <= b\n\ndef is_prime(inpt:int) -> bool:\n \"\"\"check if a number is prime or not\"\"\"\n inpt_sqrt = int(math.sqrt(inpt))\n n = 1\n prime1, prime2 = get_probable_prime(n)\n # loop to check all numbers of the format 6n-1 and 6n+1 less than inpt_sqrt which divide inpt\n while is_small(prime2, inpt_sqrt) or is_small(prime1, inpt_sqrt):\n if is_divisible(inpt, prime1) or is_divisible(inpt, prime2):\n return False\n #get next set of probable prime numbers\n n = n + 1\n prime1, prime2 = get_probable_prime(n)\n return True\n\ndef get_prime_factors(inpt:int) -> [int]:\n \"\"\"generate all prime factors of a given number\"\"\"\n # for 2 and 3 manual check is to be performed as they are not in the format of 6n-1 and 6n+1\n prime_factors = []\n if is_divisible(num,2):\n prime_factors.append(2)\n if is_divisible(num,3):\n prime_factors.append(3)\n inpt_sqrt = int(math.sqrt(inpt))\n #generate first set of probable prime numbers\n n = 1\n prime1, prime2 = get_probable_prime(n)\n #loop to check for all numbers less than inpt_sqrt of the format 6n-1 and 6n+1 which are prime and divide inpt\n while is_small(prime1, inpt_sqrt) or is_small(prime2, inpt_sqrt):\n if is_prime(prime1) and is_divisible(inpt, prime1):\n prime_factors.append(prime1)\n if is_prime(prime2) and is_small(prime2, inpt_sqrt) and is_divisible(inpt, prime2):\n prime_factors.append(prime2)\n #generate next set of probable prime numbers\n n = n + 1\n prime1, prime2 = get_probable_prime(n)\n return prime_factors\n\n\nnum = 600851475143\nprint(max(get_prime_factors(num)))\n\n"
},
{
"alpha_fraction": 0.5948753356933594,
"alphanum_fraction": 0.6260387897491455,
"avg_line_length": 29.08333396911621,
"blob_id": "503cd3b718738c4230e2d128d74f36ea5a760d1c",
"content_id": "079e0550789f774ab90626df456f48d354728029",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1444,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 48,
"path": "/problem10/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport math\n\ndef is_divisible(inpt:int, i:int) -> bool:\n \"\"\"check if inpt is divisible by i\"\"\"\n return inpt%i == 0\n \ndef is_small(a:int, b:int) -> bool:\n \"\"\"check if a is smaller than or equal to b\"\"\"\n return a <= b\n \ndef get_probable_prime(n: int) -> [int]:\n \"\"\"prime numbers always take the form of 6n-1 or 6n+1. This function generates the \n probable prime numbers closet to n\"\"\"\n return [6*n-1, 6*n+1]\n \ndef is_prime(inpt:int) -> bool:\n \"\"\"check if a number is prime or not\"\"\"\n inpt_sqrt = int(math.sqrt(inpt))\n n = 1\n prime1, prime2 = get_probable_prime(n)\n # loop to check all numbers of the format 6n-1 and 6n+1 less than inpt_sqrt which divide inpt\n while is_small(prime2, inpt_sqrt) or is_small(prime1, inpt_sqrt):\n if is_divisible(inpt, prime1) or is_divisible(inpt, prime2):\n return False\n #get next set of probable prime numbers\n n = n + 1\n prime1, prime2 = get_probable_prime(n)\n return True\n\ndef is_large(a:int, b:int) -> bool:\n \"\"\"check if a is greater than or equal to b\"\"\"\n return a>=b\n\nprimes = [2,3]\nn = 1\nlimit = 2000000\nprime1, prime2 = get_probable_prime(n)\nwhile is_small(prime1,limit):\n if is_prime(prime1):\n primes.append(prime1)\n if is_prime(prime2) and is_small(prime2,limit):\n primes.append(prime2)\n n = n + 1\n prime1, prime2 = get_probable_prime(n)\n\nprint(sum(primes))\n"
},
{
"alpha_fraction": 0.5657370686531067,
"alphanum_fraction": 0.5796812772750854,
"avg_line_length": 20.826086044311523,
"blob_id": "633ace959f73ad6381189d7f3219f946758144a1",
"content_id": "1cb27582bbc29bbd9bda2e5eb004f8e8c0b34131",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 23,
"path": "/problem6/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef square(num:int) -> int:\n \"\"\"return the square of num\"\"\"\n return num*num\n\ndef sum_of_squares(num:int) -> int:\n \"\"\"return sum of squres of numbers from 1 to num\"\"\"\n s = 0\n for i in range(num):\n s = s + square(i)\n return s\n\ndef square_of_sum(num:int) -> int:\n \"\"\"return the square of sum of numbers from 1 to num\"\"\"\n s = 0\n for i in range(num):\n s = s + i\n return square(s)\n\nnum = 101\n\nprint(square_of_sum(num) - sum_of_squares(num))\n"
},
{
"alpha_fraction": 0.6357827186584473,
"alphanum_fraction": 0.6773163080215454,
"avg_line_length": 23.076923370361328,
"blob_id": "258c9bf5e590a3a0435e1a133ab464b99f2ab927",
"content_id": "fead8358e43c466135be3fe08bef9c6e15e84b30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 13,
"path": "/problem1/attempt3.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef sum_of_series(upper_limit:int, factor:int) -> int:\n s = int(upper_limit/factor)\n return int(factor*s*(s+1)/2)\n\nupper_limit = 999\ndiv1 = 3\ndiv2 = 5\n\noutput = sum_of_series(upper_limit, div1) + sum_of_series(upper_limit, div2) - sum_of_series(upper_limit, div1*div2)\n\nprint(output)\n"
},
{
"alpha_fraction": 0.511226236820221,
"alphanum_fraction": 0.5457685589790344,
"avg_line_length": 17.09375,
"blob_id": "a64699b47222f5bc040c3d59b6fad0212e6f466d",
"content_id": "d082d2b2f9f13955a878a4b9a90e6a4f9ce3cbe0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 32,
"path": "/problem14/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nseries_end = 1\nlimit = 999999\n\n\ndef get_next(n:int) -> int:\n \"\"\"get the next number in series\"\"\"\n if n%2 == 0:\n return int(n/2)\n return 3*n + 1\n\ndef get_series_len(n:int) -> int:\n \"\"\"get the length of Collatz series starting at n\"\"\"\n l = 1\n while n >1:\n n = get_next(n)\n l = l + 1\n return l\n\nmax_series_len = 0\nmax_series_n = 0\nn = limit\nwhile n > 0:\n l = get_series_len(n)\n if max_series_len < l:\n max_series_len = l\n max_series_n = n\n n = n - 1\n\nprint(max_series_len)\nprint(max_series_n)\n"
},
{
"alpha_fraction": 0.5199999809265137,
"alphanum_fraction": 0.5542857050895691,
"avg_line_length": 11.428571701049805,
"blob_id": "3b9b99c61f0722de0277923d0d186f4ab310b5d3",
"content_id": "7caf0038ee431b08fc9f1758ff0dd6f1cff00b97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 14,
"path": "/problem13/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nf = open('input.txt','r')\n\ninpt = f.readlines()\n\n\ns = 0\nfor num_str in inpt:\n num = int(num_str[:13])\n s = s + num\n\nprint(s)\nprint(str(s)[:10])\n\n"
},
{
"alpha_fraction": 0.5167286396026611,
"alphanum_fraction": 0.5464683771133423,
"avg_line_length": 23.454545974731445,
"blob_id": "9e1096126615db6575eb23da9a04a9b453e7905f",
"content_id": "70e1c440e39e9ade290213a82908271c28a598fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 22,
"path": "/problem9/attempt3.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef check_sum(a:int, b:int, c:int, limit:int) -> bool:\n \"\"\"check if sum of a, b, c is less than limit\"\"\"\n return a + b + c == limit\n\ndef find_square(a:int) -> int:\n \"\"\"find the square of a given a\"\"\"\n return a*a\n\nfound = False\nlimit = 1000\na = int(limit/3) - 1\nb = int(limit/2) - 1\nfor i in range(1,a,1):\n for j in range(i+1,b,1):\n if (find_square(i) + find_square(j) == find_square(limit - i - j)):\n found = True\n break\n if found:\n break\nprint(i*j*(1000-i-j))\n"
},
{
"alpha_fraction": 0.57833331823349,
"alphanum_fraction": 0.6066666841506958,
"avg_line_length": 21.185184478759766,
"blob_id": "d482ccd124f363c0f34209c1a386b69f90103d80",
"content_id": "0c4e1030c3ec309b3a7768847d98c4dceb351e48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 600,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 27,
"path": "/problem3/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport math\n\ndef is_divisible(inpt:int, i:int) -> bool:\n \"\"\"check if inpt is divisible by i\"\"\"\n return inpt%i == 0\n\ndef is_prime(inpt:int) -> bool:\n \"\"\"check if a number is prime or not\"\"\"\n inpt_sqrt = int(math.sqrt(inpt))\n prime_flag = True\n for i in range(2,inpt_sqrt):\n if is_divisible(inpt,i):\n prime_flag = False\n break\n return prime_flag\n\n\nnum = 600851475143\nmax_factor = 0\nnum_sqrt = int(math.sqrt(num))\nfor i in range(2,num_sqrt):\n if is_prime(i) and is_divisible(num,i):\n max_factor = i\n\nprint(max_factor)\n\n"
},
{
"alpha_fraction": 0.5908359885215759,
"alphanum_fraction": 0.6254019141197205,
"avg_line_length": 27.930233001708984,
"blob_id": "afa37da18530ca024ea2af76f5b8b769858d06dd",
"content_id": "b25fd0f5eb9bf5fd3f6ffe66e946af429abca77c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1244,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 43,
"path": "/problem7/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport math\n\ndef is_divisible(inpt:int, i:int) -> bool:\n \"\"\"check if inpt is divisible by i\"\"\"\n return inpt%i == 0\n\ndef is_small(a:int, b:int) -> bool:\n \"\"\"check if a is smaller than or equal to b\"\"\"\n return a <= b\n\ndef get_probable_prime(n: int) -> [int]:\n \"\"\"prime numbers always take the form of 6n-1 or 6n+1. This function generates the\n probable prime numbers closet to n\"\"\"\n return [6*n-1, 6*n+1]\n\ndef is_prime(inpt:int) -> bool:\n \"\"\"check if a number is prime or not\"\"\"\n inpt_sqrt = int(math.sqrt(inpt))\n n = 1\n prime1, prime2 = get_probable_prime(n)\n # loop to check all numbers of the format 6n-1 and 6n+1 less than inpt_sqrt which divide inpt\n while is_small(prime2, inpt_sqrt) or is_small(prime1, inpt_sqrt):\n if is_divisible(inpt, prime1) or is_divisible(inpt, prime2):\n return False\n #get next set of probable prime numbers\n n = n + 1\n prime1, prime2 = get_probable_prime(n)\n return True\n\n\nprimes = [2,3]\nn = 1\nwhile len(primes) < 10001:\n prime1, prime2 = get_probable_prime(n)\n if is_prime(prime1):\n primes.append(prime1)\n if is_prime(prime2):\n primes.append(prime2)\n n = n + 1\n\nprint(primes[10000])\n"
},
{
"alpha_fraction": 0.29855072498321533,
"alphanum_fraction": 0.7845410704612732,
"avg_line_length": 30.363636016845703,
"blob_id": "4eabe9e79fcadc1796f2841bdb2a49d6d1d04247",
"content_id": "10fca38b5a4e649db77828b1f1c643481bdd3331",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2070,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 66,
"path": "/problem8/attempt3.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\ninpt = '73167176531330624919225119674426574742355349194934\\\n96983520312774506326239578318016984801869478851843\\\n85861560789112949495459501737958331952853208805511\\\n12540698747158523863050715693290963295227443043557\\\n66896648950445244523161731856403098711121722383113\\\n62229893423380308135336276614282806444486645238749\\\n30358907296290491560440772390713810515859307960866\\\n70172427121883998797908792274921901699720888093776\\\n65727333001053367881220235421809751254540594752243\\\n52584907711670556013604839586446706324415722155397\\\n53697817977846174064955149290862569321978468622482\\\n83972241375657056057490261407972968652414535100474\\\n82166370484403199890008895243450658541227588666881\\\n16427171479924442928230863465674813919123162824586\\\n17866458359124566529476545682848912883142607690042\\\n24219022671055626321111109370544217506941658960408\\\n07198403850962455444362981230987879927244284909188\\\n84580156166097919133875499200524063689912560717606\\\n05886116467109405077541002256983155200055935729725\\\n71636269561882670428252483600823257530420752963450'\n\n\ndef multiply(a:int, b:int) ->int:\n \"\"\"retur product of a and b\"\"\"\n return a*b\n\ndef multiply_list(num_list:[int]) ->int:\n \"\"\"return the product of numbers in a list\"\"\"\n result = 1\n for num in num_list:\n result = multiply(result,num)\n return result\n\ndef str_to_numlist(inpt:str) -> [int]:\n \"\"\"return a string as list of single digit numbers\"\"\"\n out = []\n for digit in inpt:\n out.append(int(digit))\n return out\n\ndef update_result(a:int, b:int, result:int) -> int:\n \"\"\"udpate the result based on a and b\"\"\"\n return int(result*b/a)\n\nlist_length = 13\ninpt_length = len(inpt)\ninit_list = str_to_numlist(inpt[0:list_length])\ntemp = multiply_list(init_list)\nmax_sum = temp\n\n\nfor i in range(list_length, inpt_length):\n a = init_list.pop(0)\n b = int(inpt[i])\n init_list.append(b)\n if a == 0:\n temp = multiply_list(init_list)\n else:\n temp = update_result(a, b, temp)\n if max_sum < temp:\n max_sum = temp\n\nprint(max_sum)\n"
},
{
"alpha_fraction": 0.5792682766914368,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 22.399999618530273,
"blob_id": "eabd753bf6255298f0a1f7e1c21f530bbea03ced",
"content_id": "342cf38a35e3d20228f9b9cf54b2446ec22a0c2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 35,
"path": "/problem10/attempt5.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nlimit = 2000000\ninpt_list = list(range(limit + 1))\n\ndef get_next_id(idx:int) -> int:\n \"\"\"get the id of the next non zero number from inpt_list starting at idx\"\"\"\n i = idx\n while i < limit:\n if inpt_list[i]>0:\n break\n i = i + 1\n return i\n\ndef set_idx(idx:int) -> None:\n \"\"\"set the value at idx to 0 in input list. This function will be used to\n track the number of operations\"\"\"\n global inpt_list\n inpt_list[idx] = 0\n\ndef mark_non_prime(val:int) -> None:\n \"\"\"mark all multiples of val as 0 as they are non prime\"\"\"\n tmp = val + val\n while tmp < limit:\n set_idx(tmp)\n tmp = tmp + val\n\nset_idx(1)\nprime = 2\nwhile prime < limit:\n mark_non_prime(prime)\n prime = get_next_id(prime+1)\n\ninpt_list.pop()\nprint(sum(inpt_list))\n\n"
},
{
"alpha_fraction": 0.4508928656578064,
"alphanum_fraction": 0.4955357015132904,
"avg_line_length": 13,
"blob_id": "8dc4d0f328bb2e80ff42e03cdd09d64841e8addf",
"content_id": "2cb136c5c14977ddcaa6bc1f7771438ae4a90ce3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 16,
"path": "/problem2/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef get_next(a:int, b:int) -> int:\n return a + b\n\nupper_limit = 4000000\na = 1\nb = 1\nc = a + b\ns = -2\nwhile c <= upper_limit:\n s = s + c\n c = get_next(a,b)\n a = b + c\n b = a + c\nprint(s)\n"
},
{
"alpha_fraction": 0.5198463797569275,
"alphanum_fraction": 0.5544174313545227,
"avg_line_length": 19.552631378173828,
"blob_id": "93667db74ea9fc72093ee35ee5951a342c2b1865",
"content_id": "4e15a0da388264acca74d4cf86cdd0c9596b436c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 781,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 38,
"path": "/problem14/attempt3.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nseries_end = 1\nlimit = 1000000\nseries_len = {}\n\ndef get_next(n:int) -> int:\n \"\"\"get the next number in series\"\"\"\n if n%2 == 0:\n return int(n/2)\n return 3*n + 1\n\ndef get_series_len(n:int) -> int:\n \"\"\"get the length of Collatz series starting at n\"\"\"\n global series_len\n if n == 1:\n return 1\n if str(n) in series_len.keys():\n return series_len[str(n)]\n if n%2 == 0:\n return 1 + get_series_len(int(n/2))\n else:\n return 1 + get_series_len(3*n + 1)\n\n\nmax_series_len = 0\nmax_series_n = 0\nn = 1\nwhile n < limit :\n l = get_series_len(n)\n series_len[str(n)] = l\n if max_series_len < l:\n max_series_len = l\n max_series_n = n\n n = n + 1\n\nprint(max_series_len)\nprint(max_series_n)\n"
},
{
"alpha_fraction": 0.5904977321624756,
"alphanum_fraction": 0.6199095249176025,
"avg_line_length": 25,
"blob_id": "f4bf27b0204ecbe33412b5c8a27162206980b7a9",
"content_id": "902a4cce1f9d8f33e6686f1d02885844e5583bce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/problem6/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef square(num:int) -> int:\n \"\"\"return the square of num\"\"\"\n return num*num\n\ndef sum_of_squares(num:int) -> int:\n \"\"\"return sum of squres of numbers from 1 to num\"\"\"\n return int(num*(num+1)*(2*num+1)/6)\n\ndef square_of_sum(num:int) -> int:\n \"\"\"return the square of sum of numbers from 1 to num\"\"\"\n return int((num*(num+1)/2)*(num*(num+1)/2))\n\nnum = 100\n\nprint(square_of_sum(num) - sum_of_squares(num))\n"
},
{
"alpha_fraction": 0.5464285612106323,
"alphanum_fraction": 0.5892857313156128,
"avg_line_length": 17.66666603088379,
"blob_id": "156eb7713dd79218c53d1e1e2bf0ee6efd5f2f5d",
"content_id": "cb4e2a5db1c41737fe72ad730f6324192478d699",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/problem1/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\ndef is_divisible(i:int, j:int) -> bool:\n \"\"\"return if i is divisible by j\"\"\"\n return i%j == 0\n\nupper_limit = 1000\ndiv1 = 3\ndiv2 = 5\ns = 0\n\nfor i in range(upper_limit):\n if is_divisible(i, div1) or is_divisible(i, div2):\n s = s + i\nprint(s)\n"
},
{
"alpha_fraction": 0.582577109336853,
"alphanum_fraction": 0.61524498462677,
"avg_line_length": 21,
"blob_id": "08fd56b2bb87daad7a6a08a01c58f7f437ae67a3",
"content_id": "0e31aeadb663efd53360bf51c65f4ab23bc6a65f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 25,
"path": "/problem3/attempt2.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport math\n\ndef is_divisible(inpt:int, i:int) -> bool:\n \"\"\"check if inpt is divisible by i\"\"\"\n return inpt%i == 0\n\ndef is_prime(inpt:int) -> bool:\n \"\"\"check if a number is prime or not\"\"\"\n inpt_sqrt = int(math.sqrt(inpt))\n for i in range(3,inpt_sqrt,2):\n if is_divisible(inpt,i):\n return False\n return True\n\n\nnum = 600851475143\nmax_factor = 0\nnum_sqrt = int(math.sqrt(num))\nfor i in range(3,num_sqrt,2):\n if is_prime(i) and is_divisible(num,i):\n max_factor = i\n\nprint(max_factor)\n\n"
},
{
"alpha_fraction": 0.5563282370567322,
"alphanum_fraction": 0.5785813927650452,
"avg_line_length": 21.40625,
"blob_id": "99ddf74d7db72e7fd8947a8970d9f95848f8b086",
"content_id": "8d32e80b357b2ed5d7d5bffeff4a54ac880c59bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 32,
"path": "/problem12/attempt1.py",
"repo_name": "kr-neerav/euler",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\ndef get_triangle_number(n:int) -> int:\n \"\"\"return the n th triangle number i.e. 1 + 2 + 3 + ... + n\"\"\"\n num = 0\n for i in range(1,n+1):\n num = num + i\n return num\n\ndef is_divisor(n:int, i:int) -> bool:\n \"\"\"check if n is divisible by i\"\"\"\n return n%i == 0\n\ndef get_divisors(n:int) -> [int]:\n \"\"\"get all divisors of number n\"\"\"\n divisors = []\n for i in range(1,n+1):\n if is_divisor(n,i):\n divisors.append(i)\n return divisors\n\ndivisors = []\nn = 1\ntriangle_number = 1\nwhile(len(divisors) <= 500):\n triangle_number = get_triangle_number(n)\n divisors = get_divisors(triangle_number)\n n = n + 1\n\nprint(triangle_number)\nprint(divisors)\n\n\n"
}
] | 23 |
joshhogg/learntris | https://github.com/joshhogg/learntris | 9e2a7890db501cd7d4fadf0863f98917d51f37b9 | c42118ef46276507c4f8e627ed007b96138dd612 | 45c68f80a8f27b5c99fdcd824374ba917aefda50 | refs/heads/master | 2021-05-26T15:11:55.668516 | 2014-04-22T21:42:52 | 2014-05-01T04:59:14 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5771656632423401,
"alphanum_fraction": 0.5900373458862305,
"avg_line_length": 24.309446334838867,
"blob_id": "116e05624e2eaec4ceb6b0ddc5d73f3f4f17c143",
"content_id": "90c5f1bbb0d182d2aa3a1b9d2c60204171bdb03f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7769,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 307,
"path": "/learntris.py",
"repo_name": "joshhogg/learntris",
"src_encoding": "UTF-8",
"text": "def command_split(user_command):\n\tcommands = list(user_command)\n\ti = 0\n\twhile i < len(commands):\n\t\tif commands[i].isspace() is True:\n\t\t\tcommands.pop(i)\n\t\ti = i + 1\n\ti = 0\n\twhile i < len(commands):\n\t\tif commands[i] == '?':\n\t\t\tcommands[i] += commands.pop(i+1)\n\t\ti = i + 1\n\treturn commands\n\t\ndef initialize_grid():\n\ti = 0\n\tgrid = []\n\tfor i in range(22):\n\t\tgrid.append(\". \"*10)\n\treturn grid\n\ndef print_grid(grid):\n\tprint '\\n'.join(grid)\n\ndef spawn_tetra(grid, active_tetramino):\n\ti = 0\n\tif len(active_tetramino) < 4:\n\t\twhile i < len(active_tetramino):\n\t\t\ts = list(grid[i])\n\t\t\ts_add = list(active_tetramino[i])\n\t\t\ttetra_len = len(s_add)+1\n\t\t\tx = 1\n\t\t\twhile x < tetra_len:\n\t\t\t\ts[-8-x] = s_add[-x]\n\t\t\t\tx = x + 1\n\t\t\ts = ''.join(s)\n\t\t\tgrid[i] = s.upper()\n\t\t\ti = i + 1\n\telse:\n\t\twhile i < len(active_tetramino):\n\t\t\ts = list(grid[i])\n\t\t\ts_add = list(active_tetramino[i])\n\t\t\ttetra_len = len(s_add)+1\n\t\t\tx = 1\n\t\t\twhile x < tetra_len:\n\t\t\t\ts[-6-x] = s_add[-x]\n\t\t\t\tx = x + 1\n\t\t\ts = ''.join(s)\n\t\t\tgrid[i] = s.upper()\n\t\t\ti = i + 1\t\n\treturn grid, active_tetramino\n\t\ndef given():\n\tline_count = 0\n\tuser_input = []\n\twhile len(user_input) < 22:\n\t\tline = raw_input(\"\")\n\t\tuser_input.append(line)\n\treturn user_input\n\ndef step(grid, score, clearedlines):\n\ti = 0\n\tfor i in range(len(grid)):\n\t\tif '.' in grid[i]:\n\t\t\tpass\n\t\telse:\n\t\t\tgrid[i] = \". \"*10\n\t\t\tscore = score + 100\n\t\t\tclearedlines = clearedlines + 1\n\treturn grid, score, clearedlines\n\ndef set_tetramino(tetramino):\n\tactive_tetramino = []\n\tif tetramino == 'I':\n\t\tactive_tetramino = ['. . . . ', 'c c c c ', '. . . . ', '. . . . ']\n\telif tetramino == 'O':\n\t\tactive_tetramino = ['y y ', 'y y ']\n\telif tetramino == 'Z':\n\t\tactive_tetramino = ['r r . ', '. r r ', '. . . ']\n\telif tetramino == 'S':\n\t\tactive_tetramino = ['. g g ', 'g g . ', '. . . ']\n\telif tetramino == 'J':\n\t\tactive_tetramino = ['b . . ', 'b b b ', '. . . ']\n\telif tetramino == 'L':\n\t\tactive_tetramino = ['. . o ', 'o o o ', '. . . ']\n\telif tetramino == 'T':\n\t\tactive_tetramino = ['. m . ', 'm m m ', '. . . ']\n\telse:\n\t\tpass\n\treturn active_tetramino\n\t\ndef display_tetramino(active_tetramino):\n\tprint '\\n'.join(active_tetramino)\n\t\ndef clear():\t\n\ti = 0\n\tgrid = []\n\tfor i in range(22):\n\t\tgrid.append(\". 
\"*10)\n\treturn (grid)\n\ndef rotate_ccw(active_tetramino):\n\ttetra_rotate = []\n\tfor i in range(len(active_tetramino)):\n\t\ttetra_rotate.append(active_tetramino[i].split())\n\ttemp_tetramino = []\n\t#active_tetramino = []\n\t#for i in range(len(tetra_rotate)):\n\t\t#temp_tetramino.append(' ')\n\tlast_col = len(tetra_rotate) - 1\n\tfor i in range(len(tetra_rotate)):\n\t\tfor j in range(len(tetra_rotate)):\n\t\t\tif j == 0:\n\t\t\t#temp_tetramino[j] = tetra_rotate[j][i]\n\t\t\t\ttemp_tetramino.append(tetra_rotate[j][last_col-i] + ' ')\n\t\t\telse:\n\t\t\t\ttemp_tetramino[i] = temp_tetramino[i] + (tetra_rotate[j][last_col-i] + ' ')\n\t\t#active_tetramino.append(temp_tetramino)\n\treturn temp_tetramino\n\ndef rotate_cw(active_tetramino):\n\ttetra_rotate = []\n\tfor i in range(len(active_tetramino)):\n\t\ttetra_rotate.append(active_tetramino[i].split())\n\ttemp_tetramino = []\n\t#active_tetramino = []\n\t#for i in range(len(tetra_rotate)):\n\t\t#temp_tetramino.append(' ')\n\tlast_col = len(tetra_rotate) - 1\n\tfor i in range(len(tetra_rotate)):\n\t\tfor j in range(len(tetra_rotate)):\n\t\t\tif j == 0:\n\t\t\t#temp_tetramino[j] = tetra_rotate[j][i]\n\t\t\t\ttemp_tetramino.append(tetra_rotate[last_col-j][i] + ' ')\n\t\t\telse:\n\t\t\t\ttemp_tetramino[i] = temp_tetramino[i] + (tetra_rotate[last_col-j][i] + ' ')\n\t\t#active_tetramino.append(temp_tetramino)\n\treturn temp_tetramino\n\ndef nudge_left(grid, active_tetramino):\n\trow = 0\n\tif (\n\t\tgrid[row][:len(active_tetramino[row])] != active_tetramino[row].upper() and \n\t\tgrid[row+1][:len(active_tetramino[row+1])] != active_tetramino[row+1].upper()\n\t\t):\n\t\t# Check that the 'spawning' area isn't equal to the active tetramino to\n\t\t# ensure you collide with the left wall\n\t\twhile row < len(active_tetramino):\n\n\t\t\ti = 0\n\t\t\ts = list(grid[row])\n\t\t\twhile i < len(s):\n\t\t\t\tif i == (len(s)-2):\n\t\t\t\t\ts[i] = '.'\n\t\t\t\t\t#s[i] = s[0]\n\t\t\t\telif i == (len(s)-1):\n\t\t\t\t\ts[i] = ' '\n\t\t\t\t\t#s[i] = s[1]\n\t\t\t\telse:\n\t\t\t\t\ts[i] = s[i+2]\n\t\t\t\ti += 1\n\t\t\t\t# print ''.join(s)\n\t\t\t\t# raw_input()\n\t\t\ts = ''.join(s)\n\t\t\tgrid[row] = s\n\t\t\trow += 1\n\t\t\t\n\telse:\n\t\tpass\n\treturn grid\n\t\ndef nudge_right(grid, active_tetramino):\n\trow = 0\n\tif (\n\t\t(active_tetramino[row][len(active_tetramino[row])-2:] == '. ' and \n\t\tactive_tetramino[row+1][len(active_tetramino[row])-2:] == '. ')\n\t\t):\n\t\t# Check that the 'tetramino' isn't all '. ' on the right side, erroneously causing it to stop moving\n\t\tif (\n\t\tgrid[row][-len(active_tetramino[row])+2:] != active_tetramino[row][:len(active_tetramino[row])-2].upper() \n\t\tand \n\t\tgrid[row+1][-len(active_tetramino[row+1])+2:] != active_tetramino[row+1][:len(active_tetramino[row])-2].upper()\n\t\t):\n\t\t\t# Check that the 'spawning' area isn't equal to the active tetramino,\n\t\t\t# minus the row which is all '. 
', to ensure you collide with the right wall\n\t\t\twhile row < len(active_tetramino):\n\t\t\t\ts = list(grid[row])\n\t\t\t\ti = len(s)-1\n\t\t\t\twhile i >= 0:\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ts[i] = '.'\n\t\t\t\t\t\t# s[i] = s[len(s)-2]\n\t\t\t\t\telif i == 1:\n\t\t\t\t\t\ts[i] = ' '\n\t\t\t\t\t\t# s[i] = s[len(s)-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\ts[i] = s[i-2]\n\t\t\t\t\ti -= 1\n\t\t\t\ts = ''.join(s)\n\t\t\t\tgrid[row] = s\n\t\t\t\trow += 1\n\t\telse:\n\t\t\tpass\n\telif (\n\tgrid[row][-len(active_tetramino[row]):] != active_tetramino[row].upper() \n\tand grid[row+1][-len(active_tetramino[row+1]):] != active_tetramino[row+1].upper()\n\t):\n\t\t# Check that the 'spawning' area isn't equal to the active tetramino to\n\t\t# ensure you collide with the right wall\n\t\twhile row < len(active_tetramino):\n\t\t\ts = list(grid[row])\n\t\t\ti = len(s)-1\n\t\t\twhile i >= 0:\n\t\t\t\tif i == 0:\n\t\t\t\t\ts[i] = '.'\n\t\t\t\t\t# s[i] = s[len(s)-2]\n\t\t\t\telif i == 1:\n\t\t\t\t\ts[i] = ' '\n\t\t\t\t\t# s[i] = s[len(s)-1]\n\t\t\t\telse:\n\t\t\t\t\ts[i] = s[i-2]\n\t\t\t\ti -= 1\n\t\t\ts = ''.join(s)\n\t\t\tgrid[row] = s\n\t\t\trow += 1\n\telse:\n\t\tpass\n\treturn grid\t\n\ndef nudge_down(grid):\n\trow = 0\n\ts = []\n\twhile row < 2:\n\t\ts.append(grid[row])\n\t\trow += 1\n\trow -= 1\n\tgrid[row-1] = '. '*10\n\ti = 0\n\twhile i < len(s):\n\t\tgrid[row] = s[i]\n\t\trow += 1\n\t\ti += 1\n\treturn grid\n\ndef quit():\n\tpass\n\t\ndef execute(grid, score, clearedlines, active_tetramino):\n\n\ti = 0\n\tuser_command = raw_input(\"\")\n\tcommands = command_split(user_command)\n\tcommand_length = len(commands)\n\tindex_length = command_length - 1\n\t\n\twhile i < command_length:\n\t\tif commands[i] == 'p':\n\t\t\tprint_grid(grid)\n\t\telif commands[i] == 'g':\n\t\t\tgrid = given()\n\t\telif commands[i] == 'P':\n\t\t\tprint_grid(grid)\n\t\telif commands[i] == 'c':\n\t\t\tgrid = clear()\n\t\telif commands[i] == 's':\n\t\t\tgrid, score, clearedlines = step(grid, score, clearedlines)\n\t\telif commands[i] == '?s':\n\t\t\tprint score\n\t\telif commands[i] == '?n':\n\t\t\tprint clearedlines\n\t\telif commands[i] in {'I', 'O', 'Z', 'S', 'J', 'L', 'T'}:\n\t\t\ttetramino = commands[i]\n\t\t\tactive_tetramino = set_tetramino(tetramino)\n\t\t\tgrid, active_tetramino = spawn_tetra(grid, active_tetramino)\n\t\telif commands[i] == ')':\n\t\t\tactive_tetramino = rotate_cw(active_tetramino)\n\t\t\tgrid, active_tetramino = spawn_tetra(grid, active_tetramino)\n\t\telif commands[i] == '(':\n\t\t\tactive_tetramino = rotate_ccw(active_tetramino)\n\t\t\tgrid, active_tetramino = spawn_tetra(grid, active_tetramino)\n\t\telif commands[i] == '<':\n\t\t\tgrid = nudge_left(grid, active_tetramino)\n\t\telif commands[i] == '>':\n\t\t\tgrid = nudge_right(grid, active_tetramino)\n\t\telif commands[i] == 'v':\n\t\t\tgrid = nudge_down(grid)\n\t\telif commands[i] == 't':\n\t\t\tdisplay_tetramino(active_tetramino)\n\t\telif commands[i] == 'q':\n\t\t\tquit()\n\t\telse:\n\t\t\tprint \"\"\n\t\ti = i + 1\n\t\t\n\tif commands == []:\n\t\texecute(grid, score, clearedlines, active_tetramino)\n\telif commands[index_length] != 'q':\n\t\texecute(grid, score, clearedlines, active_tetramino)\n\telse:\n\t\tquit()\n\ngrid = initialize_grid()\nscore = 0\nclearedlines = 0\nactive_tetramino = ''\n\nexecute(grid, score, clearedlines, active_tetramino)"
}
] | 1 |
ganyuanyuan/chess_exercise | https://github.com/ganyuanyuan/chess_exercise | d4611a2b1e57482a4c771d2bb7031d93d1903123 | 53d20f015be7eceaa54760f33283811b226d65d8 | dc4a324b4f3414defe0e21122a9547991f8f027f | refs/heads/master | 2020-06-28T04:16:20.341601 | 2019-08-02T01:20:56 | 2019-08-02T01:20:56 | 200,140,517 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5271317958831787,
"alphanum_fraction": 0.531438410282135,
"avg_line_length": 20.5,
"blob_id": "77d90a2eec5f2026e822fca1f38d5c57991c714f",
"content_id": "6fe941ac23e1ae170aa3e4dc0bb43c7f5df25c94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 54,
"path": "/main.py",
"repo_name": "ganyuanyuan/chess_exercise",
"src_encoding": "UTF-8",
"text": "from chessman import *\nfrom chessboard import *\nfrom engine import *\nimport time\n\ndef main():\n chessboard= Chessboard()\n chessboard.init_board()\n chessboard.print_board()\n engine = Engine(chessboard)\n\n count = 0\n while True:\n chessman = Chessman()\n chessman.set_color('X')\n i = input('format: x, y\\n')\n if i == 'stop':\n break\n engine.parse_user_input(i, chessman)\n\n chessboard.set_chessman(chessman)\n count += 1\n chessboard.print_board()\n if engine.is_wonman(chessman):\n print('you won!')\n break\n\n chessman = Chessman()\n chessman.set_color('O')\n print('computer go...')\n time.sleep(1)\n engine.computer_go(chessman)\n\n chessboard.set_chessman(chessman)\n count +=1\n time.sleep(1)\n chessboard.print_board()\n if engine.is_wonman(chessman):\n print('computer won!')\n break\n\n\n\n\n\n\nif __name__ == '__main__':\n while True:\n print('start!')\n main()\n is_stop = input('stop?\\n')\n if is_stop == 'yes':\n break\n print('stop!')\n"
},
{
"alpha_fraction": 0.738095223903656,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 13,
"blob_id": "d698f3a06aaf220bd0a07270c45e82919b858dfd",
"content_id": "fcad75827efd4f0db7cdac5964a6338f10df143b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 42,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/README.md",
"repo_name": "ganyuanyuan/chess_exercise",
"src_encoding": "UTF-8",
"text": "# chess_exercise\n\nThis is a console game.\n"
},
{
"alpha_fraction": 0.594059407711029,
"alphanum_fraction": 0.594059407711029,
"avg_line_length": 20.571428298950195,
"blob_id": "80ca76047e8655972c6d9a34118e891c9845e7dc",
"content_id": "fd8a104bec527dfc71fc25f686ae3998cdf3e03c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/chessman.py",
"repo_name": "ganyuanyuan/chess_exercise",
"src_encoding": "UTF-8",
"text": "class Chessman(object):\n def __init__(self):\n pass\n def set_position(self, position):\n self.position = position\n\n def get_position(self):\n return self.position\n\n def set_color(self, color):\n self.color = color\n\n def get_color(self):\n return self.color \n"
},
{
"alpha_fraction": 0.5638998746871948,
"alphanum_fraction": 0.5718050003051758,
"avg_line_length": 28.19230842590332,
"blob_id": "c14a831b0a0db9c8c2aa3baf679e2153bcc3618c",
"content_id": "2ef3e10e57b1ccffd070b658f77b465f8a764b77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 759,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 26,
"path": "/chessboard.py",
"repo_name": "ganyuanyuan/chess_exercise",
"src_encoding": "UTF-8",
"text": "class Chessboard(object):\n board_size = 5\n\n def __init__(self):\n self.__board = [[0 for _ in range(Chessboard.board_size)] for _ in range(Chessboard.board_size)]\n\n def init_board(self):\n for i in range(Chessboard.board_size):\n for j in range(Chessboard.board_size):\n self.__board[i][j] = '_'\n\n def print_board(self):\n for i in range(Chessboard.board_size):\n print(self.__board[i])\n\n\n def set_chess(self, pos, color):\n self.__board[pos[0]][pos[1]] = color\n\n def set_chessman(self, chessman):\n pos = chessman.get_position()\n color = chessman.get_color()\n self.set_chess(pos, color)\n\n def get_chess(self, pos):\n return self.__board[pos[0]][pos[1]]\n"
},
{
"alpha_fraction": 0.45998504757881165,
"alphanum_fraction": 0.4734480082988739,
"avg_line_length": 26.85416603088379,
"blob_id": "f4849c79862f6fc92364b5e9088a35e5d8b9c7db",
"content_id": "be4f0ebe3222126a746688dd7581ec3629d83615",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1337,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 48,
"path": "/engine.py",
"repo_name": "ganyuanyuan/chess_exercise",
"src_encoding": "UTF-8",
"text": "import random\n\nclass Engine(object):\n\n def __init__(self, chessboard):\n self.__chessboard = chessboard\n\n\n def parse_user_input(self, input, chessman):\n res = input.split(',')\n pos_x = int(res[0])\n pos_y = int(res[1])\n chessman.set_position((pos_x, pos_y))\n\n def computer_go(self, chessman):\n while True:\n pos_x = random.randint(0,4)\n pos_y = random.randint(0,4)\n if self.__chessboard.get_chess((pos_x,pos_y))=='_':\n print('computer go:', pos_x, pos_y)\n chessman.set_position((pos_x,pos_y))\n break\n\n def is_won(self, pos, color):\n count = 0\n for pos_x in range(5):\n if self.__chessboard.get_chess((pos_x,pos[1])) == color:\n count += 1\n if count >= 3:\n return True\n else:\n count = 0\n\n count = 0\n for pos_y in range(5):\n if self.__chessboard.get_chess((pos[0], pos_y)) == color:\n count+= 1\n if count >=3:\n return True\n else:\n count = 0\n\n return False\n\n def is_wonman(self,chessman):\n pos = chessman.get_position()\n color = chessman.get_color()\n return self.is_won(pos, color)\n"
}
] | 5 |
zahera-fatima/password-generator | https://github.com/zahera-fatima/password-generator | 7193e32fa26ff4b137303362fe8d23905812ff70 | a09f1a111bdebe69e90c5579870c371d55a2cb93 | 1762cad417ea160e088dfbd68794c7a2d2d6792e | refs/heads/master | 2020-11-29T13:47:34.506416 | 2019-12-25T16:32:54 | 2019-12-25T16:32:54 | 230,127,962 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7228260636329651,
"alphanum_fraction": 0.7228260636329651,
"avg_line_length": 29,
"blob_id": "e4594d14a250d8cccb104a364555d313699b73ce",
"content_id": "f13108ed6fdc63dfdeafc2853ddba0e27d2c5c82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 6,
"path": "/passgen.py",
"repo_name": "zahera-fatima/password-generator",
"src_encoding": "UTF-8",
"text": "import string\r\nimport random\r\nn = int(input())\r\nchars = string.ascii_letters + string.digits + string.punctuation\r\npassw = \"\".join(random.choice(chars) for x in range(n))\r\nprint(passw)"
}
] | 1 |
suganthicj/csk | https://github.com/suganthicj/csk | 390ec8da4a5a4b2bd972c1420681e84042693e6f | 0389198b4e339f8e01fe532a72d0fe6fc9e8a3f1 | 32635b85b9d43cfa1dad52210396ed48bb480246 | refs/heads/master | 2022-01-19T05:40:59.964442 | 2019-07-22T11:26:47 | 2019-07-22T11:26:47 | 198,209,743 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46341463923454285,
"alphanum_fraction": 0.6585366129875183,
"avg_line_length": 19.5,
"blob_id": "3eade5126d079e9aaa3042d03a506fbe01600317",
"content_id": "7cef76d9fde0bc96ce71471872d8ef699aa02704",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 2,
"path": "/csk.py",
"repo_name": "suganthicj/csk",
"src_encoding": "UTF-8",
"text": "X11,Y11=(input().split())\nprint(X11+Y11)\n"
}
] | 1 |
HanzHaxors/WordLister | https://github.com/HanzHaxors/WordLister | 0ec9eaee47a3bcda6be45fbf5a62e9b34a6fbf46 | e7dc3087c7bf2d2e9b1af95a4415c62b2ca00114 | 351b2a70f43dae471864119b0f34da011cd6553f | refs/heads/main | 2023-02-28T03:29:56.821945 | 2021-02-08T15:06:10 | 2021-02-08T15:06:10 | 281,030,488 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6050847172737122,
"alphanum_fraction": 0.6118643879890442,
"avg_line_length": 19.851852416992188,
"blob_id": "da86b66883159a3f8276b3ae0c7ac3b6c3b7bfce",
"content_id": "a1090c11fda2f80ab6defca019e850d6b7977e1e",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 54,
"path": "/wordlister.py",
"repo_name": "HanzHaxors/WordLister",
"src_encoding": "UTF-8",
"text": "import itertools\r\n\r\nusage_banner = \"\"\"\r\nUSAGE\r\npython3 wordlister.py <absolute_file_input> <absolute_file_output>\r\n\r\nExample:\r\npython3 wordlister.py /mnt/e/words.txt /mnt/e/result.txt\r\n\r\nHH\r\n\"\"\"\r\n\r\ndef main():\r\n\timport sys, os\r\n\targv = sys.argv[1:]\r\n\r\n\twordlist = None\r\n\tgenerated = list()\r\n\r\n\ttry:\r\n\t\tfile_read = open(argv[0], 'r')\r\n\t\tfile_write = open(argv[1], 'w')\r\n\t\twordlist = file_read.read().split('\\n')\r\n\r\n\t\twhile '' in wordlist:\r\n\t\t\twordlist.remove('')\r\n\r\n\t\trepeat = len(wordlist)\r\n\r\n\t\ttry:\r\n\t\t\tfor repeat in range(1, repeat):\r\n\t\t\t\tfor generated_string in itertools.product(wordlist, repeat=repeat):\r\n\t\t\t\t\tprint(f\"[*] {''.join(generated_string)}\", end='\\r')\r\n\t\t\t\t\tgenerated.append(''.join(generated_string))\r\n\t\texcept KeyboardInterrupt:\r\n\t\t\tfile_read.close()\r\n\r\n\t\t\tprint(f\"\\n[i] Writing to file {argv[1]}\")\r\n\t\t\tfile_write.write('\\n'.join(generated))\r\n\t\t\tfile_write.close()\r\n\t\texcept Exception as e:\r\n\t\t\tprint(f\"\\n[!] {e}\")\r\n\t\tfinally:\r\n\t\t\tfile_read.close()\r\n\r\n\t\t\tprint(f\"\\n[i] Writing to file {argv[1]}\")\r\n\t\t\tfile_write.write('\\n'.join(generated))\r\n\t\t\tfile_write.close()\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\tprint(usage_banner)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7212543487548828,
"avg_line_length": 18.133333206176758,
"blob_id": "d6886f0c4fecff6d58656e7b915b1e462de01f61",
"content_id": "918621eabfe7fafe7a75fa4c339621d27227a1c1",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 287,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 15,
"path": "/README.md",
"repo_name": "HanzHaxors/WordLister",
"src_encoding": "UTF-8",
"text": "# WordLister\nGives you a wordlist of your choice\n\n# Usage\n```\nUSAGE\npython3 wordlister.py <absolute_file_input> <absolute_file_output>\n\nExample:\npython3 wordlister.py /mnt/e/words.txt /mnt/e/result.txt\n```\n\n# ToDOs\n - [ ] Upload a package to PyPi\n - [ ] Enable non-absolute file locator\n"
}
] | 2 |
dboyliao/SmallTools | https://github.com/dboyliao/SmallTools | e770256190a73d58e5689416383a944a4a7c728b | 00043ecab05dd4ee840d1e99ad7630db26ddc492 | 389b1a306fac1804f7d6cc5acb6e24a8ee7d6431 | refs/heads/master | 2023-05-27T15:31:57.139286 | 2023-05-16T01:17:12 | 2023-05-16T01:17:12 | 37,348,963 | 3 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5738396644592285,
"alphanum_fraction": 0.574442446231842,
"avg_line_length": 26.649999618530273,
"blob_id": "46c22c95c74ae7ca872413dc7033246ab077de3f",
"content_id": "bc6a52b7197f3fae52c8ad440548f5e3442f550e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1659,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 60,
"path": "/Python/remove_all_exec.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nfrom __future__ import print_function\nimport argparse\nimport os\n\n\ndef is_executable(path):\n return os.access(path, os.X_OK) and not os.path.isdir(path)\n\n\ndef remove_file(path, run_dry=False, verbose=False):\n if is_executable(path):\n if run_dry:\n print(\"Would Removing {}\".format(path))\n elif verbose:\n print(\"Removing {}\".format(path))\n if not run_dry:\n os.remove(path)\n\n\ndef main(root_path, run_dry=False, recursive=False, verbose=False):\n if recursive:\n for current_dir, dirs, files in os.walk(root_path):\n for path in map(lambda s: os.path.join(current_dir, s), files):\n remove_file(path, run_dry, verbose)\n else:\n for fname in os.listdir(root_path):\n remove_file(fname, run_dry, verbose)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"root_path\",\n metavar=\"ROOT_DIR\",\n nargs=\"?\",\n default=os.getcwd(),\n help=\"root directory where to start with (default: '.')\",\n )\n parser.add_argument(\n \"-r\",\n \"--recursive\",\n dest=\"recursive\",\n help=\"remove executable recursively (default: false)\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n dest=\"verbose\",\n help=\"run in verbose mode.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-n\", \"--dry-run\", dest=\"run_dry\", help=\"dry run mode\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n main(args.root_path, args.run_dry, args.recursive, args.verbose)\n"
},
{
"alpha_fraction": 0.49203822016716003,
"alphanum_fraction": 0.493630588054657,
"avg_line_length": 26.91111183166504,
"blob_id": "882cf7bc714fe5a73f89ba396f0cfd1aae54cafc",
"content_id": "8ce3c49e2c34228463825f1c7ad03d2432f71a7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1256,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 45,
"path": "/Python/remove_trash",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding: utf-8 -*-\nimport argparse\nimport os\nimport subprocess\n\n\ndef main(verbose=False):\n\n for current_dir, dirs, fnames in os.walk(os.path.abspath(\".\")):\n\n for dirname in dirs:\n if dirname in [\"__pycache__\"]:\n path = os.path.join(current_dir, dirname)\n if verbose:\n print(\"removing {}\".format(path))\n subprocess.call([\"rm\", \"-r\", path])\n\n for fname in fnames:\n if fname in [\".DS_Store\"]:\n path = os.path.join(current_dir, fname)\n if verbose:\n print(\"removing {}\".format(path))\n subprocess.call([\"rm\", path])\n\n _, ext = os.path.splitext(fname)\n if ext in [\".pyc\"]:\n path = os.path.join(current_dir, fname)\n if verbose:\n print(\"removing {}\".format(path))\n subprocess.call([\"rm\", path])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"running in verbose mode.\",\n dest=\"verbose\",\n )\n\n args = parser.parse_args()\n main(args.verbose)\n"
},
{
"alpha_fraction": 0.5439469218254089,
"alphanum_fraction": 0.5572139024734497,
"avg_line_length": 15.75,
"blob_id": "46796a501edcff7dad2714c46bca95d45f1d0e2f",
"content_id": "62289160674572b31f8fe3ff9b8cd0121e71af4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Swift",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 36,
"path": "/Swift/rand_sentence/main.swift",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "import Foundation\n\n/*\nfunc readOneLine() -> String {\n\n var result = String()\n var c = getchar()\n\n while c != EOF && c != 10 {\n result.append(UnicodeScalar(UInt32(c)))\n c = getchar()\n }\n\n return result\n}\n*/\n\nfunc main() {\n \n var sentences = [String]()\n var oneSentense = readLine()\n\n while oneSentense != nil {\n sentences.append(oneSentense!)\n oneSentense = readLine()\n }\n let index = Int(arc4random_uniform(UInt32(sentences.count)))\n\n if sentences.count > 0 {\n print(sentences[index])\n } else {\n print(\"\")\n }\n}\n\nmain()\n"
},
{
"alpha_fraction": 0.605042040348053,
"alphanum_fraction": 0.6176470518112183,
"avg_line_length": 18.83333396911621,
"blob_id": "540af5e86f68b9fe32746813ddb239b573f4a69c",
"content_id": "6bb6bfcb3338c26853fb3629ff7d27a1dee3b6f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 238,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 12,
"path": "/Python/randomSentences",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport sys\nfrom random import choices\n\n\ndef main():\n quotes = [line for line in sys.stdin.readlines() if not line.startswith(\"#\")]\n print(choices(quotes, k=1)[0])\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5569502711296082,
"alphanum_fraction": 0.5592747330665588,
"avg_line_length": 30.632352828979492,
"blob_id": "5727c95e5536728925f9099ce25b02d2034608af",
"content_id": "f8930b3ab830be9e0a1c4b17fa3e1c051e932c1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 68,
"path": "/Python/cmake-find-package.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport argparse\nfrom subprocess import run as _run\nfrom tempfile import TemporaryDirectory\nimport sys\nfrom shutil import which as _which\n\n\ndef cmake_find_package(\n package_names,\n language=\"CXX\",\n mode=\"EXIST\",\n compiler_id=\"GNU\",\n print_cmake_cmd=False,\n dry_run=False,\n):\n cmake = _which(\"cmake\")\n if cmake is None:\n print(\"CMake not available on the system, abort\")\n return 1\n ok_flags = []\n with TemporaryDirectory(prefix=\"cmake_find_package_\") as tmp_dir:\n for name in package_names:\n cmd = [\n cmake,\n f\"-DNAME={name}\",\n f\"-DLANGUAGE={language}\",\n f\"-DMODE={mode}\",\n f\"-DCOMPILER_ID={compiler_id}\",\n \"--find-package\",\n ]\n if dry_run:\n print(\"Would run CMake command:\", \" \".join(cmd))\n elif print_cmake_cmd:\n print(\"Will run CMake command:\", \" \".join(cmd))\n if not dry_run:\n comp_proc = _run(cmd, cwd=tmp_dir, check=False)\n ok_flags.append(comp_proc.returncode == 0)\n else:\n ok_flags.append(True)\n ret_code = 0 if all(ok_flags) else 1\n return ret_code\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\"package_names\", nargs=\"+\", help=\"the package name\")\n parser.add_argument(\"--language\", default=\"CXX\", help=\"the language id for CMake\")\n parser.add_argument(\n \"-m\",\n \"--mode\",\n default=\"EXIST\",\n choices=[\"EXIST\", \"COMPILE\", \"LINK\"],\n help=\"the configure mode\",\n )\n parser.add_argument(\n \"-c\", \"--compiler-id\", default=\"GNU\", help=\"the compiler id for CMake\"\n )\n parser.add_argument(\n \"--print-cmake-cmd\",\n action=\"store_true\",\n help=\"print the CMake command if given\",\n )\n parser.add_argument(\"-n\", \"--dry-run\", action=\"store_true\", help=\"dry run mode\")\n kwargs = vars(parser.parse_args())\n sys.exit(cmake_find_package(**kwargs))\n"
},
{
"alpha_fraction": 0.5898672938346863,
"alphanum_fraction": 0.592279851436615,
"avg_line_length": 32.15999984741211,
"blob_id": "ae3b65b835064102ddf47e778ee8a3c06c6ec377",
"content_id": "89113ca46dd9ad8adc312e5f4fdd9c321ff4f596",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 829,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 25,
"path": "/Python/tf/log_graph",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding:utf8 -*-\nimport argparse\nimport tensorflow as tf\n\n\ndef main(pb_file, logdir=\"log\"):\n graph_def = tf.GraphDef()\n with tf.gfile.FastGFile(pb_file, \"rb\") as fid:\n graph_def.ParseFromString(fid.read())\n graph = tf.Graph()\n with graph.as_default():\n tf.import_graph_def(graph_def, name=\"\")\n tf.summary.FileWriter(logdir=logdir, graph=graph).close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"pb_file\", metavar=\"GRAPH.pb\",\n help=\"input graph protobuf file\")\n parser.add_argument(\"-L\", \"--log-dir\", dest=\"logdir\",\n default=\"log\",\n help=\"output tensorboard log directory (default: %(default)s)\")\n args = vars(parser.parse_args())\n main(**args)\n"
},
{
"alpha_fraction": 0.5272727012634277,
"alphanum_fraction": 0.5282555222511292,
"avg_line_length": 28.492753982543945,
"blob_id": "4d505b1db7d5e2fa89b50c56a9a7f83d75328811",
"content_id": "4136e07ffc29e918d10817a15d9b93b5384df7c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2035,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 69,
"path": "/Python/rename_by_replace",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding: utf-8 -*-\n\n\"\"\"\nRename all files and directories in a directory.\n\nThis program will replace specific characters in the filename with\ndesired characters. ex: replcae \" \" with \"_\".\n\"\"\"\n\nimport os, argparse\n\n\ndef main(args):\n replace_str = args.replace_str\n with_str = args.with_str\n root_path = args.path\n verbose = args.verbose\n\n for current_dir, dirs, fnames in os.walk(root_path):\n for fname in fnames:\n if replace_str in fname:\n old_name = os.path.join(current_dir, fname)\n new_name = os.path.join(\n current_dir, fname.replace(replace_str, with_str)\n )\n if verbose:\n print(\"Rename {} to {}\".format(old_name, new_name))\n os.rename(old_name, new_name)\n for dirname in dirs:\n if replace_str in dirname:\n old_name = os.path.join(current_dir, dirname)\n new_name = os.path.join(\n current_dir, dirname.replace(replace_str, with_str)\n )\n if verbose:\n print(\"Rename {} to {}\".format(old_name, new_name))\n os.rename(old_name, new_name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(prog=\"rename_by_replace\", description=__doc__)\n parser.add_argument(\n \"-r\",\n \"--replace\",\n dest=\"replace_str\",\n help=\"string in the file name to be replaced\",\n metavar=\"STRING\",\n default=\" \",\n )\n parser.add_argument(\n \"-w\",\n \"--with\",\n dest=\"with_str\",\n help=\"string to be replaced with\",\n metavar=\"STRING\",\n default=\"_\",\n )\n parser.add_argument(\n \"-p\",\n \"--path\",\n help=\"path to the target directory.\",\n metavar=\"PATH\",\n dest=\"path\",\n default=\".\",\n )\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\")\n args = parser.parse_args()\n main(args)\n"
},
{
"alpha_fraction": 0.5903614163398743,
"alphanum_fraction": 0.5903614163398743,
"avg_line_length": 21.133333206176758,
"blob_id": "2e59775f4ba50f2f0bac4fba1e0aa8699c127d10",
"content_id": "ca5117ec57d45b43fa96ca06d65cde432bf92fac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/Swift/rand_sentence/Makefile",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "SWIFTC=xcrun -sdk macosx swiftc\nFLAGS=''\nSOURCE=main.swift\n\nrandomSentences:$(SOURCE)\n\t@echo \"Compiling $@...\"\n\t@$(SWIFTC) $(FLAGS) $(SOURCE) -o $@ && \\\n\t echo \"Copy executable to $$(realpath ../bin/$@)\" && \\\n\t cp -f $@ ../bin/$@\n\t@make clean\n\nclean:\n\t@echo \"Clean up...\" && \\\n\t rm -f randomSentences && \\\n\t echo \"Cleaning sucess.\"\n"
},
{
"alpha_fraction": 0.563811182975769,
"alphanum_fraction": 0.5664335489273071,
"avg_line_length": 27.625,
"blob_id": "5466d615da423d0b4182c280680dcfec5d77d827",
"content_id": "35fc576e0b86c9e32abccd8e3cdbf6e95dec93f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1144,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 40,
"path": "/Python/convert_heif.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport argparse\nimport os\nimport sys\n\nimport pyheif\nfrom PIL import Image\n\n\ndef convert_heif(heif_names, to_format=\"png\"):\n for heif_name in heif_names:\n img_name, ext = os.path.splitext(os.path.basename(heif_name))\n if ext.lower() not in [\".heif\", \".heic\"]:\n print(f\"expecting heif image file, get {heif_name}\")\n return 1\n heif = pyheif.read(heif_name)\n img = Image.frombytes(\n heif.mode, # mode\n heif.size, # size\n heif.data, # data\n \"raw\", # decoder_name\n heif.mode,\n heif.stride,\n )\n out_fname = f\"{img_name}.{to_format}\"\n img.save(out_fname)\n print(f\"{out_fname} saved\")\n return 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--to-format\", dest=\"to_format\", default=\"png\", help=\"the output image format\"\n )\n parser.add_argument(\n \"heif_names\", metavar=\"IMAGE.heif\", nargs=\"+\", help=\"the input heif image\"\n )\n kwargs = vars(parser.parse_args())\n sys.exit(convert_heif(**kwargs))"
},
{
"alpha_fraction": 0.48027127981185913,
"alphanum_fraction": 0.5092478394508362,
"avg_line_length": 25.161291122436523,
"blob_id": "acdb85936b0bb959aa124e6c859b0f04545953b1",
"content_id": "2a899f74b88293e2837008472ad3b89b19e022b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1622,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 62,
"path": "/Python/lyrics_formater.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding: utf-8 -*-\nimport argparse\n\n\ndef combine_two(fname1, fname2=None, max_line_length=75):\n\n if fname2 is None:\n with open(fname1) as rf:\n content = rf.read()\n\n return content\n\n content = \"\"\n line_format = \"{0:<\" + str(max_line_length) + \"}{1}\"\n with open(fname1) as rf1:\n with open(fname2) as rf2:\n line1 = rf1.readline()\n line2 = rf2.readline()\n while len(line1) > 0 or len(line2) > 0:\n if len(line2) > 0:\n line1 = line1.strip()\n else:\n line2 = \"\\n\"\n content += line_format.format(line1, line2)\n line1 = rf1.readline().strip()\n line2 = rf2.readline()\n\n return content\n\n\ndef main(files, max_line_length):\n\n if not len(files) % 2 == 0:\n files.append(None)\n\n with open(\"lyrics.txt\", \"w\") as wf:\n for i in range(len(files) / 2):\n file1 = files[2 * i]\n file2 = files[2 * i + 1]\n content = combine_two(file1, file2, max_line_length)\n wf.write(content)\n wf.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"files\", metavar=\"TXT_FILE\", nargs=\"+\", help=\"lyrics files to be aligned\"\n )\n parser.add_argument(\n \"-l\",\n \"--max-line-length\",\n default=75,\n dest=\"line_length\",\n metavar=\"INTEGER\",\n help=\"maximum line length (default: 75)\",\n )\n args = parser.parse_args()\n\n main(args.files, args.line_length)\n"
},
{
"alpha_fraction": 0.5580986142158508,
"alphanum_fraction": 0.5598591566085815,
"avg_line_length": 24.81818199157715,
"blob_id": "134e0c371d1033f122a1456b091b7ae754f2f68c",
"content_id": "a976f9ed58ee7c9b05825d5c41faa7af754ea4ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1136,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 44,
"path": "/Python/count_pattern",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# Simple counting program\nfrom __future__ import print_function\nimport re\nimport argparse\n\n\ndef count_pattern(fname, pattern, verbose=False):\n count = 0\n pattern = re.compile(pattern)\n with open(fname) as rf:\n l = rf.readline()\n while not l == \"\":\n match = pattern.findall(l)\n if verbose:\n print(match)\n count += len(match)\n l = rf.readline()\n return count\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"count the number of matched pattern in the file.\"\n )\n parser.add_argument(\"fname\", metavar=\"FILE\", help=\"file name\")\n parser.add_argument(\n \"-e\",\n \"--pattern\",\n metavar=\"PATTERN\",\n default=\"[^\\s]\",\n help=\"regular expression pattern\",\n dest=\"pattern\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"run in verbose mode\",\n dest=\"verbose\",\n )\n args = parser.parse_args()\n count = count_pattern(args.fname, args.pattern, args.verbose)\n print(count)\n"
},
{
"alpha_fraction": 0.535883903503418,
"alphanum_fraction": 0.5400176048278809,
"avg_line_length": 28.45595932006836,
"blob_id": "6345737d56a05c6f791564df672e092371e7848e",
"content_id": "b55876f1e720c6623bd32ad5d76804a222fae048",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11370,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 386,
"path": "/Python/tunnel",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding: utf8 -*-\nfrom __future__ import print_function\nimport re\nimport subprocess as sp\nimport argparse\nimport sys\nimport os\nimport yaml\n\n\nif os.name != \"posix\":\n print(\"Only support Unix/Linux System. Sorry....\", file=sys.stderr)\n sys.exit(3)\n\nDEFAULT_CONFIG_FILE = os.path.join(os.path.expanduser(\"~\"), \".tunnel_config.yaml\")\n\n\nclass _ConfigParser(object):\n def __init__(self, path=None):\n if path and os.path.exists(path):\n with open(path) as fid:\n config_data = yaml.load(fid)\n else:\n config_data = {\"HEAD\": \"default\", \"default\": {}}\n self._path = path\n self._head = config_data.pop(\"HEAD\")\n self._data = config_data\n\n @property\n def path(self):\n return self._path\n\n @property\n def data(self):\n return self._data.copy()\n\n @property\n def head_data(self):\n return self._data.get(self._head, {})\n\n @property\n def head(self):\n return self._head\n\n @head.setter\n def head(self, new_head):\n assert isinstance(new_head, str), \"HEAD has to be string\"\n self._head = new_head\n\n def get(self, key):\n return self._data.get(key, None)\n\n def set(self, key, value):\n self._data[key] = value\n\n def update(self, new_config):\n assert isinstance(\n new_config, dict\n ), \"new config should be a dict, get {}\".format(new_config)\n if self._head in self._data:\n self._data[self._head].update(new_config)\n else:\n self._data[self._head] = new_config\n\n def save(self, path=DEFAULT_CONFIG_FILE):\n with open(path, \"w\") as wf:\n data = {}\n data[\"HEAD\"] = self._head\n data.update(self._data)\n yaml.dump(data, wf, default_flow_style=False)\n\n def __str__(self):\n data = {}\n data[self._head] = self._data[self._head]\n return yaml.dump(data, default_flow_style=False)\n\n\ndef bind(\n port, user_name=None, dest=None, id_file=None, host=\"127.0.0.1\", ssh_port=\"22\"\n):\n \"\"\"\n Binding local port to remote port\n \"\"\"\n config_parser = _ConfigParser(DEFAULT_CONFIG_FILE)\n data = config_parser.data\n\n if user_name:\n data[\"user_name\"] = user_name\n if dest:\n data[\"dest\"] = dest\n assert (\n data.get(\"user_name\", None) is not None\n ), \"user name not given and can't find it in the config file\"\n assert (\n data.get(\"dest\", None) is not None\n ), \"destination not given and can't find it in the config file\"\n assert port is not None, \"no port for tunneling\"\n\n user_name = data[\"user_name\"]\n dest = data[\"dest\"]\n host = data.get(\"host\", host)\n ssh_port = data.get(\"ssh_port\", ssh_port)\n err_code = _ssh_forward(user_name, dest, port, id_file, host, ssh_port)\n return err_code\n\n\ndef _ssh_forward(user_name, dest, port, id_file=None, host=\"127.0.0.1\", ssh_port=\"22\"):\n if id_file:\n cmd = [\n \"ssh\",\n \"-i\",\n id_file,\n \"-p\",\n ssh_port,\n \"-L\",\n \"{port}:{host}:{port}\".format(port=port, host=host),\n \"{}@{}\".format(user_name, dest),\n \"-f\",\n \"-N\",\n ]\n else:\n cmd = [\n \"ssh\",\n \"-p\",\n ssh_port,\n \"-L\",\n \"{port}:{host}:{port}\".format(port=port, host=host),\n \"{}@{}\".format(user_name, dest),\n \"-f\",\n \"-N\",\n ]\n err_code = sp.call(cmd)\n return err_code\n\n\ndef break_connect(dest=None, port=None):\n \"\"\"\n Break connection to remote port\n \"\"\"\n config_parser = _ConfigParser(DEFAULT_CONFIG_FILE)\n data = config_parser.data\n if dest:\n data[\"dest\"] = dest\n assert (\n data.get(\"dest\", None) is not None\n ), \"destination not given and can't find it in config file\"\n dest = data[\"dest\"]\n p_ssh_tunnel = sp.Popen(\n \"ps aux | grep 
ssh | grep -- -L\", shell=True, stdout=sp.PIPE\n )\n p_dest = sp.Popen([\"grep\", dest], stdin=p_ssh_tunnel.stdout, stdout=sp.PIPE)\n if port:\n print(\"Break pipe connecting {}:{}\".format(dest, port))\n p_port = sp.Popen([\"grep\", port], stdin=p_dest.stdout, stdout=sp.PIPE)\n p_kill = sp.Popen(\n \"awk '{ print $2 }' | xargs kill\", shell=True, stdin=p_port.stdout\n )\n else:\n print(\"Breaking all pipes to {}\".format(dest))\n p_kill = sp.Popen(\n \"awk '{ print $2 }' | xargs kill\", shell=True, stdin=p_dest.stdout\n )\n p_ssh_tunnel.stdout.close()\n _, err = p_kill.communicate()\n if err:\n return 2\n return 0\n\n\ndef ls_tunnel():\n \"\"\"\n List all port tunneling\n \"\"\"\n p_all_tunnel = sp.Popen(\n \"ps aux | grep ssh | grep -v grep | grep -- -L\", stdout=sp.PIPE, shell=True\n )\n out, err = p_all_tunnel.communicate()\n if err:\n print(err, file=sys.stderr)\n return 2\n\n forward_pattern = re.compile(r\"(\\d*):([\\d\\w\\.]*):\\d*\")\n dest_pattern = re.compile(r\"([\\w\\d\\.]*)@([\\w\\d\\.]*)\")\n out = out.decode(\"utf8\")\n forward_matchs = [m for m in forward_pattern.finditer(out)]\n dest_matchs = [m for m in dest_pattern.finditer(out)]\n if not dest_matchs:\n print(\"No pipes detected\")\n else:\n for forward_m, dest_m in zip(forward_matchs, dest_matchs):\n port = forward_m.group(1)\n host = forward_m.group(2)\n user = dest_m.group(1)\n dest = dest_m.group(2)\n print(\n \"{host}:{port} --> {user}@{dest}:{port}\".format(\n host=host, port=port, user=user, dest=dest\n )\n )\n return 0\n\n\ndef config():\n \"\"\"\n Setup default configuration\n \"\"\"\n print(\"leave blank to use original configuration\", flush=True)\n print(\"press ctrl-C/ctrl-D to interrupt\", flush=True)\n modified = False\n config_parser = _ConfigParser(DEFAULT_CONFIG_FILE)\n try:\n old_head = config_parser.head\n prompt = \"HEAD ({}): \".format(old_head)\n head = input(prompt)\n if head:\n config_parser.head = head\n modified = True\n data = config_parser.head_data\n\n old_user_name = data.get(\"user_name\", None)\n prompt = (\n old_user_name is None\n and \"user name: \"\n or \"user name ({}): \".format(old_user_name)\n )\n user_name = input(prompt)\n if user_name:\n data[\"user_name\"] = user_name\n modified = True\n\n old_dest = data.get(\"dest\", None)\n prompt = (\n old_dest is None\n and \"destination: \"\n or \"destination ({}): \".format(old_dest)\n )\n dest = input(prompt)\n if dest:\n data[\"dest\"] = dest\n modified = True\n\n old_id_file = data.get(\"id_file\", None)\n prompt = (\n old_id_file is None and \"id file: \" or \"id file ({}): \".format(old_id_file)\n )\n id_file = input(prompt)\n if id_file:\n data[\"id_file\"] = id_file\n modified = True\n\n old_host = data.get(\"host\", None)\n prompt = old_host is None and \"host: \" or \"host ({}): \".format(old_host)\n host = input(prompt)\n if host:\n data[\"host\"] = host\n modified = True\n\n old_ssh_port = data.get(\"ssh_port\", None)\n prompt = (\n old_ssh_port is None\n and \"ssh port: \"\n or \"ssh port ({}): \".format(old_ssh_port)\n )\n ssh_port = input(prompt)\n if ssh_port:\n data[\"ssh_port\"] = ssh_port\n modified = True\n except (EOFError, KeyboardInterrupt):\n print()\n finally:\n if modified:\n print(\"Saving new configuration\")\n config_parser.update(data)\n config_parser.save(DEFAULT_CONFIG_FILE)\n print()\n print(\"Current Config\")\n print(\"--------------\")\n print(config_parser)\n return 0\n\n\ndef ls_config():\n \"\"\"\n List content of default configuration\n \"\"\"\n if not os.path.exists(DEFAULT_CONFIG_FILE):\n 
print(\"No config file found: {}\".format(DEFAULT_CONFIG_FILE), file=sys.stderr)\n return 2\n config_parser = _ConfigParser(DEFAULT_CONFIG_FILE)\n print(config_parser)\n return 0\n\n\nif __name__ == \"__main__\":\n # pylint: disable=line-too-long, C0103\n config_parser = _ConfigParser(DEFAULT_CONFIG_FILE)\n config_content = config_parser.head_data\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest=\"subcmd\", help=\"Subcommands\")\n # binding parser\n bind_parser = subparsers.add_parser(\"bind\", help=\"binding local port to remote\")\n bind_parser.add_argument(\"port\", help=\"port to bind\")\n bind_parser.add_argument(\n \"-u\",\n \"--user-name\",\n dest=\"user_name\",\n help=\"user name for login (default: %(default)s)\",\n default=config_content.get(\"user_name\", None),\n )\n bind_parser.add_argument(\n \"-d\",\n \"--destination\",\n dest=\"dest\",\n metavar=\"ADDRESS\",\n help=\"destination address (default: %(default)s)\",\n default=config_content.get(\"dest\", None),\n )\n bind_parser.add_argument(\n \"-i\",\n \"--id-file\",\n dest=\"id_file\",\n metavar=\"ID_FILE\",\n help=\"login id file (default: %(default)s)\",\n default=config_content.get(\"id_file\", None),\n )\n bind_parser.add_argument(\n \"-H\",\n \"--host\",\n dest=\"host\",\n metavar=\"ADDRESS\",\n default=\"127.0.0.1\",\n help=\"host address (default: %(default)s)\",\n )\n bind_parser.add_argument(\n \"-P\",\n \"--ssh-port\",\n dest=\"ssh_port\",\n metavar=\"PORT\",\n help=\"ssh port (default: %(default)s)\",\n default=config_content.get(\"ssh_port\", \"22\"),\n )\n # break_connect parser\n break_parser = subparsers.add_parser(\"break\", help=\"break tunneling\")\n break_parser.add_argument(\n \"-d\",\n \"--destination\",\n dest=\"dest\",\n default=config_content.get(\"dest\", None),\n help=\"destination to break connection (default: %(default)s)\",\n )\n break_parser.add_argument(\n \"-p\",\n \"--port\",\n dest=\"port\",\n default=None,\n help=(\"port to break (default: %(default)s, \" \" break all port)\"),\n )\n # ls_tunnel parser\n ls_tunnel_parser = subparsers.add_parser(\"ls\", help=\"list all tunneling\")\n # config parser\n config_parser = subparsers.add_parser(\"config\", help=\"setup default configuration\")\n config_parser.add_argument(\n \"--show\", action=\"store_true\", help=\"show current default configurations\"\n )\n\n subcmd = parser.parse_args().subcmd\n argv = sys.argv[2:]\n if subcmd == \"bind\":\n bind_args = vars(bind_parser.parse_args(argv))\n err_code = bind(**bind_args)\n elif subcmd == \"break\":\n break_args = vars(break_parser.parse_args(argv))\n err_code = break_connect(**break_args)\n elif subcmd == \"ls\":\n err_code = ls_tunnel()\n elif subcmd == \"config\":\n config_args = vars(config_parser.parse_args(argv))\n if config_args[\"show\"]:\n err_code = ls_config()\n else:\n err_code = config()\n else:\n parser.print_help()\n err_code = 1\n sys.exit(err_code)\n"
},
{
"alpha_fraction": 0.5148431062698364,
"alphanum_fraction": 0.5390161275863647,
"avg_line_length": 29.230770111083984,
"blob_id": "3fd865d33d579f9a1c1fda8ec2f6ab68dbea4656",
"content_id": "9eb7e83b25f91cdc3b16ac399ec6040b0383bae1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2358,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 78,
"path": "/Python/show_img_term.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# https://github.com/nikhilkumarsingh/terminal-image-viewer/blob/master/img-viewer.py\nfrom PIL import Image\nimport numpy as np\nimport argparse\n\nfrom os import get_terminal_size\nimport sys\n\n_term_size = get_terminal_size()\nTERM_WIDTH = _term_size.columns\nTERM_HEIGHT = _term_size.lines\n\n\ndef convert_ansci_color(img):\n if img.ndim == 3:\n if img.shape[-1] == 1:\n img = np.squeeze(img).astype(np.float64)\n elif img.shape[-1] == 4:\n # RGBA\n alpha = img[:, :, 3].reshape(img.shape[:2] + (1,))\n img = (img[:, :, :3] * (alpha / 255)).astype(np.float64)\n ansi_img = np.zeros_like(img, shape=img.shape[:2], dtype=np.uint8)\n if img.ndim == 2:\n # gray scale\n ansi_img[:] = np.round(((img - 8) / 247) * 24) + 232\n ansi_img[np.where(img < 8)] = 16\n ansi_img[np.where(img > 248)] = 231\n else:\n # color image\n ansi_img[:] = 16 + np.round(img * (5 / 255)) @ [36, 6, 1]\n return ansi_img\n\n\[email protected]\ndef to_color_str(v):\n return f\"\\x1b[48;5;{v}m \\x1b[0m\"\n\n\ndef show_img_term(img_paths, adjust_to_height=False):\n ret = 0\n try:\n for img_path in img_paths:\n try:\n img = Image.open(img_path)\n except FileNotFoundError:\n print(f\"fail to read {img_path}\")\n ret = 1\n continue\n scale = TERM_WIDTH / img.width\n if adjust_to_height:\n scale = TERM_HEIGHT / img.height\n if scale < 1:\n img = img.resize((int(scale * img.width), int(scale * img.height)))\n img = np.array(img, copy=False)\n ansi_img = convert_ansci_color(img)\n print(f\"image: {img_path}\")\n print(\n \"\\n\".join([\"\".join(row) for row in to_color_str(ansi_img)]),\n )\n print()\n except KeyboardInterrupt:\n ...\n return ret\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"img_paths\", nargs=\"+\", metavar=\"IMG_PATH\", help=\"the path to the image\"\n )\n parser.add_argument(\n \"--adjust-to-height\",\n action=\"store_true\",\n help=\"adjust the image to fit the terminal height\",\n )\n kwargs = vars(parser.parse_args())\n sys.exit(show_img_term(**kwargs))\n"
},
{
"alpha_fraction": 0.620771050453186,
"alphanum_fraction": 0.6235247850418091,
"avg_line_length": 34.30555725097656,
"blob_id": "00b49d50bff5959bc34748a7444d3dbe87508ec4",
"content_id": "7020030dc696d61012a81a54ba73299a42432452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2542,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 72,
"path": "/Python/pyscript-run.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport datetime as dt\nimport argparse\nfrom pathlib import Path\nimport sys\nimport os\nimport subprocess\nfrom shutil import copyfile, rmtree, copy\n\n\n_LOG_DIR = \"~/.pyscript_run\"\n\n\ndef clear(py_script: str):\n print(f\"clear log for {py_script}\")\n log_path = Path(_LOG_DIR) / py_script\n if log_path.exists():\n rmtree(str(log_path))\n return 0\n\n\ndef pyscript_run(py_script, script_args, copy_dir=False):\n run_time = dt.datetime.now().isoformat(timespec=\"seconds\").replace(\":\", \"-\")\n script_name, ext = os.path.splitext(py_script)\n if ext != \".py\":\n print(f\"expecting a python script, get {py_script}\")\n return 1\n log_dir = (Path(_LOG_DIR) / script_name / run_time).expanduser()\n log_dir.mkdir(parents=True, exist_ok=True)\n with (log_dir / py_script).open(\"w\") as fid, open(py_script) as ori_fid:\n content = ori_fid.read()\n fid.write(content)\n complete_proc = subprocess.run(\n [sys.executable, py_script] + script_args,\n capture_output=True,\n )\n if complete_proc.stdout:\n print(complete_proc.stdout.decode(\"utf8\"), file=sys.stdout, end=\"\")\n if complete_proc.stderr:\n print(complete_proc.stderr.decode(\"utf8\"), file=sys.stderr, end=\"\")\n with (log_dir / \"command.txt\").open(\"w\") as fid:\n fid.write(\" \".join([py_script] + script_args) + \"\\n\")\n with (log_dir / \"log.txt\").open(\"w\") as fid:\n if complete_proc.stdout:\n fid.write(complete_proc.stdout.decode(\"utf8\"))\n if complete_proc.stderr:\n fid.write(complete_proc.stderr.decode(\"utf8\"))\n for script_arg in script_args:\n arg_path = Path(script_arg).expanduser()\n if arg_path.is_file():\n copyfile(str(arg_path), str(log_dir / arg_path.name))\n elif arg_path.is_dir() and copy_dir:\n copy(str(arg_path), str(log_dir / arg_path.name))\n return complete_proc.returncode\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"py_script\", help=\"the python script to run\")\n parser.add_argument(\n \"--clear\", help=\"clear running log of given python script\", action=\"store_true\"\n )\n parser.add_argument(\n \"--copy-dir\",\n action=\"store_true\",\n help=\"if to copy directory given in the script aruguments\",\n )\n args, script_args = parser.parse_known_args()\n if args.clear:\n sys.exit(clear(args.py_script))\n else:\n sys.exit(pyscript_run(args.py_script, script_args, copy_dir=args.copy_dir))\n"
},
{
"alpha_fraction": 0.4669603407382965,
"alphanum_fraction": 0.5092511177062988,
"avg_line_length": 22.183673858642578,
"blob_id": "38d76a41294fd37b0982eb1b367bf19a4496922a",
"content_id": "8b2f4b50f1e025cfdb1ee7afb163b56cda2c617d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1135,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 49,
"path": "/img-viewer.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport sys\nimport numpy as np\nfrom PIL import Image\nfrom os import get_terminal_size\n\n\ndef get_ansi_color_code(r, g, b):\n if r == g and g == b:\n if r < 8:\n return 16\n if r > 248:\n return 231\n return round(((r - 8) / 247) * 24) + 232\n return (\n 16 + (36 * round(r / 255 * 5)) + (6 * round(g / 255 * 5)) + round(b / 255 * 5)\n )\n\n\ndef get_color(r, g, b):\n return \"\\x1b[48;5;{}m \\x1b[0m\".format(int(get_ansi_color_code(r, g, b)))\n\n\ndef show_image(img_path):\n try:\n img = Image.open(img_path)\n except FileNotFoundError:\n exit(\"Image not found.\")\n\n h = img.height\n w = img.width\n scale = get_terminal_size().lines / h\n if scale < 1:\n h = int(scale * h)\n w = int(scale * w)\n\n img = img.resize((w, h), Image.ANTIALIAS)\n img_arr = np.asarray(img)\n\n for x in range(h):\n for y in range(w):\n pix = img_arr[x][y]\n print(get_color(pix[0], pix[1], pix[2]), sep=\"\", end=\"\")\n print()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n show_image(sys.argv[1])"
},
{
"alpha_fraction": 0.5972037315368652,
"alphanum_fraction": 0.6125166416168213,
"avg_line_length": 27.884614944458008,
"blob_id": "e10eeae2ef99dd20ee7ad7806afd87c7c3d42a26",
"content_id": "a868bd9ededf5c3ce5729e5ed3639e4ba97c0b14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1502,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 52,
"path": "/Python/convert_mp4.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport argparse\nimport os\nimport sys\n\nimport cv2\nfrom tqdm import tqdm\n\n\ndef convert_mp4(input_video):\n in_fname, ext = os.path.splitext(input_video)\n if ext.lower() == \".mp4\":\n print(f\"{input_video} is already mp4 file, abort\")\n return 1\n out_fname = f\"{in_fname}.mp4\"\n in_cap = cv2.VideoCapture(input_video)\n if not in_cap.isOpened():\n print(f\"fail to open {input_video}\")\n return 1\n fps = in_cap.get(cv2.CAP_PROP_FPS)\n width = int(in_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(in_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n num_frames = int(in_cap.get(cv2.CAP_PROP_FRAME_COUNT))\n if sys.platform == \"darwin\":\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n else:\n fourcc = cv2.VideoWriter_fourcc(*\"MP4V\")\n writer = cv2.VideoWriter(out_fname, fourcc, fps, (width, height))\n for _ in tqdm(range(num_frames)):\n ret, frame = in_cap.read()\n if not ret:\n break\n writer.write(frame)\n in_cap.release()\n writer.release()\n return 0\n\n\ndef main(input_videos):\n ok = True\n for input_video in input_videos:\n print(f\"converting {input_video}\")\n ok = convert_mp4(input_video) == 0 and ok\n print(\"\\n\")\n return 0 if ok else 1\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_videos\", nargs=\"+\", help=\"the input video file\")\n kwargs = vars(parser.parse_args())\n sys.exit(main(**kwargs))\n"
},
{
"alpha_fraction": 0.5781487226486206,
"alphanum_fraction": 0.588770866394043,
"avg_line_length": 26.45833396911621,
"blob_id": "e174b0d0635e651421f69b087e0eaa0656c09e6f",
"content_id": "fb945533c497a84099aca405418d3cd3b80b012b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 659,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 24,
"path": "/Python/docker-tags",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport requests\nimport click\n\n\[email protected](name=\"docker-tags\")\[email protected](\"image\")\ndef docker_tags(image):\n res = requests.get(f\"https://registry.hub.docker.com/v1/repositories/{image}/tags\")\n if res.status_code != 200:\n click.secho(\n f\"there is error fetching tags for {image}: {res.text}[{res.status_code}]\",\n fg=\"red\",\n bold=True,\n )\n return 1\n click.secho(f\"tags for {image} are:\", fg=\"bright_green\", bold=True)\n for tag in res.json():\n click.secho(f\"{tag['name']}\", fg=\"bright_blue\")\n return 0\n\n\nif __name__ == \"__main__\":\n docker_tags()\n"
},
{
"alpha_fraction": 0.5049180388450623,
"alphanum_fraction": 0.5098360776901245,
"avg_line_length": 26.727272033691406,
"blob_id": "cc69026e7f9f4047ffef5ca94e78c3bc2d678194",
"content_id": "09ee37bec678715212d22602290afbcbc2a24083",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2440,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 88,
"path": "/Python/add_interpretor",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n\n\"\"\"\nThis small tool will automatic add proper '#!' line\nat the begining of your scripts.\n\"\"\"\n\nimport os\nimport argparse\n\n_supported_interpretor = {\n \"swift_xc\": (\"#!/usr/bin/env xcrun swift\\n\", \".swift\"),\n \"swift\": (\"#!/usr/bin/env swift\\n\", \".swift\"),\n \"pytohn\": (\"#!/usr/bin/env python\\n\", \".py\"),\n \"python2\": (\"#!/usr/bin/env python2\\n\", \".py\"),\n \"python3\": (\"#!/usr/bin/env python3\\n\", \".py\"),\n \"bash\": (\"#!/usr/bin/env bash\", \".sh\"),\n \"sh\": (\"#!/usr/bin/env sh\", \".sh\"),\n}\n\n\ndef main(interpretor, file_name, overwrite):\n \"\"\"\n main program\n \"\"\"\n interpretor, target_ext = _supported_interpretor[interpretor]\n _, ext = os.path.splitext(file_name)\n if ext == target_ext:\n with open(file_name, \"r\") as rf:\n # Read contents.\n lines = rf.readlines()\n try:\n first_line = lines[0]\n except IndexError:\n # Empty file, do nothing.\n return 0\n # Skip if there is already one which we desire.\n if first_line == interpretor:\n return 0\n elif first_line.startswith(\"#!\"):\n # Found existing interpretor. Overwrite it or not.\n if not overwrite:\n print(\n \"[Warning] Detecting other interpretor in file: {}\".format(\n file_name\n )\n )\n print(\"Do nothing\")\n return 0\n lines = lines[1:]\n\n with open(file_name, \"w\") as wf:\n content = \"\".join(lines)\n wf.seek(0)\n wf.write(interpretor)\n wf.write(content)\n else:\n print(f\"expecting file with ext {target_ext}, get {ext}\")\n return 1\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(prog=\"add_interpretor\")\n parser.add_argument(\n \"-i\",\n \"--interpretor\",\n dest=\"interpretor\",\n help=\"interpretor to be used. ex: python\",\n metavar=\"INTERPRETOR\",\n required=True,\n )\n parser.add_argument(\n \"-f\",\n \"--file-name\",\n help=\"the file to be processed\",\n required=True,\n )\n parser.add_argument(\n \"-o\",\n \"--overwrite\",\n dest=\"overwrite\",\n help=\"overwrite interpretor if there is any.\",\n action=\"store_true\",\n )\n\n kwargs = vars(parser.parse_args())\n main(**kwargs)\n"
},
{
"alpha_fraction": 0.5430285334587097,
"alphanum_fraction": 0.5506826639175415,
"avg_line_length": 27.104650497436523,
"blob_id": "7993e33fe8eb3542945a2420218a45b62a43a430",
"content_id": "413dd314dd5e80cba765e4b407aaa4f0f7afcebd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4834,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 172,
"path": "/Python/tomato_timer.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding: utf-8 -*-\n# https://en.wikipedia.org/wiki/Pomodoro_Technique\nfrom __future__ import print_function\nfrom time import sleep\nfrom datetime import datetime\nimport sys\nimport subprocess as sp\nimport argparse\n\nif sys.version_info.major < 3:\n input = raw_input\n\n\nclass _Talker(object):\n \"\"\"\n _Talker: object responsible for playing voice message.\n\n Support Platform\n ================\n - MacOS: via 'say'.\n \"\"\"\n\n def __init__(self):\n\n if sys.platform == \"darwin\":\n self._cmd = [sp.check_output([\"which\", \"say\"]).strip()]\n self._disgard_options = False\n else:\n print(\n \"Falling back to terminal print for platform: \".format(sys.platform),\n file=sys.stderr,\n )\n self._cmd = [\"echo\", \"-e\", \"\\a\"]\n self._disgard_options = True\n\n def say(self, msg, options):\n \"\"\"\n say void messsage\n\n params\n ======\n - msg (str): message to say.\n - options (list or tuple): options to pass to shell command that will play the voice message.\n \"\"\"\n if self._disgard_options:\n options = []\n else:\n options = list(options)\n sp.call(self._cmd + options + [msg])\n\n\ndef simple_log(msg, dest=sys.stdout, wait=False):\n d = datetime.now()\n log_msg = \"{}: {}\".format(d, msg)\n if wait:\n input(log_msg)\n else:\n print(log_msg, file=dest)\n\n\ndef main(num_cycles=4, minutes=25.0, break_time=5.0, talker_options=\"\"):\n \"\"\"\n main function\n\n params\n ======\n - num_cycles (int): number of cycles (default: 4)\n - minutes (float): time for each cycles in minutes (default: 25.0)\n - break_time (float): time for break after each cycle in minutes (default: 5.0)\n - talker_options (str): option string to pass to the voice message player (ex: '-v Fred' on MacOS)\n \"\"\"\n # try to initiate a talker.\n try:\n talker = _Talker()\n except NotImplementedError as e:\n simple_log(e, dest=sys.stderr)\n sys.exit(1)\n\n options = talker_options.split(\" \")\n cycle_seconds = int(minutes * 60)\n break_seconds = int(break_time * 60)\n\n # start clock\n talker.say(\"start clock\", options)\n simple_log(\"start clock\")\n while num_cycles > 0:\n\n # wait for one cycle\n sleep(cycle_seconds)\n\n # take a inter-cycle break\n ## setup break message\n if break_seconds > 60:\n m = int(break_time)\n s = int(60 * (break_time - m))\n msg = \"break time {} minutes {} seconds\".format(m, s)\n else:\n msg = \"break time {} seconds\".format(cycle_seconds)\n\n ## playing inter-cycle break start message (except for the last cycle)\n if num_cycles > 1:\n talker.say(msg, options)\n simple_log(\n \"{} (press return/enter to continue the clock)\".format(msg), wait=True\n )\n simple_log(\"timer continue\")\n\n ## wait for inter-cycle break\n sleep(break_seconds)\n\n ## playing inter-cycle break over message (except for the last cycle)\n msg = \"break time over, get back to work\"\n talker.say(msg, options)\n simple_log(\n \"{} (press return/enter to continue the clock)\".format(msg), wait=True\n )\n simple_log(\"timer continue\")\n\n num_cycles -= 1 # count down.\n\n # all cycles done. 
Taking long break.\n msg = \"take a long break\"\n talker.say(msg, options)\n simple_log(msg, wait=True)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"simple tomato timer: https://en.wikipedia.org/wiki/Pomodoro_Technique\"\n )\n parser.add_argument(\n \"-n\",\n \"--number-cycles\",\n dest=\"num_cycles\",\n metavar=\"INTEGER\",\n type=int,\n help=\"number of cycles (default: 4)\",\n default=4,\n )\n parser.add_argument(\n \"-m\",\n \"--minutes-per-cycle\",\n dest=\"minutes\",\n metavar=\"FLOAT\",\n type=float,\n help=\"minutes for each cycle (default: 25)\",\n default=25.0,\n )\n parser.add_argument(\n \"-b\",\n \"--minutes-per-break\",\n dest=\"break_time\",\n metavar=\"FLOAT\",\n type=float,\n help=\"minutes for break after each cycle (default: 5)\",\n default=5.0,\n )\n parser.add_argument(\n \"-t\",\n \"--talker-options\",\n dest=\"talker_options\",\n metavar=\"STRING\",\n help=\"options for the talker (ex: -t '-v Fred' for Mac)\",\n default=\"\",\n )\n args = parser.parse_args()\n try:\n main(args.num_cycles, args.minutes, args.break_time, args.talker_options)\n except KeyboardInterrupt:\n print(\"Bye bye...\")\n"
},
{
"alpha_fraction": 0.6240267157554626,
"alphanum_fraction": 0.6262513995170593,
"avg_line_length": 30,
"blob_id": "4939d05e92db36a8acdd25f445a12497cbe16a3d",
"content_id": "db0555f1a12277ecdbd8009d6ad6316dadaf65a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 899,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 29,
"path": "/Python/move_files_and_link.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport argparse\nimport os\nimport sys\nfrom pathlib import Path\nfrom shutil import move\n\n\ndef move_files_and_link(files, to: Path):\n to.mkdir(parents=True, exist_ok=True)\n for file_path in map(Path, files):\n if not file_path.is_file():\n print(f\"skipping {file_path} for not being a file\")\n continue\n new_path = to / file_path.name\n move(str(file_path), str(new_path))\n os.symlink(str(new_path), str(file_path))\n print(f\"creating symlink for {file_path} and move it to {new_path}\")\n return 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"files\", nargs=\"+\", help=\"files to move\")\n parser.add_argument(\n \"--to\", \"-t\", type=Path, help=\"move to directory\", required=True\n )\n kwargs = vars(parser.parse_args())\n sys.exit(move_files_and_link(**kwargs))\n"
},
{
"alpha_fraction": 0.6610299944877625,
"alphanum_fraction": 0.6625672578811646,
"avg_line_length": 47.185184478759766,
"blob_id": "4b6ff6cac65de90417124e1a1b6011e241e8e821",
"content_id": "ac8ea324e7d8a16a811d529325ce2b0a2b16cc06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1301,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 27,
"path": "/C_codes/rand_sentence/random_sentence.c",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n\nchar *sentences[] = {\n \"If you want to go fast, go alone. If you want to go far, go together.\",\n \"It is the tears of the earth that keep her smiles in bloom. -- Rabindranath Tagore\",\n \"Be the change that you wish to see in the world. -- Mahatma Gandhi\",\n \"If you tell the truth, you don't have to remember anything. -- Mark Twain\",\n \"Live as if you were to die tomorrow. Learn as if you were to live forever. -- Mahatma Gandhi\",\n \"Order and simplification are the first steps toward the mastery of a subject. -- Thomas Mann\",\n \"We are twice armed if we fight with faith. -- Plato\",\n \"Everything that is done in the world is done by hope. -- Martin Luther\",\n \"Young man, in mathematics you don't understand things. You just get used to them. -- John von Neumann\",\n \"In most sciences, one generation tears down what another has built and what one has established another undoes. In mathematics alone, each generations adds a new story to the old structure. -- Hermann Hankel\",\n \"Two things are infinite: the universe and human stupidity. But the universe I'm not quite sure. -- Albert Einstein\"\n};\n\nint main()\n{\n srand(time(NULL));\n int r = rand();\n int l = sizeof(sentences) / sizeof(sentences[0]);\n char *msg = sentences[r % l];\n printf(\"%s\\n\", msg);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5788288116455078,
"alphanum_fraction": 0.5818318128585815,
"avg_line_length": 27.36170196533203,
"blob_id": "caa9392445eab6773b39eeb7b8e55cda4dc38744",
"content_id": "fa7430feb64ecf3089553a34e20437dfdc942e65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1332,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 47,
"path": "/Python/resize_fig.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nfrom PIL import Image\n\nimport argparse\n\n\ndef size_parser(value_str):\n if value_str is None:\n return value_str\n values = value_str.split(\",\")\n if len(values) > 1:\n return map(int, [v.strip() for v in values[:2]])\n else:\n return float(values[0])\n\n\ndef main(img_path, new_size, out_img):\n img = Image.open(img_path)\n if isinstance(new_size, float):\n new_size = tuple(map(lambda v: int(new_size * v), img.size))\n elif new_size is None:\n print(f\"original image size: {img.size}\")\n new_size = (\n int(input(\"Enter the new width: \")),\n int(input(\"Enter the new height: \")),\n )\n new_img = img.resize(new_size).convert(\"RGB\")\n new_img.save(out_img)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"img_path\", metavar=\"IMAGE\")\n parser.add_argument(\n \"--to-size\",\n dest=\"new_size\",\n type=size_parser,\n metavar=\"SIZE\",\n default=None,\n help=\"the target size. Can be either size seperated by comma or a float\",\n )\n parser.add_argument(\"--out-fname\", dest=\"out_img\", default=\"resized_img.png\")\n args = vars(parser.parse_args())\n try:\n main(**args)\n except (KeyboardInterrupt, EOFError):\n print(\"\\nBye!\")"
},
{
"alpha_fraction": 0.5123152732849121,
"alphanum_fraction": 0.5270935893058777,
"avg_line_length": 12.533333778381348,
"blob_id": "4bcbd8f6bcf42e3a91614d9fa95b95bfa83fb9c9",
"content_id": "dd9b1ebeec0ab3e473a00d42dca6e57e5639411d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 15,
"path": "/Python/pyabspath",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding: utf-8 -*-\nimport os, sys\n\n\ndef main():\n try:\n path = sys.argv[1]\n except IndexError:\n path = \".\"\n\n print(os.path.abspath(path))\n\n\nmain()\n"
},
{
"alpha_fraction": 0.5606242418289185,
"alphanum_fraction": 0.5630252361297607,
"avg_line_length": 20.921052932739258,
"blob_id": "e9da42e97b7203b05555ae3519bea6ad96744d36",
"content_id": "23496140fcd8ca1e075f585164dd4b7ecc731416",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 833,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 38,
"path": "/Python/clean_derived_data",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# -*- coding:utf-8 -*-\nimport subprocess\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-r\",\n \"--root\",\n dest=\"root\",\n default=\".\",\n metavar=\"PATH\",\n help=\"root directory to start with. (default:.)\",\n)\nparser.add_argument(\n \"-n\",\n \"--dry\",\n dest=\"dry\",\n help=\"running in dry mode.(Print out message only)\",\n action=\"store_true\",\n)\n\n\ndef main(args):\n root_dir = args.root\n dry = args.dry\n for current_dir, _, _ in os.walk(root_dir):\n if \"DerivedData\" in current_dir:\n if dry:\n print(\"would remove {}\".format(current_dir))\n else:\n subprocess.call(\"rm -rf {}\".format(current_dir), shell=True)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n main(args)\n"
},
{
"alpha_fraction": 0.572780191898346,
"alphanum_fraction": 0.5829694271087646,
"avg_line_length": 31.32941246032715,
"blob_id": "deba34213651b0fd653389c9234e0cd7d149db18",
"content_id": "6f9f7fc5fecbf6940bee9a677c6febc86a294b78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2748,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 85,
"path": "/Python/extract_frames.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport argparse\nimport re\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nimport cv2\n\n\ndef parse_time(time_spec, fps):\n pattern = re.compile(r\"(\\d+h)?(\\d+m)?(\\d+s)?\")\n match = pattern.match(time_spec)\n if match is None or match.group(0) == \"\":\n return -1\n hour = match.group(1)\n minute = match.group(2)\n second = match.group(3)\n frame_cnt = 0.0\n if hour:\n frame_cnt += fps * 3600 * float(hour.replace(\"h\", \"\"))\n if minute:\n frame_cnt += fps * 60 * float(minute.replace(\"m\", \"\"))\n if second:\n frame_cnt += fps * float(second.replace(\"s\", \"\"))\n return int(frame_cnt)\n\n\ndef extract_frames(\n video_name, frame_indices=None, out_dir=\"frames\", start_time=None, end_time=None\n):\n video = cv2.VideoCapture(video_name)\n print(\"reading frames\")\n frames = [video.read()[1] for _ in range(int(video.get(cv2.CAP_PROP_FRAME_COUNT)))]\n print(\"reading frames done\")\n fps = video.get(cv2.CAP_PROP_FPS)\n print(f\"FPS: {fps}\")\n video.release()\n if frame_indices is None:\n start_frame_idx = 0\n end_frame_idx = len(frames)\n if start_time:\n start_frame_idx = parse_time(start_time, fps)\n if start_frame_idx < 0:\n print(f\"invalid start time spec: {start_time}\")\n return 1\n if end_time:\n end_frame_idx = parse_time(end_time, fps)\n if end_frame_idx < 0:\n print(f\"invalid end time spec: {end_time}\")\n return 1\n frame_indices = [i for i in range(start_frame_idx, end_frame_idx)]\n out_path = Path(out_dir)\n out_path.mkdir(exist_ok=True, parents=True)\n for idx in tqdm(frame_indices):\n if idx >= len(frames):\n print(f\"invalid frame index detected, skipped: {idx}\")\n continue\n img_path = out_path / f\"{idx:04d}.png\"\n cv2.imwrite(str(img_path), frames[idx])\n print(f\"frames saved in {out_path.absolute()}\")\n return 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"video_name\", help=\"the input video file\", metavar=\"VIDEO\")\n parser.add_argument(\n \"--frame-indices\",\n \"-i\",\n type=lambda arg_str: map(int, arg_str.strip().split(\",\")),\n metavar=\"IDX[,IDX,...]\",\n default=None,\n help=\"the frame indices to save\",\n )\n parser.add_argument(\n \"-o\",\n \"--out-dir\",\n metavar=\"DIR\",\n help=\"the output directory of extracted frames (default: %(default)s)\",\n default=\"frames\",\n )\n parser.add_argument(\"--start-time\", help=\"the start time\")\n parser.add_argument(\"--end-time\", help=\"the end time\")\n kwargs = vars(parser.parse_args())\n extract_frames(**kwargs)\n"
},
{
"alpha_fraction": 0.5755395889282227,
"alphanum_fraction": 0.577595055103302,
"avg_line_length": 28.484848022460938,
"blob_id": "a581fa20f7e21c0d900611a94da9f4ed52ba988f",
"content_id": "cba2249bc0de607621a6c613a24b79337aade865",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 973,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 33,
"path": "/Python/copy_file.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport argparse\nimport sys\nfrom pathlib import Path\n\n\ndef copy_file(files, in_place=False):\n for file in files:\n print(f\"copy {file}\")\n file_path = Path(file)\n with file_path.open(\"rb\") as fid:\n bin_content = fid.read()\n if in_place:\n out_fname = file_path.name\n else:\n out_fname = f\"copied_{file_path.name}\"\n to_path = file_path.parent / out_fname\n if in_place and to_path.exists():\n to_path.unlink()\n with to_path.open(\"wb\") as fid:\n fid.write(bin_content)\n return 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # setup the parser here\n parser.add_argument(\"files\", metavar=\"FILE\", help=\"the file to copy\", nargs=\"+\")\n parser.add_argument(\n \"--in-place\", action=\"store_true\", help=\"copy the file IN PLACE\"\n )\n kwargs = vars(parser.parse_args())\n sys.exit(copy_file(**kwargs))\n"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.5913978219032288,
"avg_line_length": 14.5,
"blob_id": "24a29ed9b6648303d1ec52f474125efb22c223e7",
"content_id": "5a2b0747558081e94141742511f9de0ed5708cc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 6,
"path": "/C_codes/rand_sentence/Makefile",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "CC=gcc\nSOURCE=random_sentence.c\n\nrandom_sentence:$(SOURCE)\n\t$(CC) $< -o $@\n\tcp -f $@ ../bin/\n"
},
{
"alpha_fraction": 0.5580021739006042,
"alphanum_fraction": 0.5698174238204956,
"avg_line_length": 27.66153907775879,
"blob_id": "d306cae740c62bafa103e8715786f18228f95814",
"content_id": "ef79857d8025f18bfee4b2d2950296c0b1849ff8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1862,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 65,
"path": "/Python/show_video_term.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n\nfrom pathlib import Path\nimport cv2\nimport sys\nimport argparse\nfrom os import get_terminal_size\nfrom time import sleep\n\n\nsys.path[:1] = [str(Path(__file__).parent)]\n\nfrom show_img_term import convert_ansci_color, to_color_str\n\nTERM_HIGHT = get_terminal_size().lines\n\n\ndef clear_screen():\n print(chr(27) + \"[2j\\033c\\x1bc\", end=\"\")\n\n\ndef show_video_term(video_path, fps=None):\n video = cv2.VideoCapture(video_path)\n if video is None:\n print(f\"fail to read {video_path}\")\n return 1\n num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n if fps is None:\n fps = video.get(cv2.CAP_PROP_FPS)\n sleep_secs = 1 / fps\n print(f\"number of frames: {num_frames}\")\n scale = None\n try:\n for frame_idx in range(num_frames):\n ok, frame = video.read()\n frame = frame[:, :, ::-1] # bgr to rgb\n if not ok:\n print(f\"failure on reading frame {frame_idx}\")\n break\n if frame_idx == 0:\n scale = TERM_HIGHT / frame.shape[0]\n if scale < 1:\n frame = cv2.resize(\n frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC\n )\n ansi_img = convert_ansci_color(frame)\n print(\n \"\\n\".join([\"\".join(row) for row in to_color_str(ansi_img)]),\n )\n sleep(sleep_secs)\n clear_screen()\n except KeyboardInterrupt:\n ...\n clear_screen()\n print(f\"number of frames played: {frame_idx}\")\n\n return 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"video_path\", help=\"the of video to show\")\n parser.add_argument(\"--fps\", help=\"frames per second\", type=float)\n kwargs = vars(parser.parse_args())\n sys.exit(show_video_term(**kwargs))"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 31,
"blob_id": "1774d00d6885eab50187d31520c0886c1654be78",
"content_id": "b52d21883f783786f1256dea63e4eb12f208d365",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/README.md",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "# SmallTools\n\n- Python: small command line tools (scripts) written in python.\n- C_Codes: small command line tools written in C.\n"
},
{
"alpha_fraction": 0.5302904844284058,
"alphanum_fraction": 0.5352697372436523,
"avg_line_length": 26.386363983154297,
"blob_id": "4798ce10fc69fffa2674512dbbe9f9bd8eda76b3",
"content_id": "c3f881651b322fe9b92a63fa20bb55b4e2f87f18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1205,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 44,
"path": "/Python/run-it.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport subprocess\nimport sys\nimport datetime as dt\nfrom pathlib import Path\n\n\ndef _print_usage():\n print(\"Usage:\")\n print(\"Running command: run-it.py <command> args...\")\n print(\"Print log: run-it.py --print-log\")\n print(\"Clear log: run-it.py --clear\")\n\n\ndef main(cmd, log_file):\n run_time = dt.datetime.now().isoformat(\"T\", \"seconds\")\n try:\n ret_code = subprocess.call(cmd)\n print(\n f\"[run-it {run_time}] running {' '.join(cmd)!r}\",\n file=log_file,\n )\n return ret_code\n except Exception:\n _print_usage()\n return 1\n\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1:]\n if cmd[0] in [\"-h\", \"--help\"]:\n _print_usage()\n elif cmd[0] == \"--print-log\":\n try:\n with Path(\"~/.run-it.log\").expanduser().open(\"r\") as fid:\n print(fid.read())\n except FileNotFoundError:\n print(\"No log file to print\")\n elif cmd[0] == \"--clear\":\n print(\"removing ~/.run-it.log\")\n Path(\"~/.run-it.log\").expanduser().unlink(missing_ok=True)\n else:\n with Path(\"~/.run-it.log\").expanduser().open(\"a\") as fid:\n main(cmd, fid)\n"
},
{
"alpha_fraction": 0.5396391749382019,
"alphanum_fraction": 0.5423728823661804,
"avg_line_length": 41.53488540649414,
"blob_id": "74d013f67cb9a79757fc70f381be49883834401f",
"content_id": "fd57c6b86d4b6250973542553c658b9fe34d5c2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1829,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 43,
"path": "/Python/find_files/find_files.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- encode: utf-8 -*-\n\nfrom __future__ import print_function\nimport sys, os\nimport shutil\n\ndef copy_file(src, dest, file_surfixs = [\"jpg\", \"png\"]):\n count = 0\n for rel_path, dirs, files in os.walk(src):\n for file_name in files:\n if sum(map(file_name.endswith, file_surfixs)) > 0:\n count += 1\n shutil.copy2(os.path.abspath(os.path.join(rel_path, file_name)), os.path.join(dest, file_name))\n print(os.path.abspath(os.path.join(rel_path, file_name)) + \" copied.\")\n print(\"Total number of files copied: {count}\".format(count = count), file = sys.stdout)\n\nif __name__ == \"__main__\":\n\n import argparse\n\n desc = \"Walk throuhg `src` to find all files with surfix in `surfixs` and copy it to `dest`\"\n\n epilog = \"Example: find_files -f ~ -t ~/temp -s txt c cpp md\"\n parser = argparse.ArgumentParser(description=desc, epilog = epilog)\n parser.add_argument(\"-f\", \"--from\", dest='from_dir', metavar = 'from',\n required = True,\n help= \"source directory\")\n parser.add_argument(\"-t\", \"--to\", dest='dest_dir', metavar = 'to',\n required = True,\n help = \"destination directory\")\n parser.add_argument(\"-s\", '--surfixs', dest = \"surfixs\",\n action = \"store\",\n nargs = '*',\n default = ['jpg', 'gif', 'png', 'jpeg'],\n metavar = 'surfixs', help = \"file surfixs to match. Default are 'jpg', 'gif', 'png' and 'jepg'.\")\n try:\n args = parser.parse_args()\n print(\"Begin process....\")\n copy_file(args.from_dir, args.dest_dir, file_surfixs = args.surfixs)\n except Exception as e:\n print(e)\n print(parser.format_help())\n"
},
{
"alpha_fraction": 0.6039707660675049,
"alphanum_fraction": 0.6123301982879639,
"avg_line_length": 21.785715103149414,
"blob_id": "9e2706f9845290787f8cdff00c241143e160060c",
"content_id": "e5ce44a6120041fc3696a38c8fe5613b9aba1001",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 957,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 42,
"path": "/Python/generate_cmd_script.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport argparse\nimport sys\nimport re\nfrom pathlib import Path\nimport os\n\nTEMPLAE = \"\"\"\\\n#!/usr/bin/env -S python3 -u\nimport argparse\nimport sys\n\ndef {script_name}(**kwargs):\n # enter your code here\n ...\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # setup the parser here\n kwargs = vars(parser.parse_args())\n sys.exit({script_name}(**kwargs))\n\"\"\"\n\n\ndef generate_cmd_script(script_name):\n fname = f\"{script_name}.py\"\n with Path(fname).open(\"w\", encoding=\"utf8\") as fid:\n fid.write(TEMPLAE.format(script_name=script_name))\n os.chmod(fname, 0o744)\n return 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"script_name\",\n type=lambda name: re.sub(r\"[-\\.]\", \"_\", name),\n help=\"the script name\",\n metavar=\"NAME\",\n )\n kwargs = vars(parser.parse_args())\n sys.exit(generate_cmd_script(**kwargs))\n"
},
{
"alpha_fraction": 0.6005194783210754,
"alphanum_fraction": 0.610909104347229,
"avg_line_length": 31.08333396911621,
"blob_id": "62a512a87257d52418687d89a5794e36f21c0796",
"content_id": "1ca6de38e17ede68d27c9215a9da6eb86bb74339",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1925,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 60,
"path": "/Python/download_gdrive",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\n# https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive\n# https://sites.google.com/site/gdocs2direct/\nimport requests\nimport tqdm\n\n\ndef download_file_from_google_drive(file_id, destination, chunk_size=32768):\n def get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith(\"download_warning\"):\n return value\n\n return None\n\n def save_response_content(response, destination):\n print(f\"downloading to {destination}\")\n with open(destination, \"wb\") as f:\n for chunk in tqdm.tqdm(\n response.iter_content(chunk_size), unit=\" bytes\", unit_scale=chunk_size\n ):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params={\"id\": file_id}, stream=True)\n token = get_confirm_token(response)\n\n if token:\n print(f\"get token: {token}\")\n params = {\"id\": file_id, \"confirm\": token}\n response = session.get(URL, params=params, stream=True)\n save_response_content(response, destination)\n else:\n print(f\"fail to get token for {file_id}\")\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file_id\", help=\"the id of the file on google drive\")\n parser.add_argument(\n \"--chunk-size\",\n dest=\"chunk_size\",\n default=32768,\n help=\"the downloading chunk size [default: %(default)s]\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n dest=\"destination\",\n help=\"the output file name [default: %(default)s]\",\n default=\"saved_file.out\",\n )\n args = vars(parser.parse_args())\n download_file_from_google_drive(**args)\n"
},
{
"alpha_fraction": 0.5531052947044373,
"alphanum_fraction": 0.5657065510749817,
"avg_line_length": 27.487178802490234,
"blob_id": "caeeddaae2577a17a216fb16a13b61ca5d0074c4",
"content_id": "e86b0db896e5b6bc210424b713c5de86ffe1e2ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2222,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 78,
"path": "/Python/images_to_mov.py",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nimport argparse\nimport re\nfrom pathlib import Path\n\nimport cv2\nimport tqdm\n\n\ndef imgs2mov(\n input_dir: Path, movie_name=\"movie\", fps=20, numerical=False, flip_channels=False\n):\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n out_fname = f\"{movie_name}.mov\"\n img_names = []\n for ext in [\"png\", \"jpg\"]:\n img_names.extend(\n [str(p.expanduser().absolute()) for p in input_dir.glob(f\"*.{ext}\")]\n )\n if numerical:\n\n def key(fname):\n pat = re.compile(r\"[0-9]+\")\n return tuple(map(int, pat.findall(fname)))\n\n else:\n key = lambda v: v\n img_names = sorted(img_names, key=key)\n print(f\"reading frames from {input_dir}\")\n frames = []\n for name in tqdm.tqdm(img_names):\n frames.append(cv2.imread(name, cv2.IMREAD_COLOR))\n if flip_channels:\n if frames[0].shape[-1] == 4:\n shuffle_idxs = [2, 1, 0, 3]\n else:\n shuffle_idxs = [2, 1, 0]\n frames = [frame[:, :, shuffle_idxs] for frame in frames]\n print(f\"reading frames done ({len(frames)} frames)\")\n writer = cv2.VideoWriter(out_fname, fourcc, fps, frames[0].shape[:2][::-1])\n print(\"writing frames\")\n for frame in tqdm.tqdm(frames):\n writer.write(frame)\n writer.release()\n print(f\"movie saved: {out_fname} (fps: {fps})\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"input_dir\",\n help=\"the directory where to find input images\",\n type=Path,\n default=Path(\".\"),\n )\n parser.add_argument(\n \"-o\",\n \"--movie-name\",\n help=\"the output movie name (default: %(default)s)\",\n default=\"movie\",\n )\n parser.add_argument(\n \"--fps\",\n help=\"output frame per second (default: %(default)s)\",\n default=20,\n type=int,\n )\n parser.add_argument(\n \"-n\",\n \"--numerical\",\n action=\"store_true\",\n help=\"interpret the file name numerically, sort it accordingly\",\n )\n parser.add_argument(\n \"-f\", \"--flip-channels\", help=\"flip B and R channel\", action=\"store_true\"\n )\n kwargs = vars(parser.parse_args())\n imgs2mov(**kwargs)\n"
},
{
"alpha_fraction": 0.6223404407501221,
"alphanum_fraction": 0.6276595592498779,
"avg_line_length": 18.789474487304688,
"blob_id": "7cc07a77d27c110679e196e9bd553e40e1e23721",
"content_id": "882f09fa981731dc2ed2b5f747bdf7ef23e93d2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 376,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 19,
"path": "/Python/create_file",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env -S python3 -u\nfrom pathlib import Path\n\nimport click\n\n\[email protected]()\[email protected](\"file\", metavar=\"FILE_NAME\")\[email protected]_option(\"-h\", \"--help\")\ndef create_file(file):\n file_path = Path(file)\n file_path.parent.mkdir(parents=True, exist_ok=True)\n with file_path.open(\"w\"):\n pass\n return 0\n\n\nif __name__ == \"__main__\":\n create_file()\n"
},
{
"alpha_fraction": 0.713087260723114,
"alphanum_fraction": 0.713087260723114,
"avg_line_length": 38.733333587646484,
"blob_id": "efe76829cd371ce86fba21754d26a8a678a942ed",
"content_id": "7d615d0160823516551d7f6cc290ac200cc5c7fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 596,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 15,
"path": "/Python/find_files/README.md",
"repo_name": "dboyliao/SmallTools",
"src_encoding": "UTF-8",
"text": "## Find Files with Specific Surfixes \n\nThis short python script is used for copying files with specific surfixes from \n\nsource directory and all its subdirectories to destination directory.\n\n## Basic Usage\n\n- `python find_files.py -h`\n - This will print out all options and minimum directions.\n\n- `python find_files.py -f src -t dest`\n - copy all files with surfix `png`, `jepg`, `gif` and `jpg` from `src` (directory) to `dest` (directory).\n - you can specify surfixes you want using `-s` or `--surfixes` option.\n - ex: `python find_files.py -f src -t dest -s md` (Copy all markdown files).\n"
}
] | 36 |
johnfelipe/oct-detection | https://github.com/johnfelipe/oct-detection | ca49195f130ead28beb6001896f64db18c285131 | 3038578b52fc6f0ae46408da8b128d5d654e38b5 | ab1088b7170cde7b67e4f66dcdd6a5dd8ada58de | refs/heads/main | 2023-05-06T05:06:08.872670 | 2021-06-06T13:31:15 | 2021-06-06T13:31:15 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.779291570186615,
"alphanum_fraction": 0.7847411632537842,
"avg_line_length": 51.42856979370117,
"blob_id": "d948c7815863eb398e5315dac6944888aa97cb5a",
"content_id": "dcfa6f76ce970a7fda8b768d1005bad9337a4053",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1875,
"license_type": "no_license",
"max_line_length": 328,
"num_lines": 35,
"path": "/README.md",
"repo_name": "johnfelipe/oct-detection",
"src_encoding": "UTF-8",
"text": "# Description \nLe but du projet, est de mettre en place une IA, permettant de catégoriser les Retinal OCT Images (optical coherence tomography) en 4 classes: \n- Normal \n- CNV\n- DME\n- DRUSEN\n\nJ'ai opté pour la mise en place d'un classificateur par le biais de modèles de Deep Learning pour la classification d'images.\n\nMerci de noter que le dataset est disponible sur [Kaggle](https://www.kaggle.com/paultimothymooney/kermany2018). J'en remercie les auteurs.\n\n\n# Modélisation et Architecture des modèles\nJ'ai mis en place différentes architectures basées sur les réseaux de neurones convolutifs (CNN), le transfert learning ainsi que les modèles hybrides (combinant l'extraction de features avec les modèles de machine learning standard). Je les ai évalués avec les différentes métriques.\n\n## Les modèles entraînés \n\n- Un modèle CNN personnalisé\n- Le modèle LeNet\n- Un modèle de Transfert Learning EfficientNetB5\n- Un modèle de Transfer Learning VGG16\n\n## Préprocessing des données\nVeuillez vous réferer à la section dataviz du notebook `OCT.ipynb`.\n\n## Modeling\nVeuillez vous réferer à la section Modeling du notebook `OCT.ipynb`. \n\n## Streamlit\n\nJ'ai mis en place une application web, en utilisant [Streamlit]( https://www.streamlit.io/). Cette application permettra de choisir un modèle, de télécharger une OCT et d'afficher le résultat de la classification. Voir [ici](https://github.com/sihamsaid/oct-detection/oct-streamlit.py) pour plus de détails sur l'implémentation.\n\nL'utilisation de cette application, requiert l'exécution du notebook (voir ci dessous), et de sauvegarder les modèles générés, dans le dossier [models](https://github.com/sihamsaid/oct-detection/models). J'ai mis les modèles `CNN`, `LeNet` et `VGG16` au format `zip`.\n\nLa commande qui lance notre application streamlit est : `streamlit run oct-streamlit.py`\n"
},
{
"alpha_fraction": 0.6663037538528442,
"alphanum_fraction": 0.6869896650314331,
"avg_line_length": 32.40909194946289,
"blob_id": "72bafa6ef1ae75741163b2bf979e9ea6860639a6",
"content_id": "fb3779694ca30963841040af2401015bcbcab540",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3710,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 110,
"path": "/oct-streamlit.py",
"repo_name": "johnfelipe/oct-detection",
"src_encoding": "UTF-8",
"text": "\"\"\"\n- pip install opencv-python\n- pip install streamlit\n- pip install tensorflow\n\"\"\"\n\nfrom os import write\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport cv2 \nimport tensorflow as tf\n\n# Encoding des différentes classes\nENCODING = {'CNV': 0, 'DME': 1, 'DRUSEN': 2, 'NORMAL': 3}\nIMAGES_TYPES = list(ENCODING.keys())\n\nst.title(\"Prédire la classe d'une Tomographie en Cohérence Optique\")\nst.subheader(\"Définition [Wikipedia] : \")\n\ndescription=\"\"\"\nLa tomographie en cohérence optique1 ou tomographie optique cohérente (TCO ou (en) OCT) est\nune technique d'imagerie médicale bien établie qui utilise une onde lumineuse pour capturer des\nimages tridimensionnelles d'un matériau qui diffuse la lumière (par exemple un tissu biologique), \navec une résolution de l'ordre du micromètre (1 µm).\nLa tomographie en cohérence optique est basée sur une technique interférométrique à faible cohérence, \nutilisant habituellement une lumière dans l'infrarouge proche.\n\"\"\"\n\nst.write('', description)\n\ndf = pd.DataFrame({\n 'Nom du Modèle': [\"CNN\", \"LeNet\", \"EfficientNetB5\", \"VGG16\"],\n 'Description': [\"CNN\", \"LeNet\", \"EfficientNetB5\", \"VGG16\"],\n 'Chemin':['CNN', \"LeNet\", 'EfficientNetB5', 'VGG16']\n })\n\n\nst.subheader(\"Veuillez choisir un modèle : \")\noption = st.selectbox('', df['Nom du Modèle'])\n\n'Vous avez selectionné le modèle : ', option\n\ndf_model = df[df['Nom du Modèle'] == option]\nst.subheader(\"Architrcture et Métriques : \")\n\n# Chemin vers l'architecture\nchemin = f\"./models/{df_model['Chemin'].values[0]}\"\n\n# Afficher le summary du modèle\nmodel_summary = open(f\"{chemin}/summary.txt\").read()\nst.text(model_summary)\n\n# Afficher la loss accuracy\nloss_image = plt.imread(f\"{chemin}/loss_accuracy.png\")\nst.image(loss_image) \n\n# Afficher la matrice de confusion\nconfusion_image = plt.imread(f\"{chemin}/confusion_matrix.png\")\nst.image(confusion_image) \n\n# Afficher le rapport de classification\nclassification_report = open(f\"{chemin}/classification_report.txt\").read()\nst.text(classification_report)\n\n\n# Load le modèle Keras stocké\nuploaded_file = st.file_uploader(\"Télécharger votre Tomographie\", type=['png','jpeg', 'jpg'])\nif uploaded_file is not None:\n st.write(uploaded_file)\n image = plt.imread(uploaded_file)\n image = cv2.resize(image, dsize = (256, 256))\n st.image(image)\n\n if option not in \"VGG16\":\n # input is (256, 256)and we should convert (256, 256, 1)\n image=image.reshape(256, 256, 1)\n else:\n # VGG16 expects (256, 256, 3) \n image=cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n st.write(image.shape)\n # Mettre l'image dans un numpy array.\n array=np.array(([image]))\n\n st.write(f\"Loading model {option}...\")\n model = tf.keras.models.load_model(f\"{chemin}\")\n st.write(\"Model loaded.\")\n st.write(\"Start Prediction...\")\n classes = model.predict(array)\n st.write(\"Prediction done.\")\n # Récupérer l'index de la classe\n index = np.argmax(classes[0], axis=0)\n # calculer le pourcentage\n valeur = classes[0][index]\n valeur = round(valeur*100, 2)\n df_prediction = pd.DataFrame(data=classes[0], index = IMAGES_TYPES)\n df_prediction = df_prediction.rename({'0': 'Prédiction'})\n st.write(df_prediction)\n for key in ENCODING.keys():\n if index == ENCODING[key]:\n type_oct=key\n st.write(f'OCT de type {type_oct} à {valeur}%')\n if type_oct in uploaded_file.name:\n st.image('./icons/check.png', width=None)\n else:\n # i.e => 
'NORMAL-5324912-1.jpeg'\n real_value = uploaded_file.name.split('-')[0]\n st.error(f'Should be {real_value}')\n st.image('./icons/cross.png', width=None)"
}
] | 2 |
AdmireKhulumo/Final-Exam-Mark-ML-Prediction | https://github.com/AdmireKhulumo/Final-Exam-Mark-ML-Prediction | 656aebd4f4ea4ead1147482fa1619f03978c8d1e | c4f323de42ea7648b60d58a05164c4d93ae6707c | 860ac4e1c070eb4962f28ce603f71c72faca5cfe | refs/heads/main | 2023-01-04T19:24:25.965924 | 2020-10-28T23:36:39 | 2020-10-28T23:36:39 | 308,160,033 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6501767039299011,
"alphanum_fraction": 0.6651943325996399,
"avg_line_length": 29.594594955444336,
"blob_id": "a94e31c14c22ed3d9eb1fa51ea37f976263b6ab7",
"content_id": "97b9cdb7d5a7f71bd4080e98acacece160db08f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1132,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 37,
"path": "/flaskAPI/app.py",
"repo_name": "AdmireKhulumo/Final-Exam-Mark-ML-Prediction",
"src_encoding": "UTF-8",
"text": "from flask import Flask, jsonify, request\nimport pickle\nimport numpy as np\nfrom flask_cors import CORS\n\n#load model from file\npickle_in = open(\"marksmodel.pickle\", \"rb\")\nmodel = pickle.load(pickle_in)\n\napp = Flask(__name__)\nCORS(app)\n\n\[email protected]('/', methods=['GET'])\ndef test():\n return \"Service Working!\"\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n #change request body to json\n data = request.get_json(force=True)\n\n #parse values from string to int\n predict_request = [int(data[\"sex\"]), int(data[\"age\"]), int(data[\"studytime\"]), int(data[\"failures\"]), int(data[\"romantic\"]), int(data[\"goout\"]), int(data[\"Dalc\"]), int(data[\"Walc\"]), int(data[\"G1\"]), int(data[\"G2\"]),]\n\n #convert request to a proper np array that can be understood by predicter\n predict_request = np.array([predict_request])\n\n # Make A prediction with order sex, age, studytime, failures, romantic, goout, Dalc, Walc, G1, G2\n prediction = model.predict(predict_request)\n\n return jsonify(prediction[0])\n\n\nif __name__ == '__main__':\n #app.run(host='0.0.0.0', port=8080, debug=True)\n app.run(port=5000, debug=True)\n"
},
{
"alpha_fraction": 0.5859231948852539,
"alphanum_fraction": 0.6106032729148865,
"avg_line_length": 22.782608032226562,
"blob_id": "5dfc1286ecb52e1064cd1634f9c73df5cf022628",
"content_id": "4bd86358f75bc3d4cec9b8fcf3e3fc64dc7bbc6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1118,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 46,
"path": "/client-marks-predictor/src/App.js",
"repo_name": "AdmireKhulumo/Final-Exam-Mark-ML-Prediction",
"src_encoding": "UTF-8",
"text": "import React, {useState} from 'react';\nimport './App.css';\nimport Questions from './components/questions';\nimport axios from 'axios';\nimport {useQuery} from 'react-query';\nimport {Card} from \"@material-ui/core\";\n\nconst fetchPrediction = async ( query) =>{\n \n const res = await fetch ('http://127.0.0.1:5000/predict',{\n method: 'POST',\n body: JSON.stringify(query)\n });\n\n return res.json();\n\n};\n\n\nfunction App() {\n const [query, setQuery] = useState();\n const [prediction, setPrediction] = useState();\n \n const getPrediction = async(query1)=>{\n const result = await axios.post('http://127.0.0.1:5000/predict', query1);\n setPrediction(result.data);\n };\n\n\n return (\n <div className=\"App\">\n <h1> <span role='img' ariel-aria-label='emoji'>🤷🏽♀️</span> What Mark Will I Get? <span role='img' ariel-aria-label='emoji'>🤷🏾♂️</span></h1>\n <Questions setQuery={setQuery} getPrediction={getPrediction}/>\n\n {prediction && \n <div>\n <h3>Predicted Mark: {prediction*5}% </h3>\n </div>\n }\n\n\n </div>\n );\n}\n\nexport default App;\n"
},
{
"alpha_fraction": 0.4093869924545288,
"alphanum_fraction": 0.42341741919517517,
"avg_line_length": 27.110328674316406,
"blob_id": "c95debbbcfffe8ae1d244fd7619a465c1d63429c",
"content_id": "412481670145c6f649c43c6887eb7599f7c11457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 11974,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 426,
"path": "/client-marks-predictor/src/components/questions.js",
"repo_name": "AdmireKhulumo/Final-Exam-Mark-ML-Prediction",
"src_encoding": "UTF-8",
"text": "import React, { useState } from \"react\";\n\n//MUI\nimport { makeStyles } from \"@material-ui/core/styles\";\nimport {\n\tButton,\n\tRadio,\n\tRadioGroup,\n\tFormControlLabel,\n\tFormLabel,\n\tDialog,\n\tDialogActions,\n\tDialogContent,\n\tDialogContentText,\n\tGrid,\n TextField,\n Slider,\n Typography\n} from \"@material-ui/core\";\n\nconst useStyles = makeStyles((theme) => ({\n\tpaper: {\n\t\tmarginTop: theme.spacing(8),\n\t\tdisplay: \"flex\",\n\t\tflexDirection: \"column\",\n\t\talignItems: \"center\"\n\t},\n\tform: {\n\t\twidth: \"100%\", // Fix IE 11 issue.\n\t\tmarginTop: theme.spacing(3)\n\t},\n\tsubmit: {\n\t\tmargin: theme.spacing(3, 0, 2)\n\t},\n\tradio: {\n\t\tdisplay: \"flex\",\n\t\tflexDirection: \"row\",\n\t\talignItems: \"center\"\n\t},\n\tradioSet: {\n\t\tdisplay: \"flex\",\n\t\tflexDirection: \"column\",\n\t\talignItems: \"center\"\n\t}\n}));\n\nexport default function Questions({setQuery, getPrediction}) {\n\tconst classes = useStyles();\n\tconst [open, setOpen] = useState(false);\n\tconst [error, setError] = useState(\"\");\n const [success, setSuccess] = useState(false);\n const [age, setAge] = useState(15);\n const [failures, setFailures] = useState(0)\n const [study, setStudy] = useState(1);\n const [G1, setG1] = useState(0);\n const [G2, setG2] = useState(0);\n \n //for error dialogue box\n\tconst handleClose = () => {\n\t\tsetOpen(false);\n\t};\n\n\tconst submitDetails = (e) => {\n\t\te.preventDefault();\n const { sex, romantic, goout, Walc, Dalc} = e.target.elements;\n \n\n\t\tif (\n\t\t\t!sex.value ||\n\t\t\t!romantic.value ||\n\t\t\t!goout.value ||\n\t\t\t!Walc.value ||\n\t\t\t!Dalc.value\n\t\t) {\n\t\t\tsetError(\"Please Fill In All Fields!\");\n\t\t\tsetOpen(true);\n\t\t\treturn \"\";\n }\n else{\n const query = {\n age: age,\n sex: Number(sex.value),\n romantic: Number(romantic.value),\n goout: Number(goout.value),\n Walc: Number(Walc.value),\n Dalc: Number(Dalc.value),\n failures: failures,\n studytime: 10-study ,\n G1: G1/5,\n G2: G2/5,\n };\n console.log(query);\n getPrediction(query);\n }\n\n \n\n };\n \n const marks = [\n {value: 0,label: '0%',},\n //{value: 25,label: '25%'},\n {value: 50,label: '50%'},\n //{value: 75,label: '75%'},\n {value: 100,label: '100%'}\n ];\n\n const retakes = [\n {value:0, label:'0'},\n {value:1, label:'1'},\n {value:2, label:'2'},\n {value:3, label:'3'},\n ]\n\n const ages = [\n {value:15, label:'15'},\n {value:22, label:'22'},\n ]\n\n const hours = [\n {value:1, label:'1h'},\n {value:5, label:'5h'},\n {value:10, label:'10h'},\n ]\n \n const changeAge = (e, newValue)=>{\n setAge(newValue);\n };\n\n const changeFailures = (e, newValue)=>{\n setFailures(newValue);\n };\n\n const changeG1 = (e, newValue)=>{\n setG1(newValue);\n };\n\n const changeG2 = (e, newValue)=>{\n setG2(newValue);\n };\n\n const changeStudy = (e, newValue)=>{\n setStudy(newValue);\n };\n\n\n\treturn (\n\t\t<div style={{maxWidth:'600px'}}>\n\t\t\t{success === false && (\n\t\t\t\t<form className={classes.form} noValidate onSubmit={submitDetails}>\n\t\t\t\t\t<Grid container spacing={2}>\n <h3>Personal Questions</h3>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>1. How Old Are You?</strong></FormLabel>\n <Slider\n defaultValue={15}\n onChange={changeAge}\n id='age'\n aria-labelledby=\"discrete-slider\"\n valueLabelDisplay=\"auto\"\n step={1}\n marks={ages}\n min={15}\n max={22}\n />\n\t\t\t\t\t\t</Grid>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>2. 
Pick Your Gender:</strong> </FormLabel>\n <RadioGroup\n\t\t\t\t\t\t\t\taria-label=\"sex\"\n\t\t\t\t\t\t\t\tname=\"sex\"\n\t\t\t\t\t\t\t\tid=\"sex\"\n\t\t\t\t\t\t\t\tclassName={classes.radio}\n\t\t\t\t\t\t\t>\n <FormControlLabel \n value=\"1\" \n control={<Radio />} \n label=\"Male\" \n />\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"0\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Female\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</RadioGroup>\n\t\t\t\t\t\t</Grid>\n\t\t\t\t\t\t\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>3. Are You In A Romantic Relationship?</strong> </FormLabel>\n\t\t\t\t\t\t\t<RadioGroup\n\t\t\t\t\t\t\t\taria-label=\"romantic\"\n\t\t\t\t\t\t\t\tname=\"romantic\"\n\t\t\t\t\t\t\t\tid=\"romantic\"\n\t\t\t\t\t\t\t\tclassName={classes.radio}\n\t\t\t\t\t\t\t>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"1\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"YES\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"0\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"NO\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</RadioGroup>\n\t\t\t\t\t\t</Grid>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>4. How Often Do You Go Out With Friends?</strong> (frequency)</FormLabel>\n\t\t\t\t\t\t\t<RadioGroup\n\t\t\t\t\t\t\t\taria-label=\"goout\"\n\t\t\t\t\t\t\t\tname=\"goout\"\n\t\t\t\t\t\t\t\tid=\"goout\"\n\t\t\t\t\t\t\t\tclassName={classes.radio}\n\t\t\t\t\t\t\t>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"1\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Very Low\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"2\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Low\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"3\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Moderate\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"4\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"High\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"5\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Very High\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</RadioGroup>\n\t\t\t\t\t\t</Grid>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>5. 
How Much Alcohol Do You Drink During WEEKDAYS?</strong> (frequency)</FormLabel>\n\t\t\t\t\t\t\t<RadioGroup\n\t\t\t\t\t\t\t\taria-label=\"Walc\"\n\t\t\t\t\t\t\t\tname=\"Walc\"\n\t\t\t\t\t\t\t\tid=\"Walc\"\n\t\t\t\t\t\t\t\tclassName={classes.radio}\n\t\t\t\t\t\t\t>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"1\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Very Low\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"2\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Low\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"3\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Moderate\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"4\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"High\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"5\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Very High\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</RadioGroup>\n\t\t\t\t\t\t</Grid>\n \n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>6. How Much Alcohol Do You Drink During WEEKENDS?</strong> (frequency)</FormLabel>\n\t\t\t\t\t\t\t<RadioGroup\n\t\t\t\t\t\t\t\taria-label=\"Dalc\"\n\t\t\t\t\t\t\t\tname=\"Dalc\"\n\t\t\t\t\t\t\t\tid=\"Dalc\"\n\t\t\t\t\t\t\t\tclassName={classes.radio}\n\t\t\t\t\t\t\t>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"1\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Very Low\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t<FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"2\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Low\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"3\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Moderate\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"4\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"High\"\n\t\t\t\t\t\t\t\t/>\n <FormControlLabel\n\t\t\t\t\t\t\t\t\tvalue=\"5\"\n\t\t\t\t\t\t\t\t\tcontrol={<Radio />}\n\t\t\t\t\t\t\t\t\tlabel=\"Very High\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</RadioGroup>\n\t\t\t\t\t\t</Grid>\n \n <h3>Academic Questions</h3>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>7. How Many Retakes Do You Have?</strong></FormLabel>\n <Slider\n defaultValue={0}\n id='failures'\n valueLabelDisplay=\"on\"\n // getAriaValueText={value}\n onChange={changeFailures}\n aria-labelledby=\"discrete-slider\"\n valueLabelDisplay=\"auto\"\n step={1}\n marks={retakes}\n min={0}\n max={3}\n />\n\t\t\t\t\t\t</Grid>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>8. Many Hours Do You Study Per Week?</strong></FormLabel>\n <Slider\n defaultValue={1}\n id='studytime'\n // getAriaValueText={value}\n onChange={changeStudy}\n valueLabelDisplay=\"on\"\n aria-labelledby=\"discrete-slider\"\n valueLabelDisplay=\"auto\" \n step={1}\n marks={hours}\n min={1}\n max={10}\n />\n\t\t\t\t\t\t</Grid>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>9. 
Test 1 Mark:</strong> <i>...estimate if unknown</i> </FormLabel>\n <Slider\n defaultValue={0} \n id='G1'\n // getAriaValueText={value}\n onChange={changeG1}\n valueLabelDisplay=\"on\"\n aria-labelledby=\"discrete-slider\"\n valueLabelDisplay=\"auto\"\n step={5}\n marks={marks}\n min={0}\n max={100}\n />\n\t\t\t\t\t\t</Grid>\n\n <Grid item xs={12}>\n\t\t\t\t\t\t\t<FormLabel component=\"legend\"><strong>10. Test 2 Mark:</strong> <i>...estimate if unknown</i> </FormLabel>\n <Slider\n defaultValue={0}\n id='G2'\n // getAriaValueText={value}\n onChange={changeG2}\n valueLabelDisplay=\"on\"\n aria-labelledby=\"discrete-slider\"\n valueLabelDisplay=\"auto\"\n step={5}\n marks={marks}\n min={0}\n max={100}\n />\n\t\t\t\t\t\t</Grid>\n\n\t\t\t\t\t</Grid>\n\n\t\t\t\t\t<Button\n\t\t\t\t\t\ttype=\"submit\"\n\t\t\t\t\t\tfullWidth\n\t\t\t\t\t\tvariant=\"contained\"\n\t\t\t\t\t\tcolor=\"primary\"\n\t\t\t\t\t\tclassName={classes.submit}\n\t\t\t\t\t>\n\t\t\t\t\t\t<strong>PREDICT FINAL MARK!</strong>\n\t\t\t\t\t</Button>\n\t\t\t\t</form>\n\t\t\t)}\n\n\n\t\t\t{/*--------Dialogue For Input Error Display-------*/}\n\t\t\t<div>\n\t\t\t\t<Dialog\n\t\t\t\t\topen={open}\n\t\t\t\t\tonClose={handleClose}\n\t\t\t\t\taria-labelledby=\"alert-dialog-title\"\n\t\t\t\t\taria-describedby=\"alert-dialog-description\"\n\t\t\t\t>\n\t\t\t\t\t<DialogContent>\n\t\t\t\t\t\t<DialogContentText id=\"alert-dialog-description\">\n\t\t\t\t\t\t\t{error}\n\t\t\t\t\t\t</DialogContentText>\n\t\t\t\t\t</DialogContent>\n\t\t\t\t\t<DialogActions>\n\t\t\t\t\t\t<Button onClick={handleClose} color=\"primary\" autoFocus>\n\t\t\t\t\t\t\tOK\n\t\t\t\t\t\t</Button>\n\t\t\t\t\t</DialogActions>\n\t\t\t\t</Dialog>\n\t\t\t</div>\n\t\t</div>\n\t);\n}"
},
{
"alpha_fraction": 0.6244398951530457,
"alphanum_fraction": 0.6525458097457886,
"avg_line_length": 16.633092880249023,
"blob_id": "dd233a8252ed37344071bdcf2386cd4fcc3f227f",
"content_id": "3ed0b0a5ba914868a4608e2082bfeeeba3df12e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2455,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 139,
"path": "/notebooks/modelCreator.py",
"repo_name": "AdmireKhulumo/Final-Exam-Mark-ML-Prediction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[45]:\n\n\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn import preprocessing, model_selection, linear_model\n\n\n# ## Load The Data\n\n# In[8]:\n\n\n#Load and checkout the data\ndf = pd.read_csv('student-mat.csv')\ndf.describe()\n\n\n# In[10]:\n\n\n#Select specific columns\ndf = df[[\"sex\", \"age\", \"studytime\", \"failures\",\"romantic\", \"goout\", \"Dalc\", \"Walc\", \"G1\", \"G2\", \"G3\"]]\ndf\n\n\n# ## Process The Data\n\n# In[73]:\n\n\n#initiate preprocessor\nle = preprocessing.LabelEncoder()\n\n#change M/F to 1/0 on sex\nsex = le.fit_transform(list(df[\"sex\"]))\n\n#change Y/N to 1/0 on romantic\nromantic = le.fit_transform(list(df[\"romantic\"]))\n\n#show romantic column's new look\nromantic\n\n\n# ## Create Train/Test Arrays\n\n# In[68]:\n\n\n#create input array\n#zip up lists from previous step\nX = list(zip(sex, df[\"age\"], df[\"studytime\"], df[\"failures\"], romantic, df[\"goout\"], df[\"Dalc\"], df[\"Walc\"], df[\"G1\"], df[\"G2\"]))\n\n\n# In[65]:\n\n\n#create target array\ny = df[\"G3\"]\n\n\n# In[43]:\n\n\n#split X and y to train and test datasets\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)\n\n\n# ## Training A Linear Regression Model\n\n# In[46]:\n\n\n#Select Model Type\nmodel = linear_model.LinearRegression()\n\n#Train model\nmodel.fit(X_train, y_train)\n\n\n# In[48]:\n\n\n#Check Accuracy score\naccuracy = model.score(X_test, y_test)\nprint(accuracy * 100)\n\n\n# ### Train the model many times and store best one\n\n# In[61]:\n\n\n#Create A Loop That Trains a model until accuracy > 90%\nmodel = linear_model.LinearRegression()\naccuracy = 0\nruns = 0\nwhile (accuracy < 0.9):\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)\n model.fit(X_train, y_train)\n accuracy = model.score(X_test, y_test)\n runs += 1\n #when acceptable model is found, store it\n if (accuracy >= 0.9):\n print(accuracy)\n \n #open file for writing and \"dump\" model inside\n with open(\"marksmodel.pickle\",\"wb\") as f:\n pickle.dump(model, f)\n\n#print number of trainings made\nprint(\"Trainings = \", runs)\n\n\n# ## Predicting\n\n# In[63]:\n\n\n#Load Stored Model\npickle_in = open(\"marksmodel.pickle\", \"rb\")\nmodel = pickle.load(pickle_in)\n\n\n# In[75]:\n\n\n#Make A prediction with order sex, age, studytime, failures, romantic, goout, Dalc, Walc, G1, G2\n#Note M/F = 1/0 and Y/N = 1/0 from preprocessing\n\nprediction = model.predict([[1,21,5,2,1,3,2,5,12,15]])\nprint(prediction)\n\n\n# In[ ]:\n\n\n\n\n"
},
{
"alpha_fraction": 0.7867370247840881,
"alphanum_fraction": 0.7874906063079834,
"avg_line_length": 81.9375,
"blob_id": "4e8218044bc44e376a3e6f82376a05554ebb885f",
"content_id": "cddc0761aea26d81d0a76437b0fc1cd7776dc6ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1327,
"license_type": "no_license",
"max_line_length": 496,
"num_lines": 16,
"path": "/README.md",
"repo_name": "AdmireKhulumo/Final-Exam-Mark-ML-Prediction",
"src_encoding": "UTF-8",
"text": "# Final Mark ML Prediction\n#### *Fullstack ML Project*\n \n \n## Description\n\nThis is a fullstack machine learning project consisting of:\n* **Machine Learning Model** - This is in the notebooks folder. I used a pre-existing [dataset from Kaggle](https://www.kaggle.com/uciml/student-alcohol-consumption) about some students'marks in relation to many factors including age, gender, test 1 and most interestingly, alcohol consumption. I trained a model though sklearn's linear regression algorithm to estimate their final exam mark based on severalfactors. This model was then saved in a .pickle file to use in the backend Flask server.\n\n* **Flask API** - A python backend server app with an API route that accepts an array input then uses a pre-defned prediction model to return a single final exam mark.\n\n* **React Client Web App** - This client consumes the Flask API by sending it some inputs from the user then receiving an estimate of that student's final mark according to the parameters supplied.\n\n\n## Future Improvements\nI wish to produce the same kind of application using real data from a local university. Such a platform could be used by school counsellors and academic advisors to predict a student's final mark and offer counselling or help before it is too late. The only challenge is to collect the necessary data.\n"
}
] | 5 |
stxnext-kindergarten/presence-analyzer-jromaniuk | https://github.com/stxnext-kindergarten/presence-analyzer-jromaniuk | 63bcf08a8cec806637edb7993eed72bcc54a352d | 6e1cb2b289f44ad63d530382652c1dc404cb1349 | eadb67967bec2b45df6072ef2a873b751012dd07 | refs/heads/master | 2020-04-04T00:00:32.843426 | 2015-01-26T13:29:24 | 2015-01-26T13:29:24 | 28,913,889 | 0 | 1 | null | 2015-01-07T12:38:55 | 2015-01-23T14:49:43 | 2015-01-26T13:29:24 | Python | [
{
"alpha_fraction": 0.5425898432731628,
"alphanum_fraction": 0.5522730946540833,
"avg_line_length": 27.339534759521484,
"blob_id": "4a98df988e47939162675dec36bdd261820da465",
"content_id": "eb6f1ae9323f7aa251a4edb338a891bebeaf7561",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6093,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 215,
"path": "/src/presence_analyzer/utils.py",
"repo_name": "stxnext-kindergarten/presence-analyzer-jromaniuk",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nHelper functions used in views.\n\"\"\"\nimport csv\nimport logging\nimport os\nimport requests\nimport time\n\nfrom datetime import datetime\nfrom flask import Response\nfrom functools import wraps\nfrom json import dumps\nfrom presence_analyzer.main import app\nfrom threading import Lock\nfrom werkzeug.contrib.cache import SimpleCache\nfrom xml.etree import ElementTree as etree\nlog = logging.getLogger(__name__) # pylint: disable=invalid-name\nlock = Lock()\nsimple_cache = SimpleCache()\n\n\ndef cache(expires):\n \"\"\"\n Cache user data.\n :param integer expires:\n :return function:\n \"\"\"\n def decorator(function):\n \"\"\"\n Decorator.\n :param function function:\n :return function:\n \"\"\"\n @wraps(function)\n def wrapper(*args, **kwargs):\n \"\"\"\n Wrapper.\n :param args:\n :param kwargs:\n :return list:\n \"\"\"\n global lock\n\n with lock:\n data = simple_cache.get('user-data')\n if data is None:\n data = function(*args, **kwargs)\n simple_cache.set('user-data', data, expires)\n return data\n return wrapper\n return decorator\n\n\ndef get_users_data():\n \"\"\"\n Get User data from xml.\n :return dict:\n \"\"\"\n users = {}\n try:\n root = etree.parse(app.config['USERS_DATA'])\n server = root.find('server')\n port = server.find('port').text # pylint: disable=no-member\n protocol = server.find('protocol').text # pylint: disable=no-member\n host = server.find('host').text # pylint: disable=no-member\n for user in root.iter('user'):\n id = int(user.get('id'))\n name = user.find('name').text.encode('utf-8')\n url = user.find('avatar').text.encode('utf-8')\n avatar = \"{0}://{1}:{2}{3}\".format(protocol, host, port, url)\n users[id] = {\n 'name': name,\n 'avatar': avatar\n }\n except IOError, e:\n log.debug('File users.xml does not exist, run download-users command')\n return users\n\n\ndef download_users_xml():\n \"\"\"\n Download file.\n \"\"\"\n root_dir = os.path.dirname(os.path.realpath(__file__))\n DEBUG_CFG = os.path.join('{0}/../../'.format(root_dir),'parts', 'etc', 'debug.cfg')\n app.config.from_pyfile(DEBUG_CFG)\n url = app.config['USERS_DATA_EXTERNAL']\n r = requests.get(url)\n with open(app.config['USERS_DATA'], 'w') as f:\n f.write(r.text.encode('ISO-8859-1'))\n\n\ndef jsonify(function):\n \"\"\"\n Creates a response with the JSON representation of wrapped function result.\n \"\"\"\n @wraps(function)\n def inner(*args, **kwargs):\n \"\"\"\n This docstring will be overridden by @wraps decorator.\n \"\"\"\n return Response(\n dumps(function(*args, **kwargs)),\n mimetype='application/json'\n )\n return inner\n\n\n@cache(15)\ndef get_data():\n \"\"\"\n Extracts presence data from CSV file and groups it by user_id. 
It creates structure like this.\n data = {\n 'user_id': {\n datetime.date(2013, 10, 1): {\n 'start': datetime.time(9, 0, 0),\n 'end': datetime.time(17, 30, 0),\n },\n datetime.date(2013, 10, 2): {\n 'start': datetime.time(8, 30, 0),\n 'end': datetime.time(16, 45, 0),\n },\n }\n }\n \"\"\"\n data = {}\n with open(app.config['DATA_CSV'], 'r') as csvfile:\n presence_reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(presence_reader):\n if len(row) != 4:\n # ignore header and footer lines\n continue\n\n try:\n user_id = int(row[0])\n date = datetime.strptime(row[1], '%Y-%m-%d').date()\n start = datetime.strptime(row[2], '%H:%M:%S').time()\n end = datetime.strptime(row[3], '%H:%M:%S').time()\n except (ValueError, TypeError):\n log.debug('Problem with line %d: ', i, exc_info=True)\n\n data.setdefault(user_id, {})[date] = {'start': start, 'end': end}\n return data\n\n\ndef group_by_weekday(items):\n \"\"\"\n Groups presence entries by weekday.\n \"\"\"\n result = [[], [], [], [], [], [], []] # one list for every day in week\n for date in items:\n start = items[date]['start']\n end = items[date]['end']\n result[date.weekday()].append(interval(start, end))\n return result\n\n\ndef group_by_weekday_start_end(items):\n \"\"\"\n Groups presence entries by weekday.\n \"\"\"\n result = {} # one list for every day in week\n\n for date in items:\n start, end = items[date]['start'], items[date]['end']\n if date.weekday() not in result:\n result[date.weekday()] = {\n 'weekday': date.strftime(\"%a\"),\n 'start': [],\n 'end': []\n }\n result[date.weekday()]['start'].append(seconds_since_midnight(start))\n result[date.weekday()]['end'].append(seconds_since_midnight(end))\n\n return result\n\n\ndef avg_time_weekday(items):\n \"\"\"\n Count avg for Groups presence entries by weekday.\n \"\"\"\n for day in items.values():\n day['start'] = stringify_average_date(day['start'])\n day['end'] = stringify_average_date(day['end'])\n return items\n\n\ndef stringify_average_date(list):\n \"\"\"\n Stringify avg date\n \"\"\"\n return time.strftime(\"%Y %m %d %H:%M:%S\", time.gmtime(mean(list)))\n\n\ndef seconds_since_midnight(time):\n \"\"\"\n Calculates amount of seconds since midnight.\n \"\"\"\n return time.hour * 3600 + time.minute * 60 + time.second\n\n\ndef interval(start, end):\n \"\"\"\n Calculates inverval in seconds between two datetime.time objects.\n \"\"\"\n return seconds_since_midnight(end) - seconds_since_midnight(start)\n\n\ndef mean(items):\n \"\"\"\n Calculates arithmetic mean. Returns zero for empty lists.\n \"\"\"\n return float(sum(items)) / len(items) if len(items) > 0 else 0\n"
},
{
"alpha_fraction": 0.6902654767036438,
"alphanum_fraction": 0.6902654767036438,
"avg_line_length": 17.83333396911621,
"blob_id": "e6ea409690938ff1c12019059e04ea9912d3b645",
"content_id": "fd4ec9a1f1322d4edada718b360a22076a4b7dcf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 6,
"path": "/src/presence_analyzer/main.py",
"repo_name": "stxnext-kindergarten/presence-analyzer-jromaniuk",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFlask app initialization.\n\"\"\"\nfrom flask import Flask\n\napp = Flask(__name__) # pylint: disable=invalid-name\n"
},
{
"alpha_fraction": 0.5369724035263062,
"alphanum_fraction": 0.5665661692619324,
"avg_line_length": 31.652849197387695,
"blob_id": "92ae2af3aed45e2e2ad1d5714b3a7f3ddb0015f3",
"content_id": "a81ecd37b3d778ba7f2c97e6829b8635d3b75999",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12604,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 386,
"path": "/src/presence_analyzer/tests.py",
"repo_name": "stxnext-kindergarten/presence-analyzer-jromaniuk",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nPresence analyzer unit tests.\n\"\"\"\nimport os.path\nimport json\nimport datetime\nimport unittest\n\nfrom flask import Response\nfrom presence_analyzer import main, utils, views\n\nTEST_DATA_CSV = os.path.join(\n os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'\n)\n\n\n# pylint: disable=maybe-no-member, too-many-public-methods, invalid-name, line-too-long\nclass PresenceAnalyzerViewsTestCase(unittest.TestCase):\n \"\"\"\n Views tests.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Before each test, set up a environment.\n \"\"\"\n main.app.config.update({'DATA_CSV': TEST_DATA_CSV})\n self.client = main.app.test_client()\n\n def tearDown(self):\n \"\"\"\n Get rid of unused objects after each test.\n \"\"\"\n pass\n\n def test_mainpage__should_change_location__result_is_redirect_with_302_status_code(self):\n \"\"\"\n Test main page redirect.\n \"\"\"\n resp = self.client.get('/')\n self.assertEqual(resp.status_code, 302)\n assert resp.headers['Location'].endswith('/presence_weekday.html')\n\n def test_index__should_not_find_url__result_is_404_http_exception(self):\n \"\"\"\n Test index page.\n \"\"\"\n resp = self.client.get('/fail.html')\n self.assertEqual(resp.status_code, 404)\n\n def test_index__should_find_url__result_is_200_status_resonse(self):\n \"\"\"\n Test index page.\n \"\"\"\n resp = self.client.get('/presence_weekday.html')\n self.assertEqual(resp.status_code, 200)\n\n def test_api_users_view__should_call_xhr_request__result_is_users_list(self):\n \"\"\"\n Test users listing.\n \"\"\"\n resp = self.client.get(\n '/api/v1/users',\n headers={'X-Requested-With': 'XMLHttpRequest'}\n )\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)\n self.assertDictEqual(data[0], {u'user_id': 10, u'name': u'User 10'})\n\n def test_api_users_view__should_call_not_xhr_request__result_is_501_http_exception(self):\n \"\"\"\n Test users view.\n \"\"\"\n resp = self.client.get(\n '/api/v1/users',\n headers={}\n )\n self.assertEqual(resp.status_code, 501)\n\n def test_user_mean_time_weekday_view__should_use_not_existing_user__result_is_404_http_exception(self):\n \"\"\"\n Test user mean time weekday on non existing user.\n \"\"\"\n resp = self.client.get(\n '/api/v1/mean_time_weekday/9',\n headers={'X-Requested-With': 'XMLHttpRequest'}\n )\n self.assertEqual(resp.status_code, 404)\n\n def test_user_mean_time_weekday_view__should_rise_no_xhr_before_no_user__result_is_501_http_exception(self):\n \"\"\"\n Test user mean time weekday.\n \"\"\"\n resp = self.client.get(\n '/api/v1/mean_time_weekday/9',\n headers={}\n )\n self.assertEqual(resp.status_code, 501)\n\n def test_user_mean_time_weekday_view__should_use_existing_user__result_is_a_weekday_list(self):\n \"\"\"\n Test user mean time weekday.\n \"\"\"\n resp = self.client.get(\n '/api/v1/mean_time_weekday/10',\n headers={'X-Requested-With': 'XMLHttpRequest'}\n )\n data = json.loads(resp.data)\n result = [x[0] for x in data]\n expected = [u\"Mon\", u\"Tue\", u\"Wed\", u\"Thu\", u\"Fri\", u\"Sat\", u\"Sun\"]\n self.assertListEqual(expected, result)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n\n def test_presence_weekday_view__should_retrieve_presence_data__result_is_a_extended_weekday_list(self):\n \"\"\"\n Test user presence weekday.\n \"\"\"\n resp = self.client.get(\n '/api/v1/presence_weekday/10',\n 
headers={'X-Requested-With': 'XMLHttpRequest'}\n )\n data = json.loads(resp.data)\n result = [x[0] for x in data]\n expected = [u\"Weekday\", u\"Mon\", u\"Tue\", u\"Wed\", u\"Thu\", u\"Fri\", u\"Sat\", u\"Sun\"]\n self.assertListEqual(expected, result)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n\n def test_presence_weekday_view__should_rise_no_xhr_before_no_user__result_is_501_http_exception(self):\n \"\"\"\n Test user presence weekday.\n \"\"\"\n resp = self.client.get(\n '/api/v1/presence_weekday/9',\n headers={}\n )\n self.assertEqual(resp.status_code, 501)\n\n def test_presence_weekday_view__should_use_not_existing_user__result_is_404_http_exception(self):\n \"\"\"\n Test user mean time weekday on non existing user.\n \"\"\"\n resp = self.client.get(\n '/api/v1/presence_weekday/9',\n headers={'X-Requested-With': 'XMLHttpRequest'}\n )\n self.assertEqual(resp.status_code, 404)\n\n def test_presence_start_end_view__should_use_existing_user__result_is_start_end_list(self):\n \"\"\"\n Test user presence weekday.\n \"\"\"\n resp = self.client.get(\n '/api/v1/presence_start_end/10',\n headers={}\n )\n result = json.loads(resp.data)\n expected = {\n u'1': {u'start': u'1970 01 01 09:39:05', u'end': u'1970 01 01 17:59:52', u'weekday': u'Tue'},\n u'2': {u'start': u'1970 01 01 09:19:52', u'end': u'1970 01 01 16:07:37', u'weekday': u'Wed'},\n u'3': {u'start': u'1970 01 01 15:18:46', u'end': u'1970 01 01 18:53:51', u'weekday': u'Thu'}\n }\n self.assertDictEqual(expected, result)\n\n\nclass SimpleCacheDummy():\n \"\"\"\n Dummy cache object it is created to help with testing cache method.\n \"\"\"\n def __init__(self):\n self._cache = {}\n self.timeout = None\n\n def set(self, key, value, timeout=None):\n \"\"\"\n Sets key and value parameter, time out is here just to keep interface consistency.\n :param mixed key:\n :param mixed value:\n :param integer timeout:\n \"\"\"\n self._cache[key] = value\n self.timeout = timeout\n\n def get(self, key):\n \"\"\"\n Gets value for given key.\n :param mixed key:\n :return mixed :\n \"\"\"\n try:\n return self._cache[key]\n except KeyError:\n return None\n\n\nclass PresenceAnalyzerUtilsTestCase(unittest.TestCase):\n \"\"\"\n Utility functions tests.\n \"\"\"\n def setUp(self):\n \"\"\"\n Before each test, set up a environment.\n \"\"\"\n main.app.config.update({'DATA_CSV': TEST_DATA_CSV})\n\n def tearDown(self):\n \"\"\"\n Get rid of unused objects after each test.\n \"\"\"\n pass\n\n def test_jsonify(self):\n \"\"\"\n Test jsonify.\n \"\"\"\n @utils.jsonify\n def test():\n \"\"\"\n Test function.\n \"\"\"\n return \"test\"\n result = test()\n expected = '\"test\"'\n self.assertIsInstance(result, Response)\n self.assertEqual(result.headers[0], ('Content-Type', u'application/json'))\n self.assertEqual(expected, result.response[0])\n\n def test_get_data__should_set_value_in_cache__result_is_fresh_data_from_get_data(self):\n \"\"\"\n Test get_data\n \"\"\"\n cache_temp = utils.simple_cache\n utils.simple_cache = SimpleCacheDummy()\n expected = {1: 'user'}\n\n @utils.cache(60)\n def get_data():\n \"\"\"\n Function which replace mock.\n \"\"\"\n return expected\n\n result = get_data()\n self.assertEqual(expected, result)\n self.assertEqual(expected, utils.simple_cache.get('user-data'))\n # sets previous state to simple_cache global variable at utils package\n utils.simple_cache = cache_temp\n\n def test_get_data__should_not_set_value_in_cache__result_is_cached_data_from_get_data(self):\n \"\"\"\n Test get 
data.\n \"\"\"\n cache_temp = utils.simple_cache\n utils.simple_cache = SimpleCacheDummy()\n expected = {2: 'user 2'}\n utils.simple_cache.set('user-data', expected, 15)\n not_expected = {1: 'user'}\n\n @utils.cache(60)\n def get_data():\n \"\"\"\n Function which replace mock.\n \"\"\"\n return not_expected\n\n result = get_data()\n self.assertEqual(expected, result)\n self.assertEqual(expected, utils.simple_cache.get('user-data'))\n # sets previous state to simple_cache global variable at utils package\n utils.simple_cache = cache_temp\n\n def test_get_data(self):\n \"\"\"\n Test parsing of CSV file.\n \"\"\"\n data = utils.get_data()\n self.assertIsInstance(data, dict)\n self.assertItemsEqual(data.keys(), [10, 11])\n sample_date = datetime.date(2013, 9, 10)\n self.assertIn(sample_date, data[10])\n self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])\n self.assertEqual(\n data[10][sample_date]['start'],\n datetime.time(9, 39, 5)\n )\n\n def test_group_by_weekday__should_calculate_start_end_interval__result_is_list_of_intervals(self):\n \"\"\"\n Test group by weekday.\n \"\"\"\n date1 = datetime.date(2000, 1, 1)\n start1 = datetime.time(0, 0, 10)\n end1 = datetime.time(0, 0, 30)\n date2 = datetime.date(2000, 1, 8)\n start2 = datetime.time(0, 0, 20)\n end2 = datetime.time(0, 0, 40)\n data = {\n date1: {'start': start1, 'end': end1},\n date2: {'start': start2, 'end': end2}\n }\n\n result = utils.group_by_weekday(data)\n expected = [[], [], [], [], [], [20, 20], []]\n\n self.assertListEqual(expected, result)\n\n def test_group_by_weekday_start_end__should_group_by_weekday__result_dict_of_integers_per_weekday(self):\n \"\"\"\n Test group by weekday start end.\n \"\"\"\n date1 = datetime.date(2000, 1, 1)\n start1 = datetime.time(0, 0, 10)\n end1 = datetime.time(0, 0, 30)\n date2 = datetime.date(2000, 1, 8)\n start2 = datetime.time(0, 0, 20)\n end2 = datetime.time(0, 0, 40)\n data = {\n date1: {'start': start1, 'end': end1},\n date2: {'start': start2, 'end': end2}\n }\n result = utils.group_by_weekday_start_end(data)\n expected = {5: {'weekday': 'Sat', 'start': [10, 20], 'end': [30, 40]}}\n self.assertDictEqual(result, expected)\n\n def test_avg_time_weekday__should_convert_list_of_datetime_to_weekday_avg__result_is_dict_of_avg_datetime(self):\n \"\"\"\n Test avg time weekday.\n \"\"\"\n dates = {1: {'weekday': 1, 'start': [10, 20], 'end': [30, 40]}}\n result = utils.avg_time_weekday(dates)\n expected = {1: {'weekday': 1, 'start': \"1970 01 01 00:00:15\", 'end': \"1970 01 01 00:00:35\"}}\n self.assertDictEqual(expected, result)\n\n def test_seconds_since_midnight__should_transform_time_to_seconds__result_is_number_of_seconds(self):\n \"\"\"\n Test seconds since midnight.\n \"\"\"\n time = datetime.time(0, 1, 10)\n result = utils.seconds_since_midnight(time)\n expected = 70\n self.assertEqual(expected, result)\n\n def test_interval__should_subtract_two_dates__result_is_number_of_seconds(self):\n \"\"\"\n Test interval.\n \"\"\"\n start = datetime.datetime(1999, 12, 1, 0, 0, 1)\n end = datetime.datetime(1999, 12, 1, 0, 2, 1)\n result = utils.interval(start, end)\n expected = 120\n self.assertEqual(expected, result)\n\n def test_mean__should_add_all_list_elements_and_divide_sum_by_list_length__result_is_f_average(self):\n \"\"\"\n Test mean.\n \"\"\"\n result = utils.mean([1, 2, 3])\n expected = 2.0\n self.assertEqual(type(result), type(expected))\n self.assertEqual(result, expected)\n\n def test_mean__should_get_empty_list__result_is_avarage_equal_zero(self):\n \"\"\"\n Test 
mean.\n \"\"\"\n result = utils.mean([])\n expected = 0\n self.assertEqual(expected, result)\n\n\ndef suite():\n \"\"\"\n Default test suite.\n \"\"\"\n base_suite = unittest.TestSuite()\n base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))\n base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))\n return base_suite\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
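The tests above pin down the behaviour of `utils.cache` and `utils.simple_cache` (a `set`/`get` store keyed on `'user-data'` with a timeout), but `presence_analyzer/utils.py` itself is not included in this dump. A minimal sketch of a cache decorator consistent with those tests — the class name, key, and expiry handling here are assumptions, not the project's actual code:

```python
# Sketch (assumption): a time-based cache compatible with the tests above.
import time


class SimpleCache(object):
    """Tiny in-memory cache holding (value, expiry-timestamp) pairs."""

    def __init__(self):
        self._store = {}

    def set(self, key, value, timeout=None):
        expires = time.time() + timeout if timeout else None
        self._store[key] = (value, expires)

    def get(self, key):
        value, expires = self._store.get(key, (None, None))
        if expires is not None and time.time() > expires:
            return None  # entry has expired
        return value


simple_cache = SimpleCache()


def cache(timeout):
    """Cache the wrapped function's result under the 'user-data' key."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            cached = simple_cache.get('user-data')
            if cached is not None:
                return cached
            result = func(*args, **kwargs)
            simple_cache.set('user-data', result, timeout)
            return result
        return wrapper
    return decorator
```

This matches the two cache tests: a fresh call stores the result, and a pre-seeded `'user-data'` entry is returned instead of calling the wrapped function.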
{
"alpha_fraction": 0.5959491729736328,
"alphanum_fraction": 0.6041881442070007,
"avg_line_length": 21.7578125,
"blob_id": "fad32bb7534432439fa5db3a4095d8ceb74b84ca",
"content_id": "12fe3f74980e2b1229d6657686d34fcf0817f313",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2913,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 128,
"path": "/src/presence_analyzer/views.py",
"repo_name": "stxnext-kindergarten/presence-analyzer-jromaniuk",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nDefines views.\n\"\"\"\nimport calendar\nimport logging\n\nfrom flask import redirect, abort, request, render_template\nfrom presence_analyzer.main import app\nfrom presence_analyzer.utils import(\n avg_time_weekday,\n get_data,\n get_users_data,\n group_by_weekday,\n group_by_weekday_start_end,\n jsonify,\n mean\n)\nlog = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\[email protected]('/')\ndef mainpage():\n \"\"\"\n Redirects to front page.\n \"\"\"\n return redirect('/presence_weekday.html')\n\n\[email protected]('/<tab>.html')\ndef index(tab):\n \"\"\"\n Renders template.\n \"\"\"\n tabs = {\n 'presence_weekday': 'Presence by weekday',\n 'mean_time_weekday': 'Mean time weekday',\n 'presence_start_end': 'Presence start-end',\n }\n\n if tab not in tabs:\n log.debug('Page %s not found!', tab)\n abort(404)\n\n return render_template(\"base.html\", tab=tab, tabs=tabs)\n\n\[email protected]('/api/v1/users', methods=['GET'])\n@jsonify\ndef users_view():\n \"\"\"\n Users listing for dropdown.\n \"\"\"\n if not request.is_xhr:\n log.debug('Not xhr request')\n abort(501)\n\n data = get_data()\n user_data = get_users_data()\n return [\n {'user_id': i, 'name': user_data.get(i).get('name'), 'avatar': user_data.get(i).get('avatar')}\n for i in data.keys() if user_data.get(i)\n ]\n\n\[email protected]('/api/v1/mean_time_weekday/<int:user_id>', methods=['GET'])\n@jsonify\ndef mean_time_weekday_view(user_id):\n \"\"\"\n Returns mean presence time of given user grouped by weekday.\n \"\"\"\n data = get_data()\n\n if not request.is_xhr:\n log.debug('Not xhr request')\n abort(501)\n\n if user_id not in data:\n log.debug('User %s not found!', user_id)\n abort(404)\n\n weekdays = group_by_weekday(data[user_id])\n result = [\n (calendar.day_abbr[weekday], mean(intervals))\n for weekday, intervals in enumerate(weekdays)\n ]\n\n return result\n\n\[email protected]('/api/v1/presence_weekday/<int:user_id>', methods=['GET'])\n@jsonify\ndef presence_weekday_view(user_id):\n \"\"\"\n Returns total presence time of given user grouped by weekday.\n \"\"\"\n data = get_data()\n\n if not request.is_xhr:\n log.debug('Not xhr request')\n abort(501)\n\n if user_id not in data:\n log.debug('User %s not found!', user_id)\n abort(404)\n\n weekdays = group_by_weekday(data[user_id])\n result = [\n (calendar.day_abbr[weekday], sum(intervals))\n for weekday, intervals in enumerate(weekdays)\n ]\n\n result.insert(0, ('Weekday', 'Presence (s)'))\n return result\n\n\[email protected]('/api/v1/presence_start_end/<int:user_id>', methods=['GET'])\n@jsonify\ndef presence_start_end_view(user_id):\n \"\"\"\n Return timeline data.\n \"\"\"\n data = get_data()\n result = avg_time_weekday(\n group_by_weekday_start_end(data[user_id])\n )\n\n return result\n"
}
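Every view in `views.py` is wrapped in `@jsonify`, which again lives in the `utils` module not shown in this dump. A plausible minimal implementation, inferred from the test that checks `Content-Type: application/json` and `result.response[0] == '"test"'` — treat it as a sketch, not the project's source:

```python
# Sketch (assumption): a @jsonify decorator matching how views.py uses it.
import json
from functools import wraps

from flask import Response


def jsonify(func):
    """Serialize the view's return value into a JSON Flask Response."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        return Response(
            json.dumps(func(*args, **kwargs)),
            mimetype='application/json',
        )
    return wrapper
```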
] | 4 |
phooky/SolidConway | https://github.com/phooky/SolidConway | 2cb7abaad049c14070e0ed725970adf98e2c7d3d | dfb819c5098f5910ba4e0a2060eef4ceed1c328d | 402525fa88ea486961078f3a4cb2f1ed14331583 | refs/heads/default | 2021-11-25T12:12:25.080050 | 2021-11-21T19:22:06 | 2021-11-21T19:22:06 | 2,908,956 | 3 | 1 | null | 2011-12-04T05:42:23 | 2013-12-14T03:09:12 | 2021-11-21T19:22:06 | Python | [
{
"alpha_fraction": 0.6253870129585266,
"alphanum_fraction": 0.6578947305679321,
"avg_line_length": 20.88135528564453,
"blob_id": "34a50b2e41616451e707b6bbd1b301f51bfd767d",
"content_id": "3a0344173f2b724c7df33ac53657785d86ba23dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1292,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 59,
"path": "/golly script/openscad.py",
"repo_name": "phooky/SolidConway",
"src_encoding": "UTF-8",
"text": "# OpenSCAD generator for Golly\n# [email protected]\n\nimport golly\n\nrect = golly.getrect()\nif len(rect) == 0:\n\tgolly.exit(\"Please create a pattern.\")\n\ncount = int(golly.getstring(\"Number of generations to evolve:\",\"8\"))\nofile = open(golly.getstring(\"OpenSCAD output file:\",\"golly.scad\"),\"w\")\n\npreamble = \"\"\"\nmodule skew_cube(a,b) {\n\tmultmatrix(m = [\n\t\t[1, 0, a, 0],\n\t\t[0, 1, b, 0],\n\t\t[0, 0, 1, 0]\n\t]) cube(size=1.00001);\n}\n\nmodule span_cube(x1,y1,x2,y2,z) {\n\ttranslate(v=[x1,y1,z])\n\tskew_cube(x2-x1,y2-y1);\n}\n\n\"\"\"\n\n\ndef cell_set(cell_list):\n\tcells = set()\n\twhile len(cell_list) > 1:\n\t\tcells.add((cell_list.pop(0),cell_list.pop(0)))\n\treturn cells\n\n# get initial cells\ninitial = golly.getcells(rect)\n\n# create cell sets for all generations\ngenerations = [cell_set(golly.evolve(initial,c)) for c in range(count)]\n\n# dump preamble to output\nofile.write(preamble)\n\n# union of all cell transitions\nofile.write(\"union() { \\n\")\n\ndef buildLayerCubes(bottom,top,z,ofile):\n\tfor (x,y) in bottom:\n\t\tcandidates = set([(xp,yp) for xp in range(x-1,x+2) for yp in range(y-1,y+2)])\n\t\tfor (xp,yp) in candidates.intersection(top):\n\t\t\tofile.write(\"span_cube({0},{1},{2},{3},{4});\\n\".format(x,y,xp,yp,z))\n\nfor i in range(count-1):\n\tbuildLayerCubes(generations[i],generations[i+1],i,ofile)\n\nofile.write(\"} \\n\")\n\nofile.close()\n\n"
},
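The core of `buildLayerCubes` above is the linking rule: a live cell at `(x, y)` in one generation is joined, by a skewed cube, to every live cell in the 3x3 neighbourhood of the next generation. A standalone worked example of that rule on a blinker (coordinates chosen here for illustration):

```python
# Worked example of the layer-linking rule used in buildLayerCubes.
bottom = {(0, 1), (1, 1), (2, 1)}   # horizontal blinker, generation k
top = {(1, 0), (1, 1), (1, 2)}      # vertical blinker, generation k+1

for (x, y) in sorted(bottom):
    # all cells within one step of (x, y), as in the script above
    candidates = {(xp, yp) for xp in range(x - 1, x + 2)
                           for yp in range(y - 1, y + 2)}
    for (xp, yp) in sorted(candidates & top):
        print("span_cube({0},{1},{2},{3},{4});".format(x, y, xp, yp, 0))
```

Each printed `span_cube` call becomes one skewed cube in the OpenSCAD union, so cells that survive or spawn nearby stay physically connected between layers.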
{
"alpha_fraction": 0.5783178806304932,
"alphanum_fraction": 0.6107252836227417,
"avg_line_length": 20.413223266601562,
"blob_id": "7165639371ccf29f585ed589ffa50e89309a728d",
"content_id": "0d0e1ec331dff9dc63fbeebf16da1cccff04fffc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2592,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 121,
"path": "/solid_conway.py",
"repo_name": "phooky/SolidConway",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport re\nimport sys\n\ndef usage():\n\tprint \"\"\"\nSolid Conway is a script for converting a series of RLE files\nrepresenting generations in the game of life into an openSCAD\nprogram producing a solid object representing the evolution\nof the pattern.\n\nUsage:\nsolid_conway.py gen1.rle gen2.rle ...\n\"\"\"\n\npreamble = \"\"\"\nmodule skew_cube(a,b) {\n\tmultmatrix(m = [\n\t\t[1, 0, a, 0],\n\t\t[0, 1, b, 0],\n\t\t[0, 0, 1, 0]\n\t]) cube(size=1.00001);\n}\n\nmodule span_cube(x1,y1,x2,y2,z) {\n\ttranslate(v=[x1,y1,z])\n\tskew_cube(x2-x1,y2-y1);\n}\n\n\"\"\"\n\nreXY = re.compile(r\"x\\s*=\\s*([0-9]+),\\s*y\\s*=\\s*([0-9]+)\")\nrePos = re.compile(r\"Pos\\s*=\\s*(\\-?[0-9]+),\\s*(\\-?[0-9]+)\")\n\ndef parseOutRLE(raw,row):\n\tcount = 0\n\toff = 0\n\tfor c in raw:\n\t\tif c <= '9' and c >= '0':\n\t\t\tcount = (count * 10) + int(c)\n\t\telse:\n\t\t\tif count == 0: count = 1\n\t\t\tif c == 'b':\n\t\t\t\trow[off:off+count] = [0]*count\n\t\t\telif c == 'o':\n\t\t\t\trow[off:off+count] = [1]*count\n\t\t\toff = off + count\n\t\t\tcount = 0\n\nclass Generation:\n\tdef __init__(self, position, data):\n\t\tself.pos = position\n\t\tself.data = data\n\tdef has(self,i,j):\n\t\ti = i - self.pos[0]\n\t\tj = j - self.pos[1]\n\t\tif i < 0 or j < 0:\n\t\t\treturn 0\n\t\tif j >= len(self.data):\n\t\t\treturn 0\n\t\tif i >= len(self.data[j]):\n\t\t\treturn 0\n\t\treturn self.data[j][i]\n\tdef walk(self, fn):\n\t\t\"Walk the tuples of on cells\"\n\t\tfor j in range(len(self.data)):\n\t\t\tfor i in range(len(self.data[j])):\n\t\t\t\tif self.data[j][i]:\n\t\t\t\t\tfn(i+self.pos[0],j+self.pos[1])\n\ndef loadRLE(path):\n\tlines = open(path).readlines()\n\tdata = []\n\tpos=(0,0)\n\tdim=(0,0)\n\ty = 0\n\tfor line in lines:\n\t\tposMatch = rePos.search(line)\n\t\tif posMatch:\n\t\t\tpos=(int(posMatch.group(1)),int(posMatch.group(2)))\n\t\t\tcontinue\n\t\txyMatch = reXY.match(line)\n\t\tif xyMatch:\n\t\t\tdim=(int(xyMatch.group(1)),int(xyMatch.group(2)))\n\t\t\tdata=[[0 for i in range(dim[0])] for j in range(dim[1])]\n\t\t\tcontinue\n\t\tif line and line[0] != \"#\":\n\t\t\t# line is data\n\t\t\trows=line.split(\"$\")\n\t\t\tfor row in rows:\n\t\t\t\tparseOutRLE(row,data[y])\n\t\t\t\ty = y + 1\n\t#print pos,data\n\treturn Generation(pos,data)\n\npaths = sys.argv[1:]\ngenerations = map(loadRLE,paths)\n\ndef buildParentsForGen(genAbove,z):\n\tdef buildParents(i,j):\n\t\t# use genAbove\n\t\tirange = range(i-1,i+2)\n\t\tjrange = range(j-1,j+2)\n\t\tfor jt in jrange:\n\t\t\tfor it in irange:\n\t\t\t\tif genAbove.has(it,jt):\n\t\t\t\t\tprint \"span_cube({0},{1},{2},{3},{4});\".format(i,j,it,jt,z)\n\treturn buildParents\n\nif len(generations) < 2:\n\tprint \"Need at least two generations.\"\n\tusage()\n\tsys.exit(1)\n\nprint preamble\n\nprint \"union() {\"\nfor idx in range(len(generations)-1):\n\tgenerations[idx].walk(buildParentsForGen(generations[idx+1],idx))\nprint \"}\"\n\n"
},
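The run-length decoding in `parseOutRLE` expands a digit prefix into that many dead (`b`) or alive (`o`) cells. A self-contained re-implementation of the same loop, useful for checking a row by hand (the function name here is illustrative):

```python
# "3o2b" means three live cells followed by two dead ones.
def parse_rle_row(raw, width):
    row = [0] * width
    count, off = 0, 0
    for c in raw:
        if '0' <= c <= '9':
            count = count * 10 + int(c)   # accumulate multi-digit counts
        else:
            count = count or 1            # a bare 'o'/'b' counts once
            if c == 'o':
                row[off:off + count] = [1] * count
            off += count
            count = 0
    return row

print(parse_rle_row("3o2b", 6))   # [1, 1, 1, 0, 0, 0]
```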
{
"alpha_fraction": 0.7962962985038757,
"alphanum_fraction": 0.7962962985038757,
"avg_line_length": 70.66666412353516,
"blob_id": "4c3521926d2816c3a2cbd0f37954af1d80897c0f",
"content_id": "82fee26068b730d46df5289d59746eb2cf3568ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 6,
"path": "/README.md",
"repo_name": "phooky/SolidConway",
"src_encoding": "UTF-8",
"text": "Solid Conway is a set of scripts for creating solid representations of evolutions in Conway's game of life.\n\nThe script comes in two flavors:\n\n- golly script/openscad.py is a script that can be run from within Golly to generate an openSCAD program for the evolution of the current pattern.\n- solid_conway.py is a command-line tool for building an object represented by a series of RLE files generated by Golly or another program.\n\n\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 124,
"blob_id": "2e368ca56ec5438feaa3e2b3da0d35024ddb5906",
"content_id": "f85e135d3140ddfb1a595422a751b5d71018b83f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 125,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 1,
"path": "/golly script/INSTALL.md",
"repo_name": "phooky/SolidConway",
"src_encoding": "UTF-8",
"text": "Install the openscad.py script in your Golly scripts directory. On a Debian distro, this is /usr/share/golly/Scripts/Python.\n"
}
] | 4 |
Pys3nberg/quotes_lcd | https://github.com/Pys3nberg/quotes_lcd | f2695582584145d0b7f5dadf7ccf9e90d27b658a | 494f20287d29950ecedae12877eb4fed9a09decf | 22441556e34d52bdbdca540f131c7066a4859d86 | refs/heads/master | 2022-07-27T23:41:25.289049 | 2020-05-17T18:59:17 | 2020-05-17T18:59:17 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7862069010734558,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 71.5,
"blob_id": "343c4814e0880bfe2ddaa7c4e18da78a333b13b8",
"content_id": "31cc2071938ddb0ee480eb49809f45f1cfe51a04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Pys3nberg/quotes_lcd",
"src_encoding": "UTF-8",
"text": "# quotes_lcd\nSmall project to display inspirational quote of the day and some crypto prices on a raspberry pi LCD. Designed to run every 15 mins\n"
},
{
"alpha_fraction": 0.5668628811836243,
"alphanum_fraction": 0.5773759484291077,
"avg_line_length": 24.56989288330078,
"blob_id": "3afe38dcd8b5013d96f64520d1c4f3387ee21d9b",
"content_id": "5c9cf0fdaad496f2edea1fcb7f58887ab9d4228d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2378,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 93,
"path": "/quotes_main.py",
"repo_name": "Pys3nberg/quotes_lcd",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n\nimport time\nimport board\nimport busio\nimport adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd\nimport requests\n\ndef format_quote(data):\n\n return data['contents']['quotes'][0]['quote']\n \n\ndef split_quote(quote, char_len):\n \n \"\"\"\n This functions purpose is to split up a sentence into\n segments where to the total characters are less than or\n equal to the amount of columns on the lcd used\n \"\"\"\n \n segments = []\n words = quote.split(' ')\n\n # Loop through quote until no words left\n while len(words) > 0:\n s = \"\"\n i = -1\n \n for j, word in enumerate(words):\n # Keep adding words to current segment untill word doesn't fit\n if(len(s) + len(word) + 1) <= char_len:\n s += \" \" + word\n s = s.strip()\n continue\n # Else set the index so a subset of remaining words can be used\n else:\n i = j\n break\n \n segments.append(s)\n if i == -1:\n words = []\n words = words[i:]\n \n return segments\n\ndef format_crypt_data(data):\n \n output = \"\"\n output += \"BTC:$\" + str(cryptoData['BTC']['USD']) + \"\\n\"\n output += \"ETH:$\" + str(cryptoData['ETH']['USD']) + \"\\n\"\n\n return output\n\nif __name__ == \"__main__\":\n\n # Get inspirational quote of the day\n resp = requests.get(\"http://quotes.rest/qod/inspire\") \n quoteData = resp.json()\n quote = format_quote(quoteData)\n\n # Get current prices of BTC and ETH from bitstamp data\n resp = requests.get(\"https://min-api.cryptocompare.com/data/pricemulti?fsyms=BTC,ETH&tsyms=USD&e=bitstamp\")\n cryptoData = resp.json()\n crypto = format_crypt_data(cryptoData)\n\n # Modify this if you have a different sized Character LCD\n lcd_columns = 16\n lcd_rows = 2\n\n # Initialise I2C bus.\n i2c = busio.I2C(board.SCL, board.SDA)\n\n # Initialise the LCD class\n lcd = character_lcd.Character_LCD_RGB_I2C(i2c, lcd_columns, lcd_rows)\n\n lcd.clear()\n # Set LCD color to purple\n lcd.color = [0, 100, 0]\n time.sleep(2)\n\n # Display segmented quote\n for seg in split_quote(quote, 16):\n lcd.clear()\n lcd.message=seg\n time.sleep(2)\n\n time.sleep(5)\n lcd.clear()\n\n # Display BTC and ETH prices\n lcd.message = crypto\n"
}
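To see how the word-packing loop in `split_quote` behaves on a real 16-column display, here is a usage example (it assumes the `split_quote` function defined in `quotes_main.py` above is in scope):

```python
quote = "The best way to predict the future is to invent it"
for segment in split_quote(quote, 16):
    print(repr(segment))
# 'The best way to'
# 'predict the'
# 'future is to'
# 'invent it'
```

Each segment fits within 16 characters, so the main loop can show one segment per LCD refresh.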
] | 2 |
adipro7/Pixelate-2020 | https://github.com/adipro7/Pixelate-2020 | f8e1abc1a7a3794422102dc7a7525df8f317a07c | 355a8640282f4b2106d62a4b74268e497704d9eb | 2d3898a1b6628a2474ed9a6ac0792cadb7aa52d2 | refs/heads/master | 2022-12-09T01:34:04.415328 | 2020-08-28T21:57:58 | 2020-08-28T21:57:58 | 291,153,260 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46671196818351746,
"alphanum_fraction": 0.5027173757553101,
"avg_line_length": 37.83783721923828,
"blob_id": "70b59910265b9cafc5afb52929899171e7fa6f24",
"content_id": "2096bcdae02ddc651c0ccf17ad14b81bd09fdeb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1472,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 37,
"path": "/pix1.0/color_recognize.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\n\r\ndef recog_col(color_mat,shape_mat,row,col,LRW,URW,arena,num):\r\n lrw = np.array([LRW])\r\n urw = np.array([URW])\r\n mask = cv2.inRange(arena, lrw, urw)\r\n\r\n cv2.imshow('mask', mask)\r\n cv2.waitKey(0)\r\n\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt>20):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n cenx = cenx // row\r\n ceny = ceny // col\r\n color_mat[ceny][cenx] = num\r\n # shape recog\r\n if(num<2):\r\n rect = cv2.minAreaRect(cnt)# making min_area_rect aroung contours\r\n area_of_rect=rect[1][0]*rect[1][1]# area of contours\r\n box = cv2.boxPoints(rect)# recovering 4 point of min_rect\r\n box = np.int0(box)\r\n cv2.drawContours(mask, [box], 0, (100,100,255), 2)# drawing rectangle around contours\r\n cv2.imshow('area_of',mask)\r\n cv2.waitKey(100)\r\n rat=area_of_cnt/area_of_rect# taking ratio of (area of conotur/area of rectangle)\r\n if rat>=0.87:\r\n print(rat,1)\r\n shape_mat[ceny][cenx]=1\r\n else:\r\n print(rat,0)\r\n shape_mat[ceny][cenx]=0"
},
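The `0.87` cutoff in `recog_col` classifies shapes by the ratio of contour area to minimum-area bounding rectangle: a square fills its rectangle (ratio near 1.0) while a circle inscribed in a tight square covers only pi/4 of it. Assuming the two shapes being distinguished are squares and circles (which that threshold strongly suggests), the arithmetic looks like this:

```python
import math

square_ratio = 1.0          # a square fills its min-area rectangle
circle_ratio = math.pi / 4  # ~0.785: circle area / tight bounding square

for ratio in (square_ratio, circle_ratio):
    # same decision rule as recog_col above
    print(round(ratio, 3), 1 if ratio >= 0.87 else 0)
# 1.0 1      -> shape_mat entry 1
# 0.785 0    -> shape_mat entry 0
```

The threshold sits comfortably between the two ideal values, giving some margin for contour noise.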
{
"alpha_fraction": 0.5170021653175354,
"alphanum_fraction": 0.5402107238769531,
"avg_line_length": 34.510475158691406,
"blob_id": "af5d33e6e7e807bc8ae897e08a395d5badbd9533",
"content_id": "0377b735cc8d784824cf258d11939d6e9bdc1778",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19174,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 525,
"path": "/final_run.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\nimport serial\r\nimport heapq\r\nimport all_functions as fuck\r\nimport math\r\nfrom time import sleep\r\n\r\n\r\nlr=np.load('lrr.npy')\r\nur=np.load('urr.npy')\r\nly=np.load('lry.npy')\r\nuy=np.load('ury.npy')\r\nlb=np.load('lrb.npy')\r\nub=np.load('urb.npy')\r\nlg=np.load('lrg.npy')\r\nug=np.load('urb.npy')\r\nlw=np.load('lrw.npy')\r\nuw=np.load('urw.npy')\r\n\r\n#color_mat=np.load('')\r\n#shape_mat=np.load('')\r\nroi=np.load('roi.npy')\r\nprint(\"everything loaded\")\r\n\r\nserial.begin('com5',9600)\r\nsleep(2)\r\nprint(\"connected\")\r\nser=serial.Serial()\r\n\r\n# numpy array declaration\r\ncols=1\r\nn=9\r\nrows=n*n+1\r\n\r\n\r\nvar_cell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_adj = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\n\r\ncord=[]\r\n# numpy array declaration\r\ncell_num=np.zeros((n, n), dtype=np.int16)\r\nshape_mat = np.full((n,n),-1) # for shape\r\nweight_mat = np.zeros((n, n), dtype=np.int16) # for no of corners\r\ncolor_mat = np.zeros((n, n), dtype=np.int16) # for color\r\ncnt=1\r\n# numbering of cells of arena box\r\nfor i in range(n):\r\n for j in range(n):\r\n cell_num[i][j]=cnt\r\n cnt+=1\r\n\r\ncap=cv2.VideoCapture(1)\r\nprint(cell_num)\r\n# finding cordinate of any cell\r\ndef return_cord(cell_id,n=9):\r\n for i in range(n):\r\n for j in range(n):\r\n if(cell_num[i][j]==cell_id):\r\n return (i,j)\r\n\r\n\r\n# updating color of horcurex and jail\r\ndef update_color_shape(cell_id, lr, ur, num):\r\n ret, frame = cap.read()\r\n arena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n r, c = return_cord(cell_id)\r\n mask = cv2.inRange(arena, lr, ur)\r\n cv2.imshow('mask', mask)\r\n cv2.waitKey(0)\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n # for ignoring small contours\r\n total_cnt_area = 0\r\n no_of_cnt = 0\r\n for cnt in contours:\r\n total_cnt_area += cv2.contourArea(cnt)\r\n no_of_cnt += 1\r\n\r\n avg_cnt_area = total_cnt_area / no_of_cnt\r\n thresh_cnt_area = avg_cnt_area / 3\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt > thresh_cnt_area):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n cenx = cenx // n\r\n ceny = ceny // n\r\n if (cell_id == cell_num[ceny][cenx]):\r\n color_mat[ceny][cenx] = num\r\n # shape recog\r\n rect = cv2.minAreaRect(cnt) # making min_area_rect aroung contours\r\n area_of_rect = rect[1][0] * rect[1][1] # area of contours\r\n box = cv2.boxPoints(rect) # recovering 4 point of min_rect\r\n box = np.int0(box)\r\n cv2.drawContours(mask, [box], 0, (100, 100, 255), 2) # drawing rectangle around contours\r\n # cv2.imshow('area_of',mask)\r\n # cv2.waitKey(100)\r\n rat = area_of_cnt / area_of_rect # taking ratio of (area of conotur/area of rectangle)\r\n if rat >= 0.87:\r\n print(rat, 1)\r\n shape_mat[ceny][cenx] = 1\r\n else:\r\n print(rat, 0)\r\n shape_mat[ceny][cenx] = 0\r\n return True\r\n return False\r\n\r\n# for making color and shape matrix\r\ndef color_shape(lrg,urg,arena,num,boat_center):\r\n mask = cv2.inRange(arena, lrg, urg)\r\n cv2.imshow('mask', mask)\r\n cv2.waitKey(0)\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n # for ignoring small contours\r\n total_cnt_area=0\r\n no_of_cnt=0\r\n for cnt in contours:\r\n total_cnt_area +=cv2.contourArea(cnt)\r\n 
no_of_cnt+=1\r\n\r\n avg_cnt_area=total_cnt_area/no_of_cnt\r\n thresh_cnt_area=avg_cnt_area/3\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt>thresh_cnt_area):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n cenx = cenx // n\r\n ceny = ceny // n\r\n if boat_center!=(ceny,cenx):\r\n if(num==4):# to become sure that there is death eater above green cell\r\n if(color_mat[ceny][cenx]==5):\r\n color_mat[ceny][cenx]=4\r\n green_cell.append(cell_num[ceny][cenx])\r\n else:\r\n color_mat[ceny][cenx] = num\r\n else:\r\n color_mat[ceny][cenx] = 2\r\n # shape recog\r\n if(num<2):\r\n rect = cv2.minAreaRect(cnt)# making min_area_rect aroung contours\r\n area_of_rect=rect[1][0]*rect[1][1]# area of contours\r\n box = cv2.boxPoints(rect)# recovering 4 point of min_rect\r\n box = np.int0(box)\r\n cv2.drawContours(mask, [box], 0, (100,100,255), 2)# drawing rectangle around contours\r\n #cv2.imshow('area_of',mask)\r\n #cv2.waitKey(100)\r\n rat=area_of_cnt/area_of_rect# taking ratio of (area of conotur/area of rectangle)\r\n if rat>=0.87:\r\n print(rat,1)\r\n shape_mat[ceny][cenx]=1\r\n else:\r\n print(rat,0)\r\n shape_mat[ceny][cenx]=0\r\n\r\ndef find_boat_centre():\r\n while(1):\r\n ret, frame = cap.read()\r\n arena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)\r\n parameters = aruco.DetectorParameters_create()\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(arena, aruco_dict, parameters=parameters)\r\n if ids is None:\r\n continue\r\n img=cv2.circle(arena,(corners[0][0][0][0],corners[0][0][0][1]),5,(0,0,255),2)\r\n cv2.imshow('img',img)\r\n cv2.waitKey(0)\r\n cv2.destroyWindow('img')\r\n print(\"corners\")\r\n print(corners)\r\n for x in range(0,ids.shape[0]):\r\n p1 = max(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n p2 = min(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n q1 = max(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n q2 = min(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n xc = int(p2+abs(p1-p2)/2)\r\n yc = int(q2+abs(q1-q2)/2)\r\n return corners,[xc,yc]\r\n\r\n# for update of weights of which is decided by color and shape of weapons\r\ndef update_weight(var_cell_wei,shape,color,n=9):\r\n u_range=n*n+1 # 82 in case of n\r\n for i in range(1,u_range,1):\r\n for num in range(cell_adj[i]):\r\n if(num==0):\r\n continue\r\n else:\r\n r,c=return_cord(num,cell_num)\r\n if(color_mat[r][c]==color & shape_mat[r][c]==shape):\r\n var_cell_wei.append(0) # putting 0 for same color and shape\r\n else:\r\n var_cell_wei.append(1) #putting 1 for different shape or color\r\n\r\ndef path(source,destination,wei=cell_wei,n=9):\r\n path=[]\r\n par=[]\r\n dis=[]\r\n vis=[]\r\n nodes=n*n+1\r\n for i in range(nodes):\r\n dis.append(1000000)\r\n vis.append(False)\r\n par.append(-1)\r\n dis[source]=0\r\n par[source]=0\r\n q=[]\r\n heapq.heappush(q,(0,source))\r\n while q:\r\n next_item = heapq.heappop(q)\r\n node=next_item[1]\r\n #print(node)\r\n if vis[node]:\r\n continue\r\n vis[node]=True\r\n i=1\r\n flag=False\r\n for item in cell_adj[node]:\r\n if item!=0:\r\n if(dis[item]>(dis[node]+cell_wei[node][i])):\r\n dis[item]=dis[node]+cell_wei[node][i]\r\n par[item] = node\r\n heapq.heappush(q,(dis[item],item))\r\n i=i+1\r\n #print(\"parent\")\r\n 
#print(destination)\r\n if(par[destination]==-1):\r\n return path\r\n path.append(destination)\r\n while(par[destination]!=0):\r\n #print(par[destination])\r\n path.append(par[destination])\r\n destination=par[destination]\r\n path.reverse()\r\n return path\r\n\r\n# finding centres of each cells of arena\r\ndef find_centre(n=9):\r\n ret, frame = cap.read()\r\n arena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n l,b=arena.shape\r\n row=l/n\r\n col=b/n\r\n for i in range(n):\r\n for j in range(n):\r\n cord.append([col*(i+(1/2)),row*(j+(1/2))])\r\n\r\n# return front direction of boat\r\ndef bot_vector(p,i,j):\r\n return p[0][0][i]-p[0][0][j]\r\n\r\n# return direction in which boat have to move\r\ndef dirn_of_mov_vector(boat_centre,next_cell,roi):\r\n cell_cent=find_centre(next_cell)\r\n boat_centre[0]-=roi[0]\r\n boat_centre[1]-=roi[1]# source of error y and x\r\n return cell_cent-boat_centre\r\n\r\n# return distance between boat_centre and cell_cent\r\ndef find_dis(boat_centre,next_cell,r):\r\n cell_cent = find_centre(next_cell)\r\n boat_centre[0]-= r[0]\r\n boat_centre[1]-= r[1]\r\n return math.sqrt(((cell_cent[0]-boat_centre[0])**2)+((cell_cent[1]-boat_centre[1])**2))\r\n\r\n# determine angle between boat direction and direction of movemnet\r\ndef cross_pro(dirn_of_mov=[1,1],boat_vector=[0,1]):\r\n a=np.array(dirn_of_mov)\r\n b=np.array(boat_vector)\r\n print(np.cross(a, b))\r\n mag = (math.sqrt(dirn_of_mov[0] ** 2 + dirn_of_mov[1] ** 2)) * (math.sqrt(boat_vector[0] ** 2 + boat_vector[1] ** 2))\r\n print(math.degrees(math.asin(np.cross(a,b)/mag)))\r\n return (math.degrees(math.asin(np.cross(a,b)/mag)))\r\n\r\n# locomaion of boat\r\ndef bot_movement(go_path):\r\n dis = 10000\r\n flag2=False# if true then to move hoop that is next cell is blue\r\n flag = False # if true then down hoop to that is next cell is green\r\n for box in (go_path):\r\n if(box==go_path[0]):\r\n continue\r\n min_thres_dis=roi[2]/(3*n)# distance is less than length_of_one_side/3\r\n destination=cord[box]\r\n if(box==go_path[len(go_path)-1]):\r\n r,c=return_cord(box)\r\n r1=0\r\n c1=0\r\n # find centre of white box and making it as a centriod\r\n mask_w=cv2.inRange(arena,lw,uw)\r\n contours, _ = cv2.findContours(mask_w, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt > 20):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n c1 = cenx // n\r\n r1 = ceny // n\r\n if(r1==r & c1==c):\r\n flag=True\r\n break\r\n if flag==True:\r\n flag2=False\r\n destination=np.array([r1,c1])\r\n min_thres_dis=(roi[2]/(2*n))# caliberated distance\r\n else:\r\n min_thres_dis=20# caliberated distance\r\n flag2=True\r\n\r\n while (dis > min_thres_dis): # threshold distance by calibertaion\r\n #ret,frame=cap.read()\r\n p, b_c = find_boat_centre()\r\n bv = bot_vector(p,i,j)\r\n dv = dirn_of_mov_vector(b_c,destination,roi)\r\n angle = cross_pro(bv, dv)\r\n while (angle > 10 | angle < -10):\r\n if (angle <=-10):#small right turn\r\n ser.write(b'r')\r\n sleep(0.04)\r\n ser.write(b's')\r\n elif (angle>10):\r\n ser.write(b'l')\r\n sleep(0.04)\r\n ser.write(b's')\r\n p, b_c = find_boat_centre()\r\n bv = bot_vector(p, i, j)\r\n dv = dirn_of_mov_vector(b_c, destination, roi)\r\n angle = cross_pro(bv, dv)\r\n # arduino code to move forward little bit\r\n ser.write(b'f')\r\n sleep(0.1)\r\n ser.write(b's')\r\n #ret,frame=cap.read()\r\n p, b_c = find_boat_centre()\r\n 
dis=find_dis(b_c, destination,roi)\r\n\r\n if(flag2):\r\n # arduino code to move hoop up\r\n\r\n else:\r\n #arduino to pick up the bx\r\n\r\n# finding horcurex and jail\r\nhorcruxes = []\r\nazkaban_prison = []\r\nweapons = []\r\ngreen_cell=[]\r\n# making matrix of color and shape\r\n\r\nboat_center = find_boat_centre()\r\nret,frame=cap.read()\r\narena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\ncolor_shape(lr, ur, arena, 0,boat_center)\r\ncolor_shape(ly, uy, arena, 1,boat_center)\r\ncolor_shape(lw, uw, arena, 5,boat_center)\r\ncolor_shape(lb, ub, arena, 2,boat_center)\r\ncolor_shape(lg, ug, arena, 4,boat_center)\r\n\r\n# to store the coordinate of center of each cell\r\nfind_centre(n)\r\n\r\n\r\nfor i in range(n):\r\n for j in range(n):\r\n if(color_mat[i][j]==4):\r\n horcruxes.append(cell_num[i][j])\r\n if(color_mat[i][j]==2):\r\n azkaban_prison.append(cell_num[i][j])\r\n if (color_mat[i][j] == 5):\r\n weapons.append(cell_num[i][j])\r\n\r\n# making cell_wei and cell_adj\r\nfor i in range(n):\r\n for j in range(n):\r\n upx=i-1\r\n dwx=i+1\r\n ly=j-1\r\n ry=j+1\r\n if(color_mat[i][j]<=2):\r\n if(upx>=0):\r\n cell_adj[cell_num[i][j]].append(cell_num[upx][j])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if(dwx<n):\r\n cell_adj[cell_num[i][j]].append(cell_num[dwx][j])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if (ly >=0):\r\n cell_adj[cell_num[i][j]].append(cell_num[i][ly])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if (ry < n):\r\n cell_adj[cell_num[i][j]].append(cell_num[i][ry])\r\n cell_wei[cell_num[i][j]].append(1)\r\n\r\n\r\n\r\n# pass source position and destination position :taking 3 boxes to jail\r\nhorcruex_counter=0\r\njail_counter=0\r\nweapons_counter=0\r\nleft_jail=[]\r\nleft_horcruex=[]\r\nleft_weapons=[]\r\nvis_horcruex=[]# bool var to maintain visited of horcruex\r\nfor i in range(n*n+1):\r\n vis_horcruex.append(False)\r\n\r\nfor boxes in (horcruxes):\r\n path_to_horcruex=path(find_boat_centre(),boxes)\r\n if(len(path_to_horcruex)!=0):\r\n horcruex_counter+=1\r\n\r\n flag=True# denoting there is any path from horcurex to jail\r\n path_to_jail=[]# path from horcruex to jail\r\n while(flag & jail_counter<len(azkaban_prison)):\r\n path_to_jail=path(find_boat_centre(),azkaban_prison[jail_counter])\r\n if(len(find_boat_centre())!=0):# means there is path to jail\r\n jail_counter += 1\r\n flag=False\r\n break\r\n else:\r\n left_jail.append(jail_counter)\r\n jail_counter+=1\r\n if(flag):# no path is left from horcruex to jail\r\n horcruex_counter-=1\r\n break\r\n bot_movement(path_to_horcruex)\r\n fuck.connect_edges(cell_adj, cell_wei, cell_num, boxes, n=9)\r\n bot_movement(path_to_jail)\r\n if(update_color_shape(boxes,lr,ur,0)):\r\n print(\"hel\")\r\n else:\r\n update_color_shape(boxes, ly, uy, 1)\r\n fuck.remove_edges(cell_wei,cell_adj,azkaban_prison[jail_counter-1])\r\n # move bot two step back as jail is disconnected\r\n if(jail_counter==len(azkaban_prison)):\r\n break\r\n if(horcruex_counter==len(horcruxes)):\r\n break\r\n\r\nlast_horcruex=0\r\nfor ele in (weapons):\r\n path_to_weapons=path(find_boat_centre(),ele)\r\n bot_movement(path_to_weapons)\r\n '''if (last_horcruex != 0):\r\n fuck.remove_edges(cell_wei, cell_adj, last_horcruex)'''\r\n\r\n # arduino code to move two box backward\r\n\r\n # code to scan frame\r\n if(update_color_shape(ele,lr,ur,0)):\r\n print(\"hel\")\r\n else:\r\n update_color_shape(ele, ly, uy, 1)\r\n # first of all move box to current weapons place\r\n\r\n # flag for making sure if it find its color and shape with 
horcruex\r\n flag=False# initially we believe that there is no match\r\n for boxes in (green_cell):\r\n r,c=return_cord(boxes)\r\n r1,c1=return_cord(ele)\r\n if(color_mat[r][c]==color_mat[r1][c1] & shape_mat[r][c]==shape_mat[r1][c1] & vis_horcruex[boxes]==False):\r\n flag=True\r\n vis_horcruex[boxes]=True\r\n fuck.connect_edges(cell_adj,cell_wei,cell_num,ele,n=9)\r\n #update weight of graph\r\n var_cell_wei=[[0 for i in range(cols)] for j in range(rows)]\r\n update_weight(var_cell_wei, shape_mat[r1][c1],color_mat[r1][c1],n)\r\n path_to_horcruex=path(find_boat_centre(),boxes,wei=var_cell_wei,n=9)# if error replace find_boat_center by ele\r\n bot_movement(path_to_horcruex)\r\n fuck.remove_edges(cell_wei, cell_adj, last_horcruex)\r\n #move bot two step back or either bot centre is not in that horcruex\r\n last_horcruex=boxes\r\n if(flag==False):\r\n left_weapons.append(ele)\r\n\r\n#fuck.remove_edges(cell_wei, cell_adj, last_horcruex)# to disconnect horcruex from its neighbour if weapons is placed\r\n\r\n# move bot to last left horcruex\r\nfor i in range(horcruex_counter,len(horcruxes),1):\r\n path_to_horcruex=path(find_boat_centre(),horcruxes[horcruex_counter])\r\n bot_movement(path_to_horcruex)\r\n fuck.connect_edges(cell_adj,cell_wei,cell_num,cell_id,n=9)\r\n for left in (left_jail):\r\n path_to_jail=path(find_boat_centre(),left)\r\n bot_movement(path_to_jail)\r\n fuck.remove_edges(cell_wei, cell_adj, cell_id=jail)\r\n # remove bot some step back as jai is disconnected\r\n\r\nfor ele in (left_weapons):\r\n path_to_weapons=path(find_boat_centre(),ele)\r\n bot_movement(path_to_weapons)\r\n if (last_horcruex != 0):\r\n fuck.remove_edges(cell_wei, cell_adj, last_horcruex)\r\n\r\n # arduino code to move two box backward\r\n\r\n # code to scan frame\r\n if(update_color_shape(ele,lr,ur,0)):\r\n print(\"hel\")\r\n else:\r\n update_color_shape(ele, ly, uy, 1)\r\n\r\n # first of all move box to current weapons place\r\n flag = False # initially we believe that there is no match\r\n for boxes in (green_cell):\r\n r, c = return_cord(boxes)\r\n r1, c1 = return_cord(ele)\r\n if (color_mat[r][c] == color_mat[r1][c1] & shape_mat[r][c] == shape_mat[r1][c1] & vis_horcruex[boxes]=False):\r\n flag = True\r\n vis_horcruex[boxes] = True\r\n fuck.connect_edges(cell_adj, cell_wei, cell_num, ele, n=9)\r\n # update weight of graph\r\n var_cell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\n update_weight(var_cell_wei, shape_mat[r1][c1], color_mat[r1][c1], n)\r\n path_to_horcruex = path(find_boat_centre(), boxes, wei=var_cell_wei,\r\n n=9) # if error replace find_boat_center by ele\r\n bot_movement(path_to_horcruex)\r\n last_horcruex = boxes\r\n\r\n\r\n\r\n"
},
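The `path()` routine in `final_run.py` is Dijkstra's algorithm over the 1..81 cell graph, with a parent array walked backwards to recover the route. A stripped-down, self-contained version of the same idea on a toy four-node graph (node ids 0..3 here, purely for illustration):

```python
import heapq

def dijkstra_path(adj, wei, source, destination):
    n = len(adj)
    dist = [float('inf')] * n
    par = [-1] * n
    dist[source] = 0
    heap = [(0, source)]
    while heap:
        d, node = heapq.heappop(heap)
        if d > dist[node]:
            continue  # stale heap entry
        for nb, w in zip(adj[node], wei[node]):
            if dist[nb] > d + w:
                dist[nb] = d + w
                par[nb] = node
                heapq.heappush(heap, (dist[nb], nb))
    # walk parents back from the destination
    path, at = [], destination
    while at != -1:
        path.append(at)
        at = par[at]
    return path[::-1] if path[-1] == source else []

adj = [[1, 2], [0, 3], [0, 3], [1, 2]]
wei = [[1, 4], [1, 1], [4, 1], [1, 1]]
print(dijkstra_path(adj, wei, 0, 3))    # [0, 1, 3]
```

An empty list signals "unreachable", which is exactly how the mission loop decides to push a horcrux or jail onto the `left_*` lists.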
{
"alpha_fraction": 0.5286694169044495,
"alphanum_fraction": 0.5515775084495544,
"avg_line_length": 29.558441162109375,
"blob_id": "b6aad7b3982ec32774d8f282c3186f3b01e4feff",
"content_id": "a0af045289b5b50a47fda8f43b87e24975f11b5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7290,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 231,
"path": "/pix1.0/dyn_adj.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport cv2\r\nimport heapq\r\nimport adjmat_wei\r\n\r\ndef return_frame():\r\n ret,frame=cap.read()\r\n return frame\r\ndef check(prison):\r\n r,c = return_cord(prison)\r\n upx = r - 1\r\n dwx = r + 1\r\n ly = c - 1\r\n ry = c + 1\r\n j = 0\r\n if(upx>=0):\r\n if(color_mat[upx][c]<2):\r\n return True\r\n if(dwx<n):\r\n if(color_mat[dwx][c]<2):\r\n return True\r\n if(ly>=0):\r\n if(color_mat[r][ly]<2):\r\n return True\r\n if ry<n:\r\n if color_mat[r][ry]<2:\r\n return True\r\n return False\r\n\r\ndef return_cord(cell_id):\r\n for i in range(n):\r\n for j in range(n):\r\n if(cell_num[i][j]==cell_id):\r\n return (i,j)\r\ndef path(source,destination,wei=cell_wei):\r\n path=[]\r\n par=[]\r\n dis=[]\r\n vis=[]\r\n for i in range(82):\r\n dis.append(1000000)\r\n vis.append(False)\r\n par.append(-1)\r\n dis[source]=0\r\n q=[]\r\n heapq.heappush(q,(0,source))\r\n while q:\r\n next_item = heapq.heappop(q)\r\n node=next_item[1]\r\n #print(node)\r\n if vis[node]:\r\n continue\r\n vis[node]=True\r\n i=1\r\n flag=False\r\n for item in cell_adj[node]:\r\n if item!=0:\r\n if(dis[item]>(dis[node]+cell_wei[node][i])):\r\n dis[item]=min(dis[item],dis[node]+cell_wei[node][i])\r\n par[item] = node\r\n heapq.heappush(q,(dis[item],item))\r\n i=i+1\r\n if (item == destination):\r\n flag=True\r\n break\r\n if (flag):\r\n break\r\n #print(\"parent\")\r\n #print(destination)\r\n path.append(destination)\r\n while(par[destination]!=-1):\r\n #print(par[destination])\r\n path.append(par[destination])\r\n destination=par[destination]\r\n path.reverse()\r\n return path\r\n\r\n\r\ncolor_mat=[[4, 1, 0 ,1, 0, 1, 0, 1, 4],\r\n [1 ,0 ,1, 0, 0, 0, 1, 0, 1],\r\n [0 ,0, 1, 1, 1, 1, 0, 0, 0],\r\n [0 ,1 ,0, 0, 5, 1, 1, 1, 2],\r\n [1, 0, 1 ,5 ,2, 5, 0, 1, 2],\r\n [0 ,1 ,1 ,0 ,5 ,0 ,1, 0,2],\r\n [0 ,0, 1, 1, 1, 0, 1, 1, 0],\r\n [1 ,1 ,0, 0 ,0 ,1 ,0 ,1 ,0],\r\n [4, 0, 0 ,1 ,1, 0, 1 ,0 ,4]]\r\ncolor_mat=np.transpose(color_mat)\r\n\r\nrows=82\r\ncols=1\r\nn=9\r\n\r\nvar_cell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_adj = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\n\r\ncell_num=np.zeros((9,9),dtype=np.int16)\r\nHorcruxes=[]\r\nWeapons=[]\r\nAzkaban_prison=[]\r\nnot_out=[]\r\nhorcurex_destroyed=[]# for destroyed horcurex\r\n\r\n# cell numbering from 1 to 81\r\ncnt=1\r\nfor i in range(9):\r\n for j in range (9):\r\n cell_num[i][j]=cnt\r\n cnt=cnt+1\r\n\r\n# denoting Horcruxes,Weapons,and Azkaban_prison\r\nfor i in range(n):\r\n for j in range(n):\r\n if color_mat[i][j]==2:\r\n Azkaban_prison.append(cell_num[i][j])\r\n if color_mat[i][j]==4:\r\n Horcruxes.append(cell_num[i][j])\r\n horcurex_destroyed.append(False)\r\n if color_mat[i][j]==5:\r\n Weapons.append(cell_num[i][j])\r\n if color_mat[i][j]>=2:\r\n not_out.append(cell_num[i][j])\r\n\r\n# cell connecting to its adjacent except Azkaban prison with white neighbour ,Weapons and Horcruxes\r\nfor i in range(n):\r\n for j in range(n):\r\n upx=i-1\r\n dwx=i+1\r\n ly=j-1\r\n ry=j+1\r\n if(color_mat[i][j]<=2):\r\n if(upx>=0):\r\n cell_adj[cell_num[i][j]].append(cell_num[upx][j])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if(dwx<n):\r\n cell_adj[cell_num[i][j]].append(cell_num[dwx][j])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if (ly >=0):\r\n cell_adj[cell_num[i][j]].append(cell_num[i][ly])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if (ry < n):\r\n cell_adj[cell_num[i][j]].append(cell_num[i][ry])\r\n cell_wei[cell_num[i][j]].append(1)\r\n\r\nunvis_prison=-1# for 
unvis_prison btw white box\r\nunvis_horcurex=-1# left horcurex\r\n\r\n# kunal algorithm\r\n#print(\"Horcurex\")\r\n#print(Horcruxes)\r\n#print(\"Azakabian\")\r\n#print(Azkaban_prison)\r\n\r\n# pass source position and destination position :taking 3 boxes to jail\r\nj=0\r\nfor i in range(3):\r\n if(check(Azkaban_prison[j])==False):\r\n j=j+1\r\n go_path=path(Azkaban_prison[j],Horcruxes[i])#source =aruco position\r\n adjmat_wei.bot_vector(go_path)\r\n #vector concept and pick up the box\r\n adjmat_wei.connect_edges(cell_adj,cell_wei,cell_num,Horcruxes[i])# para: adj_list,adj_wei,cell_num,cell_id\r\n return_path=path(Horcruxes[i],Azkaban_prison[j])#here source will be aruco position\r\n adjmat_wei.bot_vector(return_path)\r\n adjmat_wei.remove_edges(cell_wei,cell_adj,Azkaban_prison[j])# remove edges connected to jail\r\n #print(\"path\")\r\n print(Azkaban_prison[j],Horcruxes[i])\r\n print(go_path)\r\n print(Horcruxes[i],Azkaban_prison[j])\r\n print(return_path)\r\n j=j+1\r\n# update color and shape matrix again\r\n\r\n\r\n# now going to one of the weapons\r\nto_rem_edg_hor=1# check it once\r\nfor num in range(Weapons):\r\n path(source,num)\r\n # remove weapons 2 boxes back\r\n # update color_mat and shape_mat due to weapons\r\n flag=False\r\n r,c=return_cord(num)\r\n for ele in (Horcruxes):\r\n r1,c1=return_cord(ele)\r\n if color_mat[r1][c1]==color_mat[r][c] & shape_mat[r1][c1]==shape_mat[r][c]:\r\n to_rem_edg_hor=ele\r\n # aurdino code to move weapons to its initial position\r\n adjmat_wei.connect_edges(cell_adj,cell_wei,cell_num,num)\r\n path_w_h=path(source,ele,var_cell_wei)\r\n # aurdino code to move weapons from its position to horcurex location\r\n adjmat_wei.bot_vector(path_w_h)\r\n flag=True\r\n break\r\n if(flag==True):\r\n break\r\n if(flag==False):\r\n # special specified code for aurdino ot move forward keep the weapons and then move to next weapons\r\n\r\n\r\n# code for 4th horcurex to jail\r\ngo_path=path(source,unvis_horcurex)\r\nadjmat_wei.bot_vector(go_path)\r\nadjmat_wei.remove_edges(to_rem_edg_hor)\r\nreturn_path=path(source,unvis_prison)\r\nadjmat_wei.bot_vector(return_path)\r\n\r\nadjmat_wei.connect_edges(cell_adj,cell_wei,cell_num,unvis_horcurex)\r\nadjmat_wei.remove_edges(unvis_prison)\r\n\r\n# codee to put all left 3 weapons to horcurex\r\n\r\nfor num in range(Weapons):\r\n go_path=path(source,num)\r\n # aurdino code to move to weapons\r\n adjmat_wei.bot_vector(return_path)\r\n\r\n adjmat_wei.remove_edges(cell_wei,cell_adj,to_rem_edg_hor)\r\n # remove weapons 2 boxes back\r\n # update color_mat and shape_mat due to weapons\r\n r,c=return_cord(num)\r\n for ele in (Horcruxes):\r\n r1,c1=return_cord(ele)\r\n if color_mat[r1][c1]==color_mat[r][c] & shape_mat[r1][c1]==shape_mat[r][c]:\r\n to_rem_edg_hor=ele\r\n # aurdino code to move weapons to its initial position\r\n adjmat_wei.connect_edges(cell_adj,cell_wei,cell_num,num)\r\n path_w_h=path(source,ele,var_cell_wei)\r\n adjmat_wei.bot_vector(path_w_h)\r\n # aurdino code to move weapons from its position to horcurex location\r\n break\r\n"
},
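Both `dyn_adj.py` and `final_run.py` recover grid coordinates from a cell id with a double loop over `cell_num`, but because the numbering is row-major (1..81), the inverse mapping is simple closed-form arithmetic. A sketch of the equivalent shortcut (the function name here is illustrative):

```python
n = 9

def cord_of(cell_id):
    # cell ids run 1..81 row by row, so subtract 1 and split by row width
    return ((cell_id - 1) // n, (cell_id - 1) % n)

print(cord_of(1))    # (0, 0)  top-left
print(cord_of(10))   # (1, 0)  first cell of the second row
print(cord_of(81))   # (8, 8)  bottom-right
```

This gives the same (row, col) pairs as `return_cord` without scanning the whole grid.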
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5499807000160217,
"avg_line_length": 30.81012725830078,
"blob_id": "d4b9541fe9f6bfc185bcbfd20cc982b72baabb22",
"content_id": "5a88e10aad3d3ae0c548e4c0a45e299f6ff065c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5184,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 158,
"path": "/pid_tuning.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport cv2.aruco as aruco\r\nimport math\r\nimport serial\r\nfrom time import sleep\r\n#cap = cv2.VideoCapture(\"http://192.168.43.63:4747/video\")\r\n\r\nser=serial.Serial('COM5',9600)\r\nsleep(2)\r\nprint(\"connected\")\r\n\r\n\r\ndef writeToSerial(a, b, c, d):\r\n ser.write(str.encode(str(a) + ' ' + str(b) + ' ' + str(c) + ' ' + str(d) + ' 0\\n'))\r\n ser.readline()\r\n pass\r\n'''\r\ndef find_boat_centre():\r\n while(1):\r\n ret, arena = cap.read()\r\n #arena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)\r\n parameters = aruco.DetectorParameters_create()\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(arena, aruco_dict, parameters=parameters)\r\n if ids is None:\r\n continue\r\n img=cv2.circle(arena,(corners[0][0][0][0],corners[0][0][0][1]),5,(0,0,255),2)\r\n cv2.imshow('img',img)\r\n cv2.waitKey(0)\r\n cv2.destroyWindow('img')\r\n print(\"corners\")\r\n print(corners)\r\n for x in range(0,ids.shape[0]):\r\n p1 = max(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n p2 = min(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n q1 = max(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n q2 = min(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n xc = int(p2+abs(p1-p2)/2)\r\n yc = int(q2+abs(q1-q2)/2)\r\n return corners,(xc,yc)\r\n'''\r\ndef bot_vector(p,i,j):\r\n return p[0][0][i]-p[0][0][j]\r\n\r\n# return direction in which boat have to move\r\ndef dirn_of_mov_vector(boat_centre,next_cell):\r\n #cell_cent=(next_cell)\r\n #boat_centre[0]-=roi[0]\r\n #boat_centre[1]-=roi[1]# source of error y and x\r\n return next_cell-boat_centre\r\n\r\n# determine angle between boat direction and direction of movemnet\r\ndef cross_pro(dirn_of_mov=[1,1],boat_vector=[0,1]):\r\n a=np.array(dirn_of_mov)\r\n b=np.array(boat_vector)\r\n print(np.cross(a, b))\r\n mag = (math.sqrt(dirn_of_mov[0] ** 2 + dirn_of_mov[1] ** 2)) * (math.sqrt(boat_vector[0] ** 2 + boat_vector[1] ** 2))\r\n print(math.degrees(math.asin(np.cross(a,b)/mag)))\r\n return (math.degrees(math.asin(np.cross(a,b)/mag)))\r\n\r\nKP = 7\r\nKD = 70\r\nKI=0\r\nMaxSpeedLine = 150\r\nBaseSpeedLine = 100\r\n#instantStopAngle = 0.250\r\n#instantStopLine = 0.100\r\n\r\n#thresholdForRect = 200\r\n\r\n#Initializatins\r\nprev_error = 0\r\nrightMotorSpeed=0\r\nleftMotorSpeed=0\r\nmotor_speed=0\r\n#desired_value=(200,100)# set desired value according to camera\r\n\r\nset_point=[(96,365),(56,269),(104,168),(274,252)]\r\ni=0\r\n'''\r\nwhile (True):\r\n ret, image = cap.read()\r\n if(ret==False):\r\n break\r\n print(\"here1\")\r\n cv2.imshow('frame', image)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n print(\"here2\")\r\n\r\n p,center_of_bot=find_boat_centre()\r\n bv=bot_vector(p,0,1)\r\n dv=dirn_of_mov_vector(center_of_bot,set_point[i])\r\n error=cross_pro(dv,bv)\r\n if(abs(error)>80):\r\n # stop bot\r\n leftMotorSpeed=0\r\n rightMotorSpeed=0\r\n i+=1\r\n if(i>=4):\r\n i=i%4\r\n\r\n p, b_c = find_boat_centre()\r\n bv = bot_vector(p, 0, 1)\r\n dv = dirn_of_mov_vector(b_c, set_point[i])\r\n angle = cross_pro(bv, dv)\r\n while (angle > 10 | angle < -10):\r\n if (angle <= -10):\r\n # small right turn\r\n rightMotorSpeed=40\r\n leftMotorSpeed=40\r\n lind=0\r\n rind=1\r\n writeToSerial(rightMotorSpeed,leftMotorSpeed,rind,lind)\r\n sleep(0.05)\r\n elif 
angle > 10:\r\n # small left turn\r\n rightMotorSpeed = 40\r\n leftMotorSpeed = 40\r\n lind = 0\r\n rind = 1\r\n writeToSerial(rightMotorSpeed, leftMotorSpeed, rind, lind)\r\n sleep(0.05)\r\n\r\n p, b_c = find_boat_centre()\r\n bv = bot_vector(p, 0, 1)\r\n dv = dirn_of_mov_vector(b_c, set_point[i])\r\n angle = cross_pro(bv, dv)\r\n continue\r\n #integral = integral_prior + error * iteration_time\r\n #derivative = (error – prev_error) / iteration_time\r\n motor_speed = KP*error + KD*(error-prev_error)\r\n rightMotorSpeed = BaseSpeedLine - motor_speed\r\n leftMotorSpeed = BaseSpeedLine + motor_speed\r\n\r\n if (rightMotorSpeed > MaxSpeedLine): rightMotorSpeed = MaxSpeedLine\r\n if (leftMotorSpeed > MaxSpeedLine): leftMotorSpeed = MaxSpeedLine\r\n if (rightMotorSpeed < 0): rightMotorSpeed = 0\r\n if (leftMotorSpeed < 0): leftMotorSpeed = 0\r\n print('L:', leftMotorSpeed, 'R:', rightMotorSpeed)\r\n writeToSerial(leftMotorSpeed, rightMotorSpeed, 1, 1)\r\n prev_error = error\r\n #integral_prior = integral\r\n #sleep(iteration_time)\r\n'''\r\nwhile(1):\r\n lms=100\r\n rms=200\r\n ser.write(b'f')\r\n print(lms, rms)\r\n #writeToSerial(100,200,1,1)\r\n #print(lms,rms)\r\n sleep(2)"
},
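The commented-out controller in `pid_tuning.py` mixes a PD correction into the two wheel speeds. Run open-loop on a shrinking error, the arithmetic looks like this (gains and base speed taken from the script; the `mix` helper is an illustration, not part of the repo):

```python
KP, KD, BASE, MAXS = 7, 70, 100, 150

def mix(error, prev_error):
    # same formula as the script: P on the error, D on its change
    correction = KP * error + KD * (error - prev_error)
    right = min(max(BASE - correction, 0), MAXS)
    left = min(max(BASE + correction, 0), MAXS)
    return left, right

prev = 0
for error in (8, 4, 2, 0):
    left, right = mix(error, prev)
    print(error, left, right)
    prev = error
```

With KD ten times KP, the derivative term dominates whenever the error changes between frames, which is why the outputs slam against the 0/150 clamps here; that is exactly the behaviour one tunes away by lowering KD or raising the frame rate.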
{
"alpha_fraction": 0.4882648289203644,
"alphanum_fraction": 0.51368248462677,
"avg_line_length": 36.25490188598633,
"blob_id": "b041ea7018d37281a5ff9b8e16762da12425ec54",
"content_id": "b532c6b300f02d41f2aa430630d333986693776a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9757,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 255,
"path": "/pix1.0/all_functions.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport math\r\nimport time\r\n\r\ndef clr_range(LR,UR,arena):\r\n for i in range(1):\r\n r = cv2.selectROI(arena)\r\n col_img = arena[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]\r\n row, col, hei = col_img.shape\r\n for r1 in range(row):\r\n for c in range(col):\r\n pixe = col_img[r1][c]\r\n for k in range(3):\r\n LR[k] = min(LR[k], max(pixe[k]-15,0))\r\n UR[k] = max(UR[k], min(pixe[k]+15,255))\r\n return LR,UR\r\n\r\ndef recog_col(color_mat,shape_mat,row,col,LRW,URW,arena,num):\r\n lrw = np.array([LRW])\r\n urw = np.array([URW])\r\n mask = cv2.inRange(arena, lrw, urw)\r\n\r\n cv2.imshow('mask', mask)\r\n cv2.waitKey(0)\r\n\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt>20):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n cenx = cenx // row\r\n ceny = ceny // col\r\n color_mat[ceny][cenx] = num\r\n # shape recog\r\n if(num<2):\r\n rect = cv2.minAreaRect(cnt)# making min_area_rect aroung contours\r\n area_of_rect=rect[1][0]*rect[1][1]# area of contours\r\n box = cv2.boxPoints(rect)# recovering 4 point of min_rect\r\n box = np.int0(box)\r\n cv2.drawContours(mask, [box], 0, (100,100,255), 2)# drawing rectangle around contours\r\n #cv2.imshow('area_of',mask)\r\n #cv2.waitKey(100)\r\n rat=area_of_cnt/area_of_rect# taking ratio of (area of conotur/area of rectangle)\r\n if rat>=0.87:\r\n print(rat,1)\r\n shape_mat[ceny][cenx]=1\r\n else:\r\n print(rat,0)\r\n shape_mat[ceny][cenx]=0\r\n\r\ndef return_cord(cell_id,cell_num,n=9):\r\n for i in range(n):\r\n for j in range(n):\r\n if(cell_num[i][j]==cell_id):\r\n return (i,j)\r\n\r\n# for update of weights of which is decided by color and shape of weapons\r\ndef update_weight(color_mat,shape_mat,cell_adj,cell_num,var_cell_wei,shape,color,n=9):\r\n u_range=n*n+1 # 82 in case of n\r\n for i in range(1,u_range,1):\r\n for num in range(cell_adj[i]):\r\n if(num==0):\r\n continue\r\n else:\r\n r,c=return_cord(num,cell_num)\r\n if(color_mat[r][c]==color & shape_mat[r][c]==shape):\r\n var_cell_wei.append(0) # putting 0 for same color and shape\r\n else:\r\n var_cell_wei.append(1) #putting 1 for different shape or color\r\n\r\n# to connect a cell to its four neighbour whichever exists\r\ndef connect_edges(cell_adj,cell_wei,cell_num,cell_id,n=9):\r\n r, c = return_cord(cell_id,cell_num)\r\n upx = r - 1\r\n dwx = r + 1\r\n ly = c - 1\r\n ry = c + 1\r\n if (upx >= 0):\r\n cell_adj[cell_id].append(cell_num[upx][c])\r\n cell_wei[cell_id].append(1)\r\n if (dwx < n):\r\n cell_adj[cell_id].append(cell_num[dwx][c])\r\n cell_wei[cell_id].append(1)\r\n if (ly >= 0):\r\n cell_adj[cell_id].append(cell_num[r][ly])\r\n cell_wei[cell_id].append(1)\r\n if (ry < n):\r\n cell_adj[cell_id].append(cell_num[r][ry])\r\n cell_wei[cell_id].append(1)\r\n\r\n# for remove edges of horcurex when weapons reaches to Horcurex\r\ndef remove_edges(cell_wei,cell_adj,cell_id):\r\n if cell_adj[cell_id] is None:\r\n return\r\n while(len(cell_adj[cell_id])>1):\r\n cell_adj[cell_id].pop()\r\n cell_wei[cell_id].pop()\r\n\r\n# update color and shape matrix again\r\ndef update_color_shape(Horcruxes,Weapons,unvis_horcurex,arena,shape_mat,color_mat,row,col,LRR,URR,LRY,URY,up_date):\r\n new_color_mat=np.full((9,9),-1)\r\n new_shape_mat=np.full((9,9),-1)\r\n recog_col(new_color_mat, new_shape_mat, row, col, LRR, URR, arena, 0)\r\n recog_col(new_color_mat, 
new_shape_mat, row, col, LRY, URY, arena, 1)\r\n if(up_date==0):\r\n for ele in range (Horcruxes):\r\n if(new_shape_mat[r][c]!=-1):\r\n r,c=return_cord(ele)\r\n color_mat[r][c]=new_color_mat[r][c]\r\n shape_mat[r][c]=new_shape_mat[r][c]\r\n else:\r\n for ele in (Weapons):\r\n r,c=return_cord(ele)\r\n if(new_shape_mat[r][c]!=-1):\r\n shape_mat[r][c]=new_shape_mat[r][c]\r\n color_mat[r][c]=new_color_mat[r][c]\r\n\r\n# finding centres of each cells of arena\r\ndef find_centre(arena,n=9):\r\n cord=[]\r\n l,b=arena.shape\r\n row=l/n\r\n col=b/n\r\n for i in range(n):\r\n for j in range(n):\r\n cord.append(col*(i+(1/2)),row*(j+(1/2)))\r\n return cord\r\n\r\n# return front direction of boat\r\ndef bot_vector(p,i,j):\r\n return p[0][0][i]-p[0][0][j]\r\n\r\n# return direction in which boat have to move\r\ndef dirn_of_mov_vector(boat_centre,next_cell,r):\r\n cell_cent=find_centre(next_cell)\r\n boat_centre[0]-=r[0]\r\n boat_centre[1]-=r[1]\r\n return cell_cent-boat_centre\r\n\r\n# return distance between boat_centre and cell_cent\r\ndef find_dis(boat_centre,next_cell,r):\r\n cell_cent = find_centre(next_cell)\r\n boat_centre[0]-= r[0]\r\n boat_centre[1]-= r[1]\r\n return math.sqrt(((cell_cent[0]-boat_centre[0])**2)+((cell_cent[1]-boat_centre[1])**2))\r\n\r\n# determine angle between boat direction and direction of movemnet\r\ndef cross_pro(dirn_of_mov=[1,1],boat_vector=[0,1]):\r\n a=np.array(dirn_of_mov)\r\n b=np.array(boat_vector)\r\n print(np.cross(a, b))\r\n mag = (math.sqrt(dirn_of_mov[0] ** 2 + dirn_of_mov[1] ** 2)) * (math.sqrt(boat_vector[0] ** 2 + boat_vector[1] ** 2))\r\n print(math.degrees(math.asin(np.cross(a,b)/mag)))\r\n\r\ndef bot_movement(go_path,n=9,cord):\r\n dis = 10000\r\n flag2=False\r\n for box in (go_path):\r\n if(box==go_path[0]):\r\n continue\r\n min_thres_dis=r[1]/(3*n)\r\n destination=cord[box]\r\n if(box==go_path[len(go_path)-1]):\r\n r,c=return_cord(box)\r\n flag=False\r\n # find centre of white box and making it as a centriod\r\n mask_w=cv2.inRange(arena,LRW,URW)\r\n contours, _ = cv2.findContours(mask_w, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt > 20):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n r1 = cenx // row\r\n c1 = ceny // col\r\n if(r1==r & c1==c):\r\n flag=True\r\n break\r\n if flag==True:\r\n flag2=False\r\n destination=np.array([ceny,cenx])\r\n min_thres_dis=20# caliberated distance\r\n else:\r\n min_thres_dis=20# caliberated distance\r\n flag2=True\r\n\r\n while (dis > min_thres_dis): # threshold distance by calibertaion\r\n p, b_c = aruco(frame)\r\n bv = bot_vector(p,i,j)\r\n dv = dirn_of_mov_vector(b_c, destination, r)\r\n angle = cross_pro(bv, dv)\r\n while (angle > 10 | angle < -10):\r\n if (angle <=-10):\r\n # small right turn\r\n elif angle>10:\r\n # small left turn\r\n # arduino code to move forward little bit\r\n p, b_c = aruco(frame)\r\n dis=find_dis(b_c, destination)\r\n frame=dyn_adj.return_frame()\r\n\r\n if(flag2):\r\n # arduino code to move hoop up\r\n else:\r\n #arduino to pick up the bx\r\n\r\n# updating color of horcurex and jail\r\ndef update_color_shape(cell_id,lr,ur,num):\r\n ret, frame = cap.read()\r\n arena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n r,c=return_cord(cell_id)\r\n mask = cv2.inRange(arena, lr, ur)\r\n cv2.imshow('mask', mask)\r\n cv2.waitKey(0)\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n # for 
ignoring small contours\r\n total_cnt_area = 0\r\n no_of_cnt = 0\r\n for cnt in contours:\r\n total_cnt_area += cv2.contourArea(cnt)\r\n no_of_cnt += 1\r\n\r\n avg_cnt_area = total_cnt_area / no_of_cnt\r\n thresh_cnt_area = avg_cnt_area / 3\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt > thresh_cnt_area):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n cenx = cenx // n\r\n ceny = ceny // n\r\n if (cell_id==cell_num[ceny][cenx]):\r\n color_mat[ceny][cenx] = num\r\n # shape recog\r\n rect = cv2.minAreaRect(cnt) # making min_area_rect aroung contours\r\n area_of_rect = rect[1][0] * rect[1][1] # area of contours\r\n box = cv2.boxPoints(rect) # recovering 4 point of min_rect\r\n box = np.int0(box)\r\n cv2.drawContours(mask, [box], 0, (100, 100, 255), 2) # drawing rectangle around contours\r\n # cv2.imshow('area_of',mask)\r\n # cv2.waitKey(100)\r\n rat = area_of_cnt / area_of_rect # taking ratio of (area of conotur/area of rectangle)\r\n if rat >= 0.87:\r\n print(rat, 1)\r\n shape_mat[ceny][cenx] = 1\r\n else:\r\n print(rat, 0)\r\n shape_mat[ceny][cenx] = 0\r\n return True\r\n return False\r\n\r\n"
},
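The `recog_col` helper above decides between a square and a round token by how much of its min-area rectangle a contour fills. A small self-contained sketch of that test on a synthetic image (the 0.87 threshold comes from the code; the shapes here are illustrative):

```python
# Square-vs-circle test via min-area-rectangle fill ratio, as in recog_col.
import cv2
import numpy as np

canvas = np.zeros((200, 400), dtype=np.uint8)
cv2.rectangle(canvas, (20, 20), (120, 120), 255, -1)   # a filled square
cv2.circle(canvas, (280, 70), 50, 255, -1)             # a filled circle

contours, _ = cv2.findContours(canvas, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    rect = cv2.minAreaRect(cnt)
    fill_ratio = cv2.contourArea(cnt) / (rect[1][0] * rect[1][1])
    # a square fills its rectangle (~1.0); a disc fills only about pi/4 (~0.785)
    print('square' if fill_ratio >= 0.87 else 'circle', round(fill_ratio, 3))
```

A filled square covers its min-area rectangle almost completely, while a disc covers only about pi/4 of it, which is why a single ratio threshold separates the two.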
{
"alpha_fraction": 0.49399468302726746,
"alphanum_fraction": 0.517061173915863,
"avg_line_length": 33.411033630371094,
"blob_id": "1f83b6a220cb96244f96098b68cdbc9c6b472889",
"content_id": "3664f3a9a42ae9cf06a5e92d159fea9806085d3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19899,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 562,
"path": "/tac_only.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\nimport serial\r\nimport heapq\r\nimport all_functions as fuck\r\nimport math\r\nimport time\r\nfrom time import sleep\r\n# loading caliberataed things\r\nlr=np.load('lrr.npy')\r\nur=np.load('urr.npy')\r\nly=np.load('lry.npy')\r\nuy=np.load('ury.npy')\r\nprint(ly,uy)\r\nlb=np.load('lrb.npy')\r\nub=np.load('urb.npy')\r\nlg=np.load('lrg.npy')\r\nug=np.load('urb.npy')\r\nlw=np.load('lrw.npy')\r\nuw=np.load('urw.npy')\r\nshape_mat=np.load('shape_mat.npy')\r\ncolor_mat=np.load('color_mat.npy')\r\nroi=np.load('roi.npy')\r\nprint(\"everything loaded\")\r\nprint(color_mat)\r\nprint(shape_mat)\r\nprint(roi)\r\n#starting serial\r\nser=serial.Serial('COM5',9600)\r\nsleep(2)\r\nprint(\"connected\")\r\ncap=cv2.VideoCapture(1)\r\n# variable declaration\r\ncols=1\r\nn=5\r\nrows=n*n+1\r\ntotal_cells=n*n+1\r\n\r\nvar_cell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_adj = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_wei = [[0 for i in range(cols)] for j in range(rows)]\r\ncell_center=[]# for storing center of cell\r\ncell_center.append([0,0])\r\n# numpy array declaration\r\ncell_cord=[]\r\ncell_cord.append((0,0))# as 0 is not numbering of any cell\r\ncell_num=np.zeros((n, n),dtype=np.int16)\r\nweight_mat = np.zeros((n, n),dtype=np.int16) # for no of corners\r\ncnt=1\r\n# numbering of cells of arena box\r\nfor i in range(n):\r\n for j in range(n):\r\n cell_num[i][j]=cnt# cell numbering\r\n cnt+=1\r\n cell_cord.append((i, j))# storing value of rows and columns in cell_id\r\n cell_center.append([(roi[2]//n) * (j + (1 / 2)), (roi[3]//n) * (i + (1 / 2))]) #finding centres of each cells of arena row=r[1] col=col[1]\r\n\r\nprint(cell_num)\r\nprint(cell_cord)\r\nprint(cell_center)\r\n\r\n# finding cordinate of any cell\r\ndef return_cord(cell_id,n=5):\r\n for i in range(n):\r\n for j in range(n):\r\n if(cell_num[i][j]==cell_id):\r\n return (i,j)\r\n\r\n# updating color of horcurex and jail\r\ndef update_color_shape(cell_id, lwr, upr, num):\r\n ret, frame = cap.read()\r\n arena = frame[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n r, c = return_cord(cell_id)\r\n mask = cv2.inRange(arena,lwr,upr)\r\n cv2.imshow('mask', mask)\r\n cv2.waitKey(0)\r\n\r\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cv2.destroyWindow('mask')\r\n # for ignoring small contours\r\n total_cnt_area = 0\r\n no_of_cnt = 0\r\n for cnt in contours:\r\n total_cnt_area += cv2.contourArea(cnt)\r\n no_of_cnt += 1\r\n\r\n avg_cnt_area = total_cnt_area / no_of_cnt\r\n thresh_cnt_area = avg_cnt_area / 8\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt > thresh_cnt_area):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / cen[\"m00\"])\r\n cenx = cenx // col\r\n ceny = ceny // row\r\n if (cell_id == cell_num[ceny][cenx]):\r\n color_mat[ceny][cenx] = num\r\n # shape recog\r\n rect = cv2.minAreaRect(cnt) # making min_area_rect aroung contours\r\n area_of_rect = rect[1][0] * rect[1][1] # area of contours\r\n box = cv2.boxPoints(rect) # recovering 4 point of min_rect\r\n box = np.int0(box)\r\n cv2.drawContours(mask, [box], 0, (100, 100, 255), 2) # drawing rectangle around contours\r\n # cv2.imshow('area_of',mask)\r\n # cv2.waitKey(100)\r\n rat = area_of_cnt / area_of_rect # taking ratio of (area of conotur/area of rectangle)\r\n if rat >= 0.85:\r\n print(rat, 1)\r\n shape_mat[ceny][cenx] = 1\r\n else:\r\n 
print(rat, 0)\r\n shape_mat[ceny][cenx] = 0\r\n return True\r\n return False\r\n\r\ndef find_boat_centre():\r\n while(True):\r\n print(\"here4\")\r\n ret, img = cap.read()\r\n if (ret == False):\r\n break\r\n cv2.imshow('frame', img)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n arena = img[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n #zcv2.imshow('arena',arena)\r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\r\n parameters = aruco.DetectorParameters_create()\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(arena, aruco_dict, parameters=parameters)\r\n if ids is None:\r\n continue\r\n img=cv2.circle(arena,(corners[0][0][0][0],corners[0][0][0][1]),5,(0,0,255),2)\r\n cv2.imshow('img',img)\r\n cv2.waitKey(10)\r\n cv2.destroyWindow('img')\r\n #print(\"corners\")\r\n #print(corners)\r\n for x in range(0,ids.shape[0]):\r\n p1 = max(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n p2 = min(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n q1 = max(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n q2 = min(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n xc = int(p2+abs(p1-p2)/2)\r\n yc = int(q2+abs(q1-q2)/2)\r\n return corners,[xc,yc]\r\n\r\n# for update of weights of which is decided by color and shape of weapons\r\ndef update_weight(var_cell_wei,shape,color,n=5):\r\n u_range=n*n+1 # 82 in case of n=9\r\n for i in range(1,u_range,1):\r\n for num in cell_adj[i]:\r\n if(num==0):\r\n continue\r\n else:\r\n r,c=return_cord(num)\r\n if(color_mat[r][c]==color and shape_mat[r][c]==shape):\r\n var_cell_wei.append(0) # putting 0 for same color and shape\r\n else:\r\n var_cell_wei.append(1) #putting 1 for different shape or color\r\n\r\ndef path(source,destination,wei=cell_wei,n=5):\r\n path=[]\r\n par=[]\r\n dis=[]\r\n vis=[]\r\n nodes=n*n+1\r\n for i in range(nodes):\r\n dis.append(1000000)\r\n vis.append(False)\r\n par.append(-1)\r\n dis[source]=0\r\n par[source]=0\r\n q=[]\r\n heapq.heappush(q,(0,source))\r\n while q:\r\n next_item = heapq.heappop(q)\r\n node=next_item[1]\r\n #print(node)\r\n if vis[node]:\r\n continue\r\n vis[node]=True\r\n i=1\r\n flag=False\r\n for item in cell_adj[node]:\r\n if item!=0:\r\n if(dis[item]>(dis[node]+cell_wei[node][i])):\r\n dis[item]=dis[node]+cell_wei[node][i]\r\n par[item] = node\r\n heapq.heappush(q,(dis[item],item))\r\n i=i+1\r\n #print(\"parent\")\r\n #print(destination)\r\n if(par[destination]==-1):\r\n return path\r\n path.append(destination)\r\n while(par[destination]!=0):\r\n #print(par[destination])\r\n path.append(par[destination])\r\n destination=par[destination]\r\n path.reverse()\r\n #print(path)\r\n return path\r\n\r\n\r\n# to connect a cell to its four neighbour whichever exists\r\ndef connect_edges(cell_id):\r\n r, c = return_cord(cell_id)\r\n upx = r - 1\r\n dwx = r + 1\r\n lefy = c - 1\r\n ry = c + 1\r\n if (upx >= 0):\r\n cell_adj[cell_id].append(cell_num[upx][c])\r\n cell_wei[cell_id].append(1)\r\n if (dwx < n):\r\n cell_adj[cell_id].append(cell_num[dwx][c])\r\n cell_wei[cell_id].append(1)\r\n if (lefy >= 0):\r\n cell_adj[cell_id].append(cell_num[r][lefy])\r\n cell_wei[cell_id].append(1)\r\n if (ry < n):\r\n cell_adj[cell_id].append(cell_num[r][ry])\r\n cell_wei[cell_id].append(1)\r\n# for remove edges of horcurex when weapons reaches to Horcurex\r\n\r\ndef check(prison):\r\n r,c = return_cord(prison)\r\n upx = r - 1\r\n 
dwx = r + 1\r\n lefy = c - 1\r\n ry = c + 1\r\n j = 0\r\n if(upx>=0):\r\n if(color_mat[upx][c]<2 and color_mat[upx][c]>=0):\r\n return True\r\n if(dwx<n):\r\n if(color_mat[dwx][c]<2 and color_mat[dwx][c]>=0):\r\n return True\r\n if(lefy>=0):\r\n if(color_mat[r][lefy]<2 and color_mat[r][lefy]>=0):\r\n return True\r\n if (ry<n):\r\n if (color_mat[r][ry]<2 and color_mat[dwx][c]>=0):\r\n return True\r\n return False\r\n\r\ndef remove_edges(cell_id):\r\n if cell_adj[cell_id] is None:\r\n return\r\n while(len(cell_adj[cell_id])>1):\r\n cell_adj[cell_id].pop()\r\n cell_wei[cell_id].pop()\r\n# return front direction of boat\r\n\r\ndef bot_vector(p,i,j):\r\n return (p[0][0][i][0]-p[0][0][j][0],p[0][0][i][1]-p[0][0][j][1])\r\n\r\n# return direction in which boat have to move\r\ndef dirn_of_mov_vector(boat_centre,next_cell):\r\n #cell_cent=cell_center[next_cell]\r\n return (next_cell[0] - boat_centre[0], next_cell[1] - boat_centre[1])\r\n\r\n# return distance between boat_centre and cell_cent\r\ndef find_dis(boat_centre,next_cell):\r\n #cell_cent=cell_center[next_cell]\r\n print(\"next_cell :\",next_cell,boat_centre)\r\n return math.sqrt(((next_cell[0]-boat_centre[0])**2)+((next_cell[1]-boat_centre[1])**2))\r\n\r\n# determine angle between boat direction and direction of movemnet\r\ndef cross_pro(dirn_of_mov=[1,1],boat_vector=[0,1]):\r\n a=np.array(dirn_of_mov)\r\n b=np.array(boat_vector)\r\n #print(np.cross(a, b))\r\n mag = (math.sqrt(dirn_of_mov[0] ** 2 + dirn_of_mov[1] ** 2)) * (math.sqrt(boat_vector[0] ** 2 + boat_vector[1] ** 2))\r\n #print(math.degrees(math.asin(np.cross(a,b)/mag)))\r\n return (math.degrees(math.asin(np.cross(a,b)/mag)))\r\n\r\n# determining measure of angle to turn\r\ndef dot_pro(dirn_of_mov,boat_vector):\r\n a = np.array(dirn_of_mov)\r\n b = np.array(boat_vector)\r\n # print(np.cross(a, b))\r\n mag = (math.sqrt(dirn_of_mov[0] ** 2 + dirn_of_mov[1] ** 2)) * (math.sqrt(boat_vector[0] ** 2 + boat_vector[1] ** 2))\r\n # print(math.degrees(math.asin(np.cross(a,b)/mag)))\r\n return (math.degrees(math.acos(np.dot(a, b) / mag)))\r\n\r\n# locomaion of boat\r\ndef bot_movement(go_path):\r\n\r\n flag2=False# if true then to move hoop that is next cell is blue\r\n flag = False # if true then down hoop to that is next cell is green\r\n for box in go_path:\r\n print(\"visiting :\",box)\r\n dis = 10000\r\n #ret,img=cap.read()\r\n #arena = img[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n if(box==go_path[0]):\r\n continue\r\n min_thres_dis=roi[2]/(2*n)# distance is less than length_of_one_side/3\r\n #print(\"min_threshold_dis :\",min_thres_dis)\r\n destination=cell_center[box]\r\n print(\"box :\",box)\r\n #print(\"destinstion :\",destination)\r\n if(box==go_path[len(go_path)-1]):\r\n break\r\n r,c=return_cord(box)\r\n r1=-1\r\n c1=-1\r\n # find centre of white box and making it as a centriod\r\n ret, img = cap.read()\r\n arena = img[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]\r\n mask_w=cv2.inRange(arena,lw,uw)\r\n contours, _ = cv2.findContours(mask_w, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n # for ignoring small contours\r\n total_cnt_area = 0\r\n no_of_cnt = 0\r\n for cnt in contours:\r\n total_cnt_area += cv2.contourArea(cnt)\r\n no_of_cnt += 1\r\n avg_cnt_area = total_cnt_area / no_of_cnt\r\n thresh_cnt_area = avg_cnt_area / 5\r\n\r\n for cnt in contours:\r\n area_of_cnt = cv2.contourArea(cnt)\r\n if (area_of_cnt > thresh_cnt_area):\r\n cen = cv2.moments(cnt)\r\n cenx = int(cen[\"m10\"] / cen[\"m00\"])\r\n ceny = int(cen[\"m01\"] / 
cen[\"m00\"])\r\n c1 = cenx // n\r\n r1 = ceny // n\r\n if(r1==r and c1==c):\r\n flag=True\r\n break\r\n if flag==True:\r\n flag2=False\r\n destination=np.array([r1,c1])\r\n min_thres_dis=(roi[2]/(1.5*n))# caliberated distance\r\n else:\r\n flag2=True\r\n min_thres_dis=(roi[2]/(2*n))#caliberated distance\r\n\r\n while (dis > min_thres_dis): # threshold distance by calibertaion\r\n #ret,frame=cap.read()\r\n print(\"dis :\",dis)\r\n p, b_c = find_boat_centre()\r\n bv = bot_vector(p,0,3)\r\n dv = dirn_of_mov_vector(b_c,destination)\r\n angle = cross_pro(dv, bv)\r\n #print(\"ang :\", angle)\r\n while (int(angle) > 10 or int(angle) < -10):\r\n #print(\"ang :\", angle)\r\n if (int(angle) <=-10):#small right turn\r\n ser.write(b'r')\r\n sleep(.3)\r\n ser.write(b's')\r\n sleep(0.4)\r\n elif (int(angle)>10):\r\n ser.write(b'l')\r\n sleep(.3)\r\n ser.write(b's')\r\n sleep(0.4)\r\n p, b_c = find_boat_centre()\r\n bv = bot_vector(p, 0, 3)\r\n dv = dirn_of_mov_vector(b_c,destination)\r\n angle = cross_pro(dv, bv)\r\n # arduino code to move forward little bit\r\n ser.write(b'f')\r\n sleep(.4)\r\n ser.write(b's')\r\n sleep(.4)\r\n #ret,frame=cap.read()\r\n p, b_c = find_boat_centre()\r\n #print(\"bc :\",b_c)\r\n dis=find_dis(b_c, destination)\r\n '''\r\n if(flag2):\r\n ser.write(b'u')\r\n\r\n # arduino code to move hoop up\r\n\r\n else:\r\n ser.write(b'd')\r\n #arduino to pick up the bx\r\n '''\r\n\r\n# finding horcurex and jail\r\nhorcruxes = []\r\nazkaban_prison = []\r\nweapons = []\r\ngreen_cell=[]\r\nfree_jail=[]# 3 jail together\r\nclosed_jail=[]# 1 jail closed\r\n\r\nfor i in range(n):\r\n for j in range(n):\r\n print(color_mat[i][j])\r\n if(color_mat[i][j]==-4):# white present on horcruex\r\n horcruxes.append(cell_num[i][j])\r\n if(color_mat[i][j]==2):# jail\r\n if(check(cell_num[i][j])):\r\n free_jail.append(cell_num[i][j])\r\n else:\r\n closed_jail.append(cell_num[i][j])\r\n if (color_mat[i][j] == 5):#weapons\r\n weapons.append(cell_num[i][j])\r\n if(color_mat[i][j]==4 or color_mat[i][j]==-4):\r\n green_cell.append(cell_num[i][j])\r\n\r\nprint(\"horcurex :\",horcruxes)\r\nprint(\"free_jail :\",free_jail)\r\nprint('closed_jail :',closed_jail)\r\nprint(\"green_cell\",green_cell)\r\nprint(\"weapons :\",weapons)\r\n\r\n\r\n# making cell_wei and cell_adj\r\nfor i in range(n):\r\n for j in range(n):\r\n upx=i-1\r\n dwx=i+1\r\n lefy=j-1\r\n ry=j+1\r\n if(color_mat[i][j]<=2 and color_mat[i][j]>=0):\r\n if(upx>=0):\r\n cell_adj[cell_num[i][j]].append(cell_num[upx][j])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if(dwx<n):\r\n cell_adj[cell_num[i][j]].append(cell_num[dwx][j])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if (lefy >=0):\r\n cell_adj[cell_num[i][j]].append(cell_num[i][lefy])\r\n cell_wei[cell_num[i][j]].append(1)\r\n if (ry < n):\r\n cell_adj[cell_num[i][j]].append(cell_num[i][ry])\r\n cell_wei[cell_num[i][j]].append(1)\r\nprint(cell_adj)\r\n\r\n# pass source position and destination position :taking 3 boxes to jail\r\nhorcruex_counter=0\r\njail_counter=0\r\nweapons_counter=0\r\nleft_jail=[]\r\nleft_horcruex=[]\r\nleft_weapons=[]\r\nvis_horcruex=[]# bool var to maintain visited of horcruex\r\nfor i in range(n*n+1):\r\n vis_horcruex.append(False)\r\n\r\nwhile(True):\r\n row = math.ceil(roi[2] / n)\r\n col = math.ceil(roi[2] / n)\r\n print(row,col)\r\n print(roi[2])\r\n for boxes in (horcruxes):\r\n p,bc=find_boat_centre()\r\n print(bc)\r\n cellid = cell_num[int(bc[1]//col)][int(bc[0]//row)]\r\n print(cellid)\r\n #print()\r\n print(cell_num[(bc[1]//col)][(bc[0]//row)],boxes)\r\n 
path_to_horcruex=path(cell_num[(bc[1]//col)][(bc[0]//row)],boxes)\r\n print(\"path_to_horcurex :\",path_to_horcruex)\r\n bot_movement(path_to_horcruex)\r\n ser.write(b's')\r\n connect_edges(boxes)\r\n time.sleep(1)\r\n\r\n p, bc = find_boat_centre()\r\n cellid = cell_num[int(bc[1] // col)][int(bc[0] // row)]\r\n print(cellid)\r\n path_to_jail = path(cell_num[(bc[1]//col)][(bc[0]//row)],free_jail[jail_counter]) # path from horcruex to jail\r\n print(\"path_to_jail :\",path_to_jail)\r\n bot_movement(path_to_jail)\r\n ser.write(b's')\r\n sleep(1)\r\n\r\n if (update_color_shape(boxes,lr,ur,0)):\r\n print(\"hel\")\r\n else:\r\n temp=update_color_shape(boxes,ly,uy, 1)\r\n #remove_edges(free_jail[jail_counter])\r\n jail_counter+=1\r\n horcruex_counter+=1\r\n # move bot two step back as jail is disconnected\r\n #ser.write(b'b')\r\n if(jail_counter==len(free_jail)):\r\n break\r\n if(horcruex_counter==len(horcruxes)):\r\n break\r\n \r\n for ele in (weapons):\r\n p,bc=find_boat_centre()\r\n path_to_weapons=path(cell_num[(bc[1]//col)][(bc[0]//row)],ele)\r\n print(\"path_to_weapons :\",path_to_weapons)\r\n bot_movement(path_to_weapons)\r\n ser.write(b's')\r\n sleep(1)\r\n '''if (last_horcruex != 0):\r\n fuck.remove_edges(cell_wei, cell_adj, last_horcruex)'''\r\n \r\n # arduino code to move two box backward\r\n #ser.write(b'B')\r\n # code to scan frame\r\n print(\"till sucess\")\r\n print(ele, lr, ur)\r\n print(ele, ly, uy)\r\n if(update_color_shape(ele,lr,ur,0)):\r\n print(\"hel\")\r\n\r\n elif(update_color_shape(ele,ly,uy,1)):\r\n r,c=return_cord(ele)\r\n print(color_mat[r][c],shape_mat[r][c])\r\n _f=update_color_shape(1, ly, uy, 0)\r\n _e=update_color_shape(5, ly, uy, 0)\r\n _d=update_color_shape(21, ly, uy, 0)\r\n _c=update_color_shape(1, ly, uy, 1)\r\n _a=update_color_shape(5, ly, uy, 1)\r\n _b=update_color_shape(21, ly, uy, 1)\r\n\r\n print(\"here yellow\")\r\n print(color_mat)\r\n print(shape_mat)\r\n #temp = update_color_shape(ele,ly,uy,1)\r\n # first of all move box to current weapons place\r\n #ser.write(b'F')\r\n # flag for making sure if it find its color and shape with horcruex\r\n print(\"hel\")\r\n flag=False# initially we believe that there is no match\r\n for boxes in (green_cell):\r\n r,c=return_cord(boxes)\r\n r1,c1=return_cord(ele)\r\n print(\"debug\")\r\n print(r,c)\r\n print(r1,c1)\r\n if(color_mat[r][c]==color_mat[r1][c1] and shape_mat[r][c]==shape_mat[r1][c1]):\r\n print(\"matched\")\r\n flag=True\r\n vis_horcruex[boxes]=True\r\n connect_edges(ele)\r\n #update weight of graph\r\n var_cell_wei=[[0 for i in range(cols)] for j in range(rows)]\r\n update_weight(var_cell_wei, shape_mat[r1][c1],color_mat[r1][c1],n)\r\n p,bc=find_boat_centre()\r\n path_to_horcruex=path(ele,boxes,wei=var_cell_wei,n=5)# if error replace find_boat_center by ele\r\n print(path_to_horcruex)\r\n bot_movement(path_to_horcruex)\r\n ser.write(b's')\r\n sleep(1)\r\n #remove_edges(boxes)\r\n color_mat[r][c]=10# to sure that it does not match with any other else\r\n #move bot two step back or either bot centre is not in that horcruex\r\n #ser.write(b'B')\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()"
},
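tac_only.py's `path()` runs Dijkstra over the parallel `cell_adj`/`cell_wei` lists, where index 0 of every list is an unused placeholder. A minimal standalone sketch of the same routine on a toy 3-node graph (the function and variable names here are hypothetical):

```python
# Dijkstra over placeholder-prefixed adjacency/weight lists, as in path().
import heapq

def dijkstra_path(source, destination, adj, wei):
    n = len(adj)
    dist = [float('inf')] * n
    par = [-1] * n
    dist[source], par[source] = 0, 0
    heap = [(0, source)]
    while heap:
        d, node = heapq.heappop(heap)
        if d > dist[node]:
            continue  # stale heap entry
        for nb, w in zip(adj[node][1:], wei[node][1:]):  # skip the 0 placeholder
            if dist[nb] > dist[node] + w:
                dist[nb] = dist[node] + w
                par[nb] = node
                heapq.heappush(heap, (dist[nb], nb))
    path = []
    while destination != 0 and par[destination] != -1:
        path.append(destination)
        destination = par[destination]
    return path[::-1]

# 1 - 2 - 3 toy graph (index 0 of each inner list is the placeholder)
adj = [[0], [0, 2], [0, 1, 3], [0, 2]]
wei = [[0], [0, 1], [0, 1, 1], [0, 1]]
print(dijkstra_path(1, 3, adj, wei))  # -> [1, 2, 3]
```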
{
"alpha_fraction": 0.5407872200012207,
"alphanum_fraction": 0.5995436310768127,
"avg_line_length": 40.80487823486328,
"blob_id": "6b87d9c79cb4bb7f1d43f3366adac9d9fa93e02d",
"content_id": "fd708e291fdd858f8381febd32bc763ea4a0e8ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1753,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 41,
"path": "/pix1.0/ADJ.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\nimage=cv2.imread('C:/Users/DELL/Pictures/ar1.png')\r\n'''Here you will be required to check the Aruco type whether it is\r\n 4X4, 5X5, 6X6, 7X7 and change accordingly'''\r\ncv2.imshow('w',image)\r\ncv2.waitKey(0)\r\nfor i in range(4):\r\n if(i>=1):\r\n image=cv2.rotate(image,cv2.ROTATE_90_CLOCKWISE)\r\n aruco_dict=aruco.Dictionary_get(aruco.DICT_4X4_250)\r\n parameters=aruco.DetectorParameters_create()\r\n corners,ids,rejectedImgPoints=aruco.detectMarkers(image,aruco_dict,parameters=parameters)\r\n print(\"corners\")\r\n print(corners)\r\n '''detection of center of Aruco. corners is a list having coordinates of 4 corners. \r\n Taking half of x of 1st and 2nd and half of y of 2nd and third.'''\r\n #for cor in corners:\r\n #print(cor)\r\n for x in range(0,ids.shape[0]):\r\n p1 = max(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n p2 = min(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n q1 = max(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n q2 = min(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n xc = int(p2+abs(p1-p2)/2)\r\n yc = int(q2+abs(q1-q2)/2)\r\n #A small dot is shown at center. See\r\n image[yc][xc]=(0,255,0)\r\n #print(xc,yc)\r\n aruco.drawDetectedMarkers(image,corners,ids) #This function draws the boundary of the aruco\r\n cv2.imshow('aruco',image)\r\n cv2.waitKey(0)\r\n#cv2.imwrite('detected_aruco.jpg',image)\r\ncv2.waitKey(0)\r\n#IDENTIFY THE CELL YOURSELF\r\n#contributed by: APG"
},
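ADJ.py derives each marker centre from the min/max corner coordinates; since `corners[x][0]` is a (4, 2) float array, the same centre is simply the mean of the four corners. A sketch under that assumption, with synthetic corner values:

```python
# Marker centre as the mean of the four detected corners.
import numpy as np

corners_of_one_marker = np.array([[10.0, 10.0], [50.0, 12.0], [48.0, 52.0], [8.0, 50.0]])
xc, yc = corners_of_one_marker.mean(axis=0).astype(int)
print(xc, yc)  # equivalent to the min/max midpoint computation, up to rounding
```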
{
"alpha_fraction": 0.5179957747459412,
"alphanum_fraction": 0.5543401837348938,
"avg_line_length": 27.852632522583008,
"blob_id": "09deac7df2e2365faa9c56be70719c3810d14e67",
"content_id": "41e846f36041d8537ea9eab152e72390622aef1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2834,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 95,
"path": "/caliberate_color.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport all_functions as fuck\r\nimport time\r\n#import color_recognize\r\n\r\ncap=cv2.VideoCapture(0)\r\nwhile(True):\r\n time.sleep(2)\r\n ret, img = cap.read()\r\n if (ret == False):\r\n break\r\n cv2.imshow('frame', img)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n # cv2.imshow('resize',res_img)\r\n # take ROI\r\n r = cv2.selectROI(img) # return x,y,w,h\r\n np.save('roi', r)\r\n arena = img[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]\r\n arena = cv2.resize(arena, (720, 720))\r\n cv2.imshow('arena', arena)\r\n cv2.imwrite('./arena.jpg', arena)\r\n # taking roi for colour detect\r\n n = 5\r\n LRR = [255, 255, 255]\r\n URR = [0, 0, 0]\r\n LRG = [255, 255, 255]\r\n URG = [0, 0, 0]\r\n LRB = [255, 255, 255]\r\n URB = [0, 0, 0]\r\n LRY = [255, 255, 255]\r\n URY = [0, 0, 0]\r\n LRW = [255, 255, 255]\r\n URW = [0, 0, 0]\r\n\r\n # range for lower and upper red\r\n LRR, URR = fuck.clr_range(LRR, URR, arena)\r\n print(LRR)\r\n print(URR)\r\n np.save('lrr', LRR)\r\n np.save('urr', URR)\r\n # range for lower and upper yellow\r\n LRY, URY = fuck.clr_range(LRY, URY, arena)\r\n print(LRY)\r\n print(URY)\r\n np.save('lry', LRY)\r\n np.save('ury', URY)\r\n # range for lower and upper blue\r\n LRB, URB = fuck.clr_range(LRB, URB, arena)\r\n print(LRB)\r\n print(URB)\r\n np.save('lrb', LRB)\r\n np.save('urb', URB)\r\n # range for lower and upper green\r\n LRG, URG = fuck.clr_range(LRG, URG, arena)\r\n print(LRG)\r\n print(URG)\r\n np.save('lrg', LRG)\r\n np.save('urg', URG)\r\n # range for lower and upper white\r\n LRW, URW = fuck.clr_range(LRW, URW, arena)\r\n print(LRW)\r\n print(URW)\r\n np.save('lrw', LRW)\r\n np.save('urw', URW)\r\n low = [LRR, LRY, LRB, LRW, LRG]\r\n high = [URR, URY, URB, URW, URG]\r\n\r\n # taking each square box separately\r\n leng = arena.shape\r\n row = leng[0] // n\r\n col = leng[1] // n\r\n print(row)\r\n print(col)\r\n\r\n # numpy array declaration\r\n shape_mat = np.zeros((n, n), dtype=np.int16) # for shape\r\n edge_mat = np.zeros((n, n), dtype=np.int16) # for no of corners\r\n color_mat = np.zeros((n, n), dtype=np.int16) # for color\r\n for i in range(n):\r\n for j in range(n):\r\n shape_mat[i][j] = -1\r\n # color recognise and shape detect\r\n fuck.recog_col(color_mat, shape_mat, row, col, LRR, URR, arena, 0)\r\n fuck.recog_col(color_mat, shape_mat, row, col, LRY, URY, arena, 1)\r\n fuck.recog_col(color_mat, shape_mat, row, col, LRW, URW, arena, 5)\r\n fuck.recog_col(color_mat, shape_mat, row, col, LRB, URB, arena, 2)\r\n fuck.recog_col(color_mat, shape_mat, row, col, LRG, URG, arena, 4)\r\n np.transpose(color_mat)\r\n print(color_mat)\r\n # print(edge_mat)\r\n print(shape_mat)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()"
},
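The calibration script above saves one lower/upper BGR pair per colour (lrr.npy/urr.npy and so on) for later use with cv2.inRange. A minimal sketch of how such a saved pair masks an image; the bounds and the synthetic image below are illustrative, not the calibrated values:

```python
# Masking a frame with a calibrated lower/upper BGR pair.
import cv2
import numpy as np

lrr = np.array([0, 0, 120])      # illustrative lower BGR bound for red
urr = np.array([80, 80, 255])    # illustrative upper BGR bound for red
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:20, 10:20] = (30, 30, 200)        # a 10x10 red patch
mask = cv2.inRange(img, lrr, urr)        # 255 where the pixel lies in range
print(cv2.countNonZero(mask))            # -> 100
```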
{
"alpha_fraction": 0.43566176295280457,
"alphanum_fraction": 0.47242647409439087,
"avg_line_length": 34.400001525878906,
"blob_id": "93f62578e8c080952973efe581a93173f05b9850",
"content_id": "49e3e22b7842c7d623ae12e2521a9536cba6372c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 544,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 15,
"path": "/pix1.0/clr_det.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\n# finding color range RYBGW\r\ndef clr_range(LR,UR,arena):\r\n for i in range(1):\r\n r = cv2.selectROI(arena)\r\n col_img = arena[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]\r\n row, col, hei = col_img.shape\r\n for r1 in range(row):\r\n for c in range(col):\r\n pixe = col_img[r1][c]\r\n for k in range(3):\r\n LR[k] = min(LR[k], max(pixe[k]-15,0))\r\n UR[k] = max(UR[k], min(pixe[k]+15,255))\r\n return LR,UR"
},
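`clr_range` walks the selected patch pixel by pixel; the same +/-15 bounds can be computed with two vectorized numpy reductions. A sketch of that equivalent form (the function name is hypothetical):

```python
# Vectorized equivalent of clr_range's per-pixel min/max scan.
import numpy as np

def clr_range_fast(LR, UR, roi_pixels):
    # roi_pixels: H x W x 3 uint8 patch selected by the user
    lo = np.maximum(roi_pixels.astype(int).min(axis=(0, 1)) - 15, 0)
    hi = np.minimum(roi_pixels.astype(int).max(axis=(0, 1)) + 15, 255)
    return np.minimum(LR, lo).tolist(), np.maximum(UR, hi).tolist()

patch = np.full((4, 4, 3), 100, dtype=np.uint8)
print(clr_range_fast([255, 255, 255], [0, 0, 0], patch))
# -> ([85, 85, 85], [115, 115, 115])
```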
{
"alpha_fraction": 0.5921052694320679,
"alphanum_fraction": 0.6410362124443054,
"avg_line_length": 23.35416603088379,
"blob_id": "c74d28e73af67d6355fde93114fcd29511ae00e3",
"content_id": "54a41cec07e779e809aedccd2feac661543cc8d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2432,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 96,
"path": "/pix1.0/cop_main_fil.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport clr_det\r\nimport color_recognize\r\n\r\n# cap=cv2.VideoCapture(0)\r\n# while(true)\r\n# ret,img=cap.read()\r\n\r\nimg = cv2.imread('C:/Users/DELL/Pictures/Screenshots/Screenshot (244).png', 1)\r\n# cv2.imshow('image',img)\r\nsiz = img.shape\r\nrat = siz[0] / siz[1]\r\nwei = 1500\r\nhei = rat * wei\r\nprint(wei)\r\nprint(hei)\r\n\r\nprint(siz)\r\n\r\nres_img = cv2.resize(img, (int(wei), int(hei)))\r\n\r\n# cv2.imshow('resize',res_img)\r\n# take ROI\r\nr = cv2.selectROI(res_img)# return x,y,w,h\r\narena = res_img[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]\r\narena = cv2.resize(arena, (720, 720))\r\ncv2.imshow('arena', arena)\r\n\r\n# taking roi for colour detect\r\n\r\nLRR = [255, 255, 255]\r\nURR = [0, 0, 0]\r\nLRG = [255, 255, 255]\r\nURG = [0, 0, 0]\r\nLRB = [255, 255, 255]\r\nURB = [0, 0, 0]\r\nLRY = [255, 255, 255]\r\nURY = [0, 0, 0]\r\nLRW = [255, 255, 255]\r\nURW = [0, 0, 0]\r\n\r\n# range for lower and upper red\r\nLRR, URR = clr_det.clr_range(LRR, URR, arena)\r\nprint(LRR)\r\nprint(URR)\r\n\r\n# range for lower and upper yellow\r\nLRY, URY = clr_det.clr_range(LRY, URY, arena)\r\nprint(LRY)\r\nprint(URY)\r\n\r\n# range for lower and upper blue\r\nLRB, URB = clr_det.clr_range(LRB, URB, arena)\r\nprint(LRB)\r\nprint(URB)\r\n\r\n# range for lower and upper green\r\nLRG, URG = clr_det.clr_range(LRG, URG, arena)\r\nprint(LRG)\r\nprint(URG)\r\n\r\n# range for lower and upper white\r\nLRW, URW = clr_det.clr_range(LRW, URW, arena)\r\nprint(LRW)\r\nprint(URW)\r\n\r\nlow = [LRR, LRY, LRB, LRW, LRG]\r\nhigh = [URR, URY, URB, URW, URG]\r\n\r\n# taking each square box separately\r\nleng = arena.shape\r\nrow = leng[0] // 9\r\ncol = leng[1] // 9\r\nprint(row)\r\nprint(col)\r\n\r\n# numpy array declaration\r\nshape_mat = np.zeros((9, 9), dtype=np.int16) # for shape\r\nedge_mat = np.zeros((9, 9), dtype=np.int16) # for no of corners\r\ncolor_mat = np.zeros((9, 9), dtype=np.int16) # for color\r\nfor i in range(9):\r\n for j in range(9):\r\n shape_mat[i][j]=-1\r\n# color recognise and shape detect\r\ncolor_recognize.recog_col(color_mat,shape_mat, row, col, LRR, URR, arena, 0)\r\ncolor_recognize.recog_col(color_mat,shape_mat, row, col, LRY, URY, arena, 1)\r\ncolor_recognize.recog_col(color_mat,shape_mat, row, col, LRW, URW, arena, 5)\r\ncolor_recognize.recog_col(color_mat,shape_mat, row, col, LRB, URB, arena, 2)\r\ncolor_recognize.recog_col(color_mat,shape_mat, row, col, LRG, URG, arena, 4)\r\nnp.transpose(color_mat)\r\nprint(color_mat)\r\n#print(edge_mat)\r\nprint(shape_mat)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()"
},
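One quirk worth noting in the script above: `np.transpose(color_mat)` near the end returns a new view and is immediately discarded, so `color_mat` is printed untransposed. A short demonstration:

```python
# np.transpose does not modify its argument in place.
import numpy as np

color_mat = np.array([[1, 2], [3, 4]])
np.transpose(color_mat)      # result discarded, color_mat unchanged
print(color_mat[0, 1])       # -> 2
color_mat = color_mat.T      # assign the transpose back instead
print(color_mat[0, 1])       # -> 3
```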
{
"alpha_fraction": 0.5683333277702332,
"alphanum_fraction": 0.6045238375663757,
"avg_line_length": 19.79207992553711,
"blob_id": "7990c70b34adfd8006fb9ad08cd0b73233aafde0",
"content_id": "3aaef8877fbe318eeaa323cbe1699a039a61c52a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4200,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 202,
"path": "/pix1.0/stringwrite/stringwrite.ino",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "/*Motor Driver Configuration-\nMOTOR-A\nPin 1- High-Clockwise\nPin 2- High- Anticlockwise\nMOTOR-B\nPin 3- High-Clockwise\nPin 4- High- Anticlockwise*/\n#include<Servo.h>\nServo myservo1,myservo2; //Servo Objects\nint pos=0; //Initial Position of both the servos\nint l1=6; // Connect l1 to Pin 1 of Motor Driver\nint l2=7; //Connect l2 to pin 2 \nint r1=9; //connect r1 to pin 3\nint r2=8;//connect r2 to pin 4\nint ena=5; //Enable A, Pin 5 is PWM for controlling speed\nint enb=11;//Enable B, Pin 11 is PWM for controlling speed of motor\nint speedr=30; //Chenge the values in order to configure perfectly of sppedl and speedr\nint speedl=37;\nchar val;\nint rled=13; \nint bled=2;\nchar data;\nint lm;\nint rm;\nint f;\nint b;\nvoid forward()\n{\n //Both motors will run clockwise here\n analogWrite(ena,speedl); //These tell motor with which speed it needs to move\n analogWrite(enb,speedr);\n digitalWrite(l1,HIGH);\n digitalWrite(l2,LOW);\n digitalWrite(r1,LOW);\n digitalWrite(r2,HIGH);\n}\n\nvoid reverse()\n{\n //Both motors will run anti clockwise here\n analogWrite(ena,speedl);\n analogWrite(enb,speedr);\n digitalWrite(l1,LOW);\n digitalWrite(l2,HIGH);\n digitalWrite(r1,HIGH);\n digitalWrite(r2,LOW);\n}\n\nvoid left()\n{\n //Similar to right()\n //speedl=125;\n analogWrite(ena,speedl+10);\n analogWrite(enb,speedr+10);\n digitalWrite(l1,LOW);\n digitalWrite(l2,HIGH);\n digitalWrite(r1,LOW);\n digitalWrite(r2,HIGH); \n}\n\n\nvoid right()\n{//speedr=90;\n //Motor A goes clockwise and Motor B goes anti clockwise for very fast turning (keeping bot steady, it turns)\n analogWrite(ena,speedl+10);\n analogWrite(enb,speedr+10);\n digitalWrite(l1,HIGH);\n digitalWrite(l2,LOW);\n digitalWrite(r1,HIGH);\n digitalWrite(r2,LOW); \n}\n\nvoid stop()\n{\n //Stop the bot\n analogWrite(ena,0);\n analogWrite(enb,0);\n digitalWrite(l1,LOW);\n digitalWrite(l2,LOW);\n digitalWrite(r1,LOW);\n digitalWrite(r2,LOW);\n}\n\nvoid u2d()\n{ //For servo movement from up to down\n for (pos = 100; pos <= 180; pos += 1) { // goes from intial degrees to final degrees\n // in steps of 1 degree\n myservo1.write(pos); \n myservo2.write(180-pos); // tell servo to go to position in variable 'pos'\n delay(20); \n }\n}\n\nvoid d2u(int f, int i)\n{\n //For servo movement from down to up\n for (pos = 180; pos >= 100; pos -= 1) { // goes from final degrees to initial degrees\n myservo1.write(pos);\n myservo2.write(pos-100);// tell servo to go to position in variable 'pos'\n delay(20); \n }\n}\n\nvoid setup() {\n // put your setup code here, to run once:\n Serial.begin(9600);\n myservo1.attach(12);\n myservo2.attach(13);\n //u2d(pos,pos); //Set initial servos to 'pos' initial degrees\n pinMode(l1,OUTPUT);\n //pinMode(l2,OUTPUT);\n pinMode(r1,OUTPUT);\n pinMode(r2,OUTPUT);\n pinMode(ena,OUTPUT);\n pinMode(enb,OUTPUT);\n pinMode(3,OUTPUT);\n pinMode(4,OUTPUT);\n pinMode(rled,OUTPUT);\n pinMode(11,OUTPUT);\n pinMode(bled,OUTPUT);\n pinMode(10,OUTPUT);\n //Serial.write('1');\n}\n\nvoid loop() {\n //forward();\n //reverse();\n //right();\n //left();\n if(Serial.available() > 0){\n lm=Serial.parseInt();\n rm=Serial.parseInt();\n f=Serial.parseInt();\n b=Serial.parseInt();\n data = Serial.read();\n speedr=lm;\n speedl=rm;\n Serial.print(lm);\n Serial.print(' ');\n Serial.print(rm);\n Serial.print(' ');\n Serial.print(f);\n Serial.print(' ');\n Serial.println(b);\n //Serial.print(data);\n if(f==0 && b==0 && lm==0 && rm==0)\n {\n stop();\n }\n else if(f==1 && b==1){\n forward();\n }\n else if(f==0 && b==0){\n reverse();\n }\n else if(f==1 
&& b==0){\n right();\n }\n else if(f==0 && b==1){\n left();\n }\n // delay(2000);\n \n\n /*if(data == 'o'){\n digitalWrite(rled,HIGH);\n }\n\n else if(data == 'a'){\n digitalWrite(rled,LOW);\n }\n\n if(data == 'f'){\n forward();\n Serial.println(data);\n //delay(90); // \n //stop();\n \n }\n else if(data == 'r'){\n fright();\n //delay(60); //45\n //stop();\n \n }\n else if(data == 'l'){\n fleft();\n //delay(60); //30\n //stop();\n \n }\n\n\n else if(data == 's')\n {\n stop();\n }\n \n //Serial.write('0');\n */\n }\n}\n"
},
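The sketch's `loop()` reads four whitespace-separated integers per command with `Serial.parseInt()` (left speed, right speed, f, b). A hypothetical host-side sender matching that framing might look like the following; the port name and pyserial usage mirror tac_only.py, while the exact message framing is an assumption:

```python
# Hypothetical host-side sender for the stringwrite.ino integer protocol.
import serial
import time

ser = serial.Serial('COM5', 9600)   # same baud rate as Serial.begin(9600)
time.sleep(2)                       # give the Arduino time to reset

def drive(lm, rm, f, b):
    # four whitespace-separated ints; parseInt() stops at each separator
    ser.write('{} {} {} {}\n'.format(lm, rm, f, b).encode())

drive(37, 30, 1, 1)   # forward: f == 1 and b == 1 in the sketch's decoding table
drive(0, 0, 0, 0)     # stop
```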
{
"alpha_fraction": 0.502170741558075,
"alphanum_fraction": 0.5639170408248901,
"avg_line_length": 35.69091033935547,
"blob_id": "33ae3e5be22e967909095a1b89b831dab4362603",
"content_id": "c2de72ab2a9887e0a85fd8de1710e25956366a5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2073,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 55,
"path": "/pix1.0/find_aruco_corner.py",
"repo_name": "adipro7/Pixelate-2020",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\nwhile (True):\r\n ret, image = cap.read()\r\n if(ret==False):\r\n break \r\n cv2.imshow('frame', image)\r\n if cv2.waitKey(1) & 0xFF == ord('q'): \r\n break\r\n '''Here you will be required to check the Aruco type whether it is\r\n 4X4, 5X5, 6X6, 7X7 and change accordingly'''\r\n aruco_dict=aruco.Dictionary_get(aruco.DICT_6X6_1000)\r\n parameters=aruco.DetectorParameters_create()\r\n corners,ids,rejectedImgPoints=aruco.detectMarkers(image,aruco_dict,parameters=parameters)\r\n #print(type(corners))\r\n #print(len(corners))\r\n #print(corners[0][0][0][0],corners[0][0][0][1])\r\n #cv2.imshow('imag',img)\r\n #cv2.waitKey(0)'''\r\n\r\n '''detection of center of Aruco. corners is a list having coordinates of 4 corners. \r\n Taking half of x of 1st and 2nd and half of y of 2nd and third.'''\r\n #print(type(ids))\r\n if ids is None:\r\n\r\n continue\r\n img=cv2.circle(image,(corners[0][0][0][0],corners[0][0][0][1]),5,(0,0,255),2)\r\n cv2.imshow('img',img)\r\n cv2.waitKey(1)\r\n cv2.destroyWindow('img')\r\n print(\"corners\")\r\n print(corners)\r\n for x in range(0,ids.shape[0]):\r\n p1 = max(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n p2 = min(corners[x][0][0][0], corners[x][0][1][0],\r\n corners[x][0][2][0], corners[x][0][3][0])\r\n q1 = max(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n q2 = min(corners[x][0][0][1], corners[x][0][1][1],\r\n corners[x][0][2][1], corners[x][0][3][1])\r\n xc = int(p2+abs(p1-p2)/2)\r\n yc = int(q2+abs(q1-q2)/2)\r\n #A small dot is shown at center. See\r\n image[yc][xc]=(0,255,0)\r\n print(xc,yc)\r\n aruco.drawDetectedMarkers(image,corners,ids) #This function draws the boundary of the aruco\r\n #cv2.imshow('aruco',image)\r\n #cv2.waitKey(0)\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n"
}
] | 12 |
whitestorm19/PlasmidAnalysis | https://github.com/whitestorm19/PlasmidAnalysis | c4a5e07955580eb35e041593700c0ac660cccdf1 | af016a7c45676e7ffc8222a2bc11ffe1c6c4375f | 4577222e349e05ee2cf66cb5b611ca55bf78240b | refs/heads/master | 2021-01-10T19:05:42.459976 | 2015-01-05T13:20:59 | 2015-01-05T13:20:59 | 27,387,041 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 27,
"blob_id": "e3030329f8e86dd1d31b1d63921f663e9674e43a",
"content_id": "9efe09ea4cc853e36fb03b4dc66786ec4950b4ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/src/__init__.py",
"repo_name": "whitestorm19/PlasmidAnalysis",
"src_encoding": "UTF-8",
"text": "__author__ = 'dominikburri'\n"
},
{
"alpha_fraction": 0.7325905561447144,
"alphanum_fraction": 0.738161563873291,
"avg_line_length": 33.19047546386719,
"blob_id": "2a91e306d1c3acc82f60b7293ed67a9477c01a4c",
"content_id": "7d40dec7f8dacdba98586052c1e33d1493e650a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 721,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 21,
"path": "/README.md",
"repo_name": "whitestorm19/PlasmidAnalysis",
"src_encoding": "UTF-8",
"text": "PlasmidAnalysis\n===============\n\nHomework for Bioinformatics\n\nDominik: Promoter , RBS, -10, -35\n\nKevin: Terminator, CDS\n\nJeremy: oriT,poly A signal, rep_origin, primerbind, rRNA, mRNA, tRNA\n\nAlessandro: protein_bind, misc_binding, misc_recomb, LTR, misc_signal\nenhancer, mobile_element, sig_peptide\n\nAufgabenverteilung:\n Kevin 'wichtige annotation' in neue Liste speichern [done]\n Russo selektives speichern der Annotation (nur note und gene)\n 'wichtige Annotation' Sequenzen in Liste speichern und MUSCLE übergeben\n Jeremy Darstellung der Resultate\n Dominik PIM Auswertung: Rückgabe: Liste von \"fast identische Sequenzen\"\n zusätzliche Liste mit mehreren 'verbundenen' Sequenzen\n"
},
{
"alpha_fraction": 0.5774657726287842,
"alphanum_fraction": 0.5853281617164612,
"avg_line_length": 34.52368927001953,
"blob_id": "85d4c57efe6f716fc86ce6e4a9f5f0342aba8afb",
"content_id": "d5a0ffea9ab7a03dc746b58d5896d7174edc5a65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14245,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 401,
"path": "/src/Scanner/Scanner.py",
"repo_name": "whitestorm19/PlasmidAnalysis",
"src_encoding": "UTF-8",
"text": "from Bio import SeqIO\nfrom bioservices import *\nfrom Bio import motifs\nfrom Bio.Alphabet import IUPAC, Gapped\nfrom Bio.Blast import NCBIWWW\nimport pprint\n\n\nclass ResultObject:\n \"\"\"\n An Object for storing the sequence, feature type and annotation of the feature\n \"\"\"\n def __init__(self, sequence, feature_type, annotation):\n self.occurences = 0\n self.annotation = annotation\n self.feature_type = feature_type\n self.sequence = sequence\n def __str__(self):\n return str(self.feature_type)+\"; \" + str(self.sequence)+ \"; \" + str(self.annotation)\n\n def setOccurences(self):\n self.occurences += 1\n def getOccurences(self):\n return self.occurences\n\ndef generateList(feature_type, filePath):\n \"\"\"\n A generator\n :param feature_type:\n :return: a ResultObject with the desired sequence and annotation\n \"\"\"\n records = SeqIO.parse(filePath, \"genbank\")\n\n for record in records:\n if len(record.seq) > 1500: # minimum for number of bases\n for feature in record.features:\n if feature.type == feature_type:\n sequence_of_feature = record.seq[feature.location.start: feature.location.end]\n annotation = feature.qualifiers\n importantAnnotation = dict((k,annotation[k]) for k in ('note','gene','bound_moiety','mobile_element_type','product')if k in annotation)\n feature_type = feature.type\n result = ResultObject(sequence_of_feature, feature_type, importantAnnotation)\n yield result\n\ndef clustering(objects_of_sequences, durchgang):\n \"\"\"\n MUSCLE\n Compare the sequences to similarity. same sequences with similar annotations shall be clustered\n :param objects_of_sequences:\n :return:\n \"\"\"\n list_of_sequences = \"\"\n\n if len(objects_of_sequences)<=1:\n return []\n\n if durchgang == 2:\n print 'MUSCLE 2. Durchgang'\n for (i,k) in enumerate(objects_of_sequences):\n #print k\n list_of_sequences += \">\" + \"identifier\" + str(i) +\"\\n\"+str(k)+\"\\n\\n\"\n else:\n print 'MUSCLE 1. 
Durchgang'\n for (i,k) in enumerate(objects_of_sequences):\n #print k\n list_of_sequences += \">\" + \"identifier\" + str(i) +\"\\n\"+str(k.sequence)+\"\\n\\n\"\n\n #print list_of_sequences\n m = MUSCLE(verbose=False)\n jobid = m.run(frmt=\"fasta\", sequence=list_of_sequences, email=\"[email protected]\")\n\n while m.getStatus(jobid) == u'RUNNING':\n print \"Status: \", m.getStatus(jobid)\n\n if durchgang == 2:\n result=m.getResult(jobid, \"aln-fasta\")\n sequencelist = result\n f = open('fastatmp', 'w')\n f.write(sequencelist)\n f.close()\n else:\n resultFile = open('results.txt', 'a')\n result=m.getResult(jobid, \"sequence\")\n sequencelist = result\n f = open('sequence_result.fasta', 'w')\n f.write(sequencelist)\n resultFile.write(sequencelist)\n f.close()\n result=m.getResult(jobid, \"pim\")\n pim_result = result\n f = open('pim_result.txt', 'w')\n f.write(pim_result)\n resultFile.write(pim_result)\n f.close()\n resultFile.close()\n\n return sequencelist\n\ndef pim_evaluation(schwellenwert):\n '''\n Auswertung der Percent Identity Matrix\n Nimmt die bestehenden Files zur Berechnung: pim_result.txt und sequence_result.fasta\n :param schwellenwert: der Schwellenwert fuer die Erkennung von Matches\n :return: Liste mit aehnlichen Sequenzen (als Seq Object gespeichert),\n die jeweils in eine Liste gepackt sind\n '''\n\n identifier_list = []\n matches = []\n\n f = open('pim_result.txt', 'r')\n for i in range(6):\n f.readline()\n lines = 1\n while True:\n line = f.readline()\n if line == '':\n break\n words = line.split()\n words.pop(0) # deleting 1: etc\n name = words[0] # getting 'identifierXY'\n identifier_list.append(name)\n words.pop(0) # deleting 'identifierXY'\n if len(words) >= 1:\n index = 1\n for value in words:\n value = float(value)\n if index < lines:\n if value > schwellenwert:\n matches.append([name, index])\n #print name, secondname\n else:\n break\n index += 1\n lines += 1\n\n # get the correct index from the full identifier list\n # and set the name of the corresponding identifier\n names = []\n new_matches = []\n for match in matches:\n match[1] = identifier_list[match[1]-1]\n\n if match[1] in names:\n if not match[0] in new_matches:\n new_matches.append(match[0])\n else:\n names.append(match[1])\n\n print match # print the identifier names\n # if not match[0] in names:\n # names.append(match[0])\n # if not match[1] in names:\n # names.append(match[1])\n\n # TODO: get the multiple sequences that are similar\n multiple_similar_sequences = []\n for new_match in new_matches:\n multiple_similar_sequences.append(new_match)\n for match in matches:\n if new_match in match:\n for entry in match:\n if not entry in multiple_similar_sequences:\n multiple_similar_sequences.append(entry)\n #\n # print 'Multiple similar sequences: ' + str(multiple_similar_sequences)\n\n # TODO: get the unnessecary entries out\n # for match in matches:\n # print match\n # if match[0] in multiple_similar_sequences:\n # matches.remove(str(match))\n # if match[1] in multiple_similar_sequences:\n # matches.remove(str(match))\n\n matches.append(multiple_similar_sequences)\n\n print 'Matches: ' + str(matches)\n # get the sequence from the identifier name\n handle = open('sequence_result.fasta', 'r')\n for record in SeqIO.parse(handle, 'fasta', IUPAC.unambiguous_dna):\n for match in matches:\n for i in range(len(match)):\n if record.id == match[i]:\n match[i] = record.seq\n\n #print match\n\n handle.close()\n return matches\n\n\ndef createPSSM():\n print \"Start PSSM\"\n\n #sequencelist = 
sequencelist.replace(\"-\", \".\")\n list = []\n\n for seq_record in SeqIO.parse(\"fastatmp\", \"fasta\", IUPAC.unambiguous_dna):\n list.append(str(seq_record.seq))\n\n #Blast typical sequence\n result_handle = NCBIWWW.qblast(\"blastn\", \"nt\", list[0])\n save_file = open(\"my_blast.xml\", \"w\")\n save_file.write(result_handle.read())\n save_file.close()\n result_handle.close()\n\n #motifs.create(test, alphabet=Gapped(IUPAC.unambiguous_dna))\n m = motifs.create(list, alphabet=Gapped(IUPAC.unambiguous_dna))\n print \"motif created\"\n\n\n pwm = m.counts.normalize(pseudocounts=0.25)\n print \"PWM done\"\n pssm = pwm.log_odds()\n print \"PSSM done\"\n print pssm\n return pssm\n\ndef group_identical_annotations(single_sequence_list, feature):\n print 'Identical annotations with different Sequences are grouped'\n\n featureTypes = {\n 'terminator':{\n 'T0': 'note', 'T1': 'note', 'T2': 'note',\n 'T7': 'note', 'rrnB': 'note', 'tNOS': 'note'\n },\n 'CDS': {\n 'hypothetical protein': 'product', 'bla': 'gene', 'ampR': 'gene',\n 'kanamycin resistance protein': 'product', 'Amp': 'product', 'tetR': 'product',\n 'cat': 'gene', 'green fluorescent protein': 'product', 'neo': 'gene'\n },\n 'protein_bind' :{\n 'lacO':'bound_moiety', 'lac repressor protein':'bound_moiety', 'loxP site cassette':'note',\n 'lac operator':'note'\n },\n 'misc_binding' : {\n 'echinomycin':'bound_moiety','Escherichia coli IHF':'bound_moiety'\n },\n 'misc_recomb' : {\n 'AttR2':'note', 'AttR1':'note', 'FRT':'note', 'attB1':'note','attB2':'note', 'loxM3':'note',\n 'loxP':'note'\n },\n 'LTR' : {},\n 'misc_signal' : {\n 'enterokinase recognition sequence':'note'\n },\n 'enhancer' : {\n 'tranlational enhancer':'note'\n },\n 'mobile_element' : {},\n 'sig_peptide' : {},\n 'oriT' : {\n 'oriT':'note','origin of transfer':'note','IncP origin of transfer':'note'\n },\n 'polyA_signal' : {\n 'HSV':'note', 'SV40':'note','SV40 late polyadenylation signal':'note'\n },\n 'rep_origin' : {\n 'ColE1':'note', 'F1 Ori':'note', 'R6K':'note', 'SV40 origin of replication':'note', 'colE1':'note', 'f1':'note',\n 'oriV':'note', 'pBM1(ColE1)':'note', 'ColE1 pBR322':'note', 'ColE1-derived plasmid replication origin':'note', 'pUC':'note'\n },\n 'primer_bind' : {\n 'F24':'note', 'T7': 'note', 'R24': 'note', 'VF2 forward sequencing primer annealing site (BBa_G00100)': 'note'\n },\n 'rRNA' : {},\n 'mRNA' : {},\n 'tRNA' : {},\n 'promoter' : {\n 'actin 15':'gene', 'bla':'gene', 'ADH1 promoter':'gene', 'CMV promoter':'gene', 'CaMV 35S promoter':'gene',\n 'Plac':'gene', 'SP6 promoter':'gene', 'T3 promoter':'gene','T7 promoter':'gene'\n },\n 'RBS' : {}\n #-10_signal = {}\n #-35_signal = {}\n }\n\n# and resultValue == annotationKey\n\n save_list = []\n print featureTypes[feature]\n counter = 0\n print feature\n for resultKey, resultValue in featureTypes[feature].items():\n counter += 1\n tempSequenceList = single_sequence_list\n tempList = []\n for resultObject in tempSequenceList:\n for annotationKey, annotationValue in resultObject.annotation.items():\n if resultKey == annotationValue[0] and resultValue == annotationKey:\n tempList.append(resultObject)\n save_list.append(tempList)\n print \"*---*\"\n print len(save_list)\n print 'save list: ' + str(save_list)\n return save_list\n\n\n\ndef reduce_to_single_sequences(generated_object, feature):\n \"\"\"\n same sequences + annotations -> count occurences and prepare new list\n :param generated_object: list generator\n :return: list of identical objects\n \"\"\"\n\n featureTypes = {\n 'oriT': ['gene', 'product'], 
'polyA_signal': ['note'], 'rep_origin': ['note'],\n 'primer_bind': ['note'], 'rRNA': ['poduct'], 'mRNA': ['gene'], 'tRNA': ['product'],\n 'promoter': ['note'], \"RBS\": ['note', 'gene'], \"-10_signal\": ['note', 'gene'],\n '-35_signal': ['note', 'gene'], 'terminator': ['note'], 'CDS': ['gene', 'product'],\n 'protein_bind': ['note', 'bound_moiety'], 'misc_binding': ['note', 'bound_moiety'],\n 'misc_recomb': ['note'], 'LTR': ['note'], 'misc_signal': ['note'], 'enhancer': ['note'],\n 'mobile_element': ['mobile_element_type', 'note'], 'sig_peptide': ['note']\n }\n\n results = []\n try:\n results.append(generated_object.next())\n except (StopIteration):\n print \"Warning: empty generator. \", feature, \" not found!\"\n return []\n #print results\n\n for resultObject in generated_object:\n counter = 1\n foundMatch = False\n for result in results:\n matchCounter = 0\n for key in featureTypes[feature]:\n if str(resultObject.sequence) == str(result.sequence) \\\n and resultObject.annotation.get(key)==result.annotation.get(key):\n matchCounter += 1\n if matchCounter == len(featureTypes[feature]):\n result.setOccurences()\n foundMatch = True\n if len(results) == counter:\n if foundMatch == False:\n results.append(resultObject)\n counter += 1\n\n return results\n\n\n\n\n# - - - - start of skript - - - -\n# - - - - - - - - - - - - - - - -\njeremyFeatures = ['oriT', 'polyA_signal', 'rep_origin', 'primer_bind', 'rRNA', 'mRNA', 'tRNA']\ndominiks_list = ['promoter', 'RBS', '-10_signal', '-35_signal']\nkevins_list = ['terminator', 'CDS']\nalessandros_list = ['protein_bind', 'misc_binding', 'misc_recomb', 'LTR', 'misc_signal',\n 'enhancer', 'mobile_element', 'sig_peptide']\n\n\ncomplete_list = jeremyFeatures + dominiks_list + kevins_list + alessandros_list\n\nsave_file_object = open(\"list_of_identical_objects.txt\", \"w\")\n\n# Schwellenwert fuer nahezu identische Sequenzen bei der percent identitiy matrix\nschwellenwert = 90.0\nf = open('pssm_results.txt', 'w')\nf.write('')\nf.close()\nresultFile = open('results.txt', 'w')\nresultFile.write('')\nresultFile.close()\nfor feature in kevins_list:\n print 'Feature: ' + feature\n filePath = \"../../files/vectors.gb\"\n # make a list generator with the desired feature and its annotation\n list_generator = generateList(feature, filePath)\n\n # same sequences + annotations -> count occurences and prepare new list\n list_of_identical_objects = reduce_to_single_sequences(list_generator, feature)\n summe = 0\n for object in list_of_identical_objects:\n summe += object.getOccurences()\n #Blast typical sequence\n save_file_object.write(str(object) + \"\\t\" + str(object.getOccurences()) + \"\\n\")\n print(\"Anzahl identischer objekte: \\t\" + str(len(list_of_identical_objects)))\n print(\"Summe aller Objekte: \\t\\t\\t\" + str(summe))\n # 'wichtige Annotation' Sequenzen in Liste speichern und MUSCLE uebergeben\n prepared_list = group_identical_annotations(list_of_identical_objects, feature)\n #prepared_list = list_of_identical_objects\n for entry in prepared_list:\n print str(entry)\n muscle_result = clustering(entry, 1)\n # PIM Auswertung: Sequenzen groesser Schwellenwert (bsp. 95%) rausspeichern. 
Rueckgabe: Liste von \"fast identische Sequenzen\"\n list_of_near_identical_sequences = pim_evaluation(schwellenwert)\n for sequences in list_of_near_identical_sequences:\n print 'Sequences for further inspection: ' + str(sequences)\n if len(sequences) > 1:\n clustering(sequences, 2)\n pssm_result = createPSSM()\n f = open('pssm_results.txt', 'a')\n f.write(str(pssm_result))\n f.close()\n\nsave_file_object.close()\n"
}
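Scanner.py's `pim_evaluation` scans the lower triangle of MUSCLE's percent identity matrix and keeps pairs above the threshold. A self-contained sketch of that core idea on a synthetic 3x3 matrix:

```python
# Thresholding the lower triangle of a percent identity matrix.
ids = ['identifier0', 'identifier1', 'identifier2']
pim = [[100.0, 95.2, 40.0],
       [95.2, 100.0, 38.5],
       [40.0, 38.5, 100.0]]
threshold = 90.0

matches = []
for i in range(len(ids)):
    for j in range(i):               # strictly below the diagonal
        if pim[i][j] > threshold:
            matches.append([ids[i], ids[j]])
print(matches)  # -> [['identifier1', 'identifier0']]
```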
] | 3 |
YoshlikMedia/Braille-reader-bot | https://github.com/YoshlikMedia/Braille-reader-bot | c1131ecc31bf1141520dbdc665540cd134ee43c1 | c351d11d5069f78000c645efdbacd3998b793653 | 9ff34837178d94152d0e78c10ee3efd5fb08fb80 | refs/heads/main | 2023-08-01T12:30:01.303321 | 2021-09-19T12:17:15 | 2021-09-19T12:17:15 | 399,337,592 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6677696108818054,
"alphanum_fraction": 0.6706271767616272,
"avg_line_length": 45.16666793823242,
"blob_id": "870d142dde473d74d050a7c45269b767f00f6852",
"content_id": "c19e8c4453d763bc2eb247c10fe62defbe915ebb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6649,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 144,
"path": "/model/train.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nTrains model using parameters and setting defined in model.params\nResults are stored in local_config.data_path / params.model_name\n\"\"\"\nimport local_config\nimport sys\nsys.path.append(local_config.global_3rd_party)\nfrom collections import OrderedDict\nimport os\nimport torch\nimport ignite\nfrom ignite.engine import Events\nfrom pathlib import Path\n\nimport ovotools.ignite_tools\nimport ovotools.pytorch_tools\nimport ovotools.pytorch\n\nfrom data_utils import data\nfrom model import create_model_retinanet\nfrom model.params import params, settings\nimport model.validate_retinanet as validate_retinanet\n\nif settings.findLR:\n params.model_name += '_findLR'\nparams.save(can_overwrite=settings.can_overwrite)\n\n\nctx = ovotools.pytorch.Context(settings=None, params=params)\n\nmodel, collate_fn, loss = create_model_retinanet.create_model_retinanet(params, device=settings.device)\nif 'load_model_from' in params.keys():\n preloaded_weights = torch.load(Path(local_config.data_path) / params.load_model_from, map_location='cpu')\n model.load_state_dict(preloaded_weights)\n\nctx.net = model\nctx.loss = loss\n\ntrain_loader = data.create_dataloader(params, collate_fn,\n list_file_names=params.data.train_list_file_names, shuffle=True)\nval_loaders = { k: data.create_dataloader(params, collate_fn, list_file_names=v, shuffle=False)\n for k,v in params.data.val_list_file_names.items() }\nprint('data loaded. train:{} batches'.format(len(train_loader)))\nfor k,v in val_loaders.items():\n print(' {}:{} batches'.format(k, len(v)))\n\nctx.optimizer = eval(params.optim)(model.parameters(), **params.optim_params)\n\nmetrics = OrderedDict({\n 'loss': ignite.metrics.Loss(loss.metric('loss'), batch_size=lambda y: params.data.batch_size), # loss calc already called when train\n 'loc': ignite.metrics.Loss(loss.metric('loc'), batch_size=lambda y: params.data.batch_size),\n 'cls': ignite.metrics.Loss(loss.metric('cls'), batch_size=lambda y: params.data.batch_size),\n})\n\neval_metrics = OrderedDict({\n 'loss': ignite.metrics.Loss(loss, batch_size=lambda y: params.data.batch_size), # loss calc must be called when eval\n 'loc': ignite.metrics.Loss(loss.metric('loc'), batch_size=lambda y: params.data.batch_size),\n 'cls': ignite.metrics.Loss(loss.metric('cls'), batch_size=lambda y: params.data.batch_size),\n})\n\ntarget_metric = 'train:loss'\n\ntrainer_metrics = {} if settings.findLR else metrics\neval_loaders = {}\nif settings.findLR:\n eval_loaders['train'] = train_loader\neval_loaders.update(val_loaders)\neval_event = ignite.engine.Events.ITERATION_COMPLETED if settings.findLR else ignite.engine.Events.EPOCH_COMPLETED\neval_duty_cycle = 2 if settings.findLR else 1\ntrain_epochs = params.lr_finder.iters_num*len(train_loader) if settings.findLR else settings.max_epochs\n\ntrainer = ovotools.ignite_tools.create_supervised_trainer(model, ctx.optimizer, loss, metrics=trainer_metrics, device=settings.device)\nevaluator = ignite.engine.create_supervised_evaluator(model, metrics=eval_metrics, device=settings.device)\n\nif settings.findLR:\n best_model_buffer = None\nelse:\n best_model_buffer = ovotools.ignite_tools.BestModelBuffer(ctx.net, 'val:loss', minimize=True, params=ctx.params)\nlog_training_results = ovotools.ignite_tools.LogTrainingResults(evaluator = evaluator,\n loaders_dict = eval_loaders,\n best_model_buffer=best_model_buffer,\n params = params,\n duty_cycles = eval_duty_cycle)\ntrainer.add_event_handler(eval_event, log_training_results, event = 
eval_event)\n\nif settings.findLR:\n import math\n @trainer.on(Events.ITERATION_STARTED)\n def upd_lr(engine):\n log_lr = params.lr_finder.log_lr_start + (params.lr_finder.log_lr_end - params.lr_finder.log_lr_start) * (engine.state.iteration-1)/params.lr_finder.iters_num\n lr = math.pow(10, log_lr)\n ctx.optimizer.param_groups[0]['lr'] = lr\n engine.state.metrics['lr'] = ctx.optimizer.param_groups[0]['lr']\n if engine.state.iteration > params.lr_finder.iters_num:\n print('done')\n engine.terminate()\nelse:\n if params.lr_scheduler.type == 'clr':\n clr_scheduler = ovotools.ignite_tools.ClrScheduler(train_loader, model, ctx.optimizer, target_metric, params,\n engine=trainer)\n else:\n ctx.create_lr_scheduler()\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def lr_scheduler_step(engine):\n call_params = {'epoch': engine.state.epoch}\n if ctx.params.lr_scheduler.type.split('.')[-1] == 'ReduceLROnPlateau':\n call_params['metrics'] = engine.state.metrics['val_dsbi:loss']\n engine.state.metrics['lr'] = ctx.optimizer.param_groups[0]['lr']\n ctx.lr_scheduler.step(**call_params)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def eval_accuracy(engine):\n if engine.state.epoch % 100 == 1:\n data_set = validate_retinanet.prepare_data(ctx.params.data.val_list_file_names)\n for key, data_list in data_set.items():\n acc_res = validate_retinanet.evaluate_accuracy(os.path.join(ctx.params.get_base_filename(), 'param.txt'),\n model, settings.device, data_list)\n for rk, rv in acc_res.items():\n engine.state.metrics[key+ ':' + rk] = rv\n\n#@trainer.on(Events.EPOCH_COMPLETED)\n#def save_model(engine):\n# if save_every and (engine.state.epoch % save_every) == 0:\n# ovotools.pytorch_tools.save_model(model, params, rel_dir = 'models', filename = '{:05}.t7'.format(engine.state.epoch))\n\ntimer = ovotools.ignite_tools.IgniteTimes(trainer, count_iters = False, measured_events = {\n 'train:time.iter': (trainer, Events.ITERATION_STARTED, Events.ITERATION_COMPLETED),\n 'train:time.epoch': (trainer, Events.EPOCH_STARTED, Events.EPOCH_COMPLETED),\n 'val:time.epoch': (evaluator, Events.EPOCH_STARTED, Events.EPOCH_COMPLETED),\n})\n\ntb_logger = ovotools.ignite_tools.TensorBoardLogger(trainer,params,count_iters=settings.findLR)\ntb_logger.start_server(settings.tensorboard_port, start_it = False)\n\[email protected](Events.ITERATION_COMPLETED)\ndef reset_resources(engine):\n engine.state.batch = None\n engine.state.output = None\n #torch.cuda.empty_cache()\n\ntrainer.run(train_loader, max_epochs = train_epochs)\n\n"
},
{
"alpha_fraction": 0.43475356698036194,
"alphanum_fraction": 0.45826098322868347,
"avg_line_length": 32.42856979370117,
"blob_id": "c1808ad81c03366da472f23dbfa0036bcde44ec3",
"content_id": "41c610c5938787091654678fcb3e6c55b83a1885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7615,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 224,
"path": "/braille_utils/braille.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "global void\nglobal a\nglobal b\nglobal c\nglobal d\nglobal e\nglobal f\nglobal g\nglobal h\nglobal i\nglobal j\nglobal k\nglobal l\nglobal m\nglobal n\nglobal o\nglobal p\nglobal q\nglobal r\nglobal s\nglobal t\nglobal u\nglobal v\nglobal w\nglobal x\nglobal y\nglobal z\n\ncharToArray = {\n \" \" : [[0,0],[0,0],[0,0]],\n \"a\" : [[1,0],[0,0],[0,0]],\n \"b\" : [[1,0],[1,0],[0,0]],\n \"c\" : [[1,1],[0,0],[0,0]],\n \"d\" : [[1,1],[0,1],[0,0]],\n \"e\" : [[1,0],[0,1],[1,0]],\n \"f\" : [[1,1],[1,0],[0,0]],\n \"g\" : [[1,1],[1,1],[0,0]],\n \"h\" : [[1,0],[1,1],[0,0]],\n \"i\" : [[0,1],[1,0],[1,0]],\n \"j\" : [[0,1],[1,1],[0,0]],\n \"k\" : [[1,0],[0,0],[1,0]],\n \"l\" : [[1,0],[1,0],[1,0]],\n \"m\" : [[1,1],[0,0],[1,0]],\n \"n\" : [[1,1],[0,1],[1,0]],\n \"o\" : [[1,0],[0,1],[1,1]],\n \"p\" : [[1,1],[1,0],[1,0]],\n \"q\" : [[1,1],[1,1],[1,0]],\n \"r\" : [[1,0],[1,1],[1,0]],\n \"s\" : [[0,1],[1,0],[1,0]],\n \"t\" : [[0,1],[1,1],[1,0]],\n \"u\" : [[1,0],[0,0],[1,1]],\n \"v\" : [[1,0],[1,0],[1,1]],\n \"w\" : [[0,1],[0,1],[1,1]],\n \"x\" : [[1,1],[0,0],[1,1]],\n \"y\" : [[1,1],[0,1],[1,1]],\n \"z\" : [[1,0],[0,1],[1,1]]\n}\n\nascii_braille = {}\n\nasciicodes = [' ','!','\"','#','$','%','&','','(',')','*','+',',','-','.','/',\n '0','1','2','3','4','5','6','7','8','9',':',';','<','=','>','?','@',\n 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q',\n 'r','s','t','u','v','w','x','y','z','[','\\\\',']','^','_']\n\nbrailles = ['⠀','⠮','⠐','⠼','⠫','⠩','⠯','⠄','⠷','⠾','⠡','⠬','⠠','⠤','⠨','⠌','⠴','⠂','⠆','⠒','⠲','⠢',\n '⠖','⠶','⠦','⠔','⠱','⠰','⠣','⠿','⠜','⠹','⠈','⠁','⠃','⠉','⠙','⠑','⠋','⠛','⠓','⠊','⠚','⠅',\n '⠇','⠍','⠝','⠕','⠏','⠟','⠗','⠎','⠞','⠥','⠧','⠺','⠭','⠽','⠵','⠪','⠳','⠻','⠘','⠸']\n\narrayLength = len(asciicodes)\ncounter = 0\n\nwhile counter < arrayLength:\n ascii_braille[asciicodes[counter]] = brailles[counter]\n counter = counter + 1\n\nletterToImgPath = {\n \"a\": \"images/a.png\",\n \"b\": \"images/b.png\",\n \"c\": \"images/c.png\",\n \"d\": \"images/d.png\",\n \"e\": \"images/e.png\",\n \"f\": \"images/f.png\",\n \"g\": \"images/g.png\",\n \"h\": \"images/h.png\",\n \"i\": \"images/i.png\",\n \"j\": \"images/j.png\",\n \"k\": \"images/k.png\",\n \"l\": \"images/l.png\",\n \"m\": \"images/m.png\",\n \"n\": \"images/n.png\",\n \"o\": \"images/o.png\",\n \"p\": \"images/p.png\",\n \"q\": \"images/q.png\",\n \"r\": \"images/r.png\",\n \"s\": \"images/s.png\",\n \"t\": \"images/t.png\",\n \"u\": \"images/u.png\",\n \"v\": \"images/v.png\",\n \"w\": \"images/w.png\",\n \"x\": \"images/x.png\",\n \"y\": \"images/y.png\",\n \"z\": \"images/z.png\",\n \" \": \"images/void.png\",\n}\n\n# def addImages(list_im):\n# imgs = [PIL.Image.open(i) for i in list_im]\n# min_shape = sorted([(np.sum(i.size), i.size ) for i in imgs])[0][1]\n# imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n# imgs_comb = PIL.Image.fromarray(imgs_comb)\n# imgs_comb.save('output.jpg')\n \n# def writeImage(b_string):\n# images = []\n# for letter in b_string:\n# images.append(letterToImgPath[letter])\n# addImages(images)\n# img = Image.open('output.jpg')\n# img.show()\n\ndef writeText(b_string):\n final_string = ''\n for letters in b_string:\n final_string = final_string + ascii_braille[letters.lower()]\n print(final_string)\n return final_string\n\ndef textToBraille(text):\n final_string = ''\n for char in text:\n char = char.lower()\n if char == \"a\":\n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"a\"]))\n elif char == \"b\":\n final_string 
= final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"b\"]))\n elif char == \"c\":\n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"c\"]))\n elif char == \"d\":\n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"d\"]))\n elif char == \"e\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"e\"]))\n elif char == \"f\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"f\"]))\n elif char == \"g\":\n final_string = final_string + ascii_braille[char] \n print(char + \" \" + str(charToArray[\"g\"]))\n elif char == \"h\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"h\"]))\n elif char == \"i\":\n final_string = final_string + ascii_braille[char] \n print(char + \" \" + str(charToArray[\"i\"]))\n elif char == \"j\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"j\"]))\n elif char == \"k\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"k\"]))\n elif char == \"l\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"l\"]))\n elif char == \"m\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"m\"]))\n elif char == \"n\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"n\"]))\n elif char == \"o\":\n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"o\"]))\n elif char == \"p\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"p\"]))\n elif char == \"q\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"q\"]))\n elif char == \"r\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"r\"]))\n elif char == \"s\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"s\"]))\n elif char == \"t\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"t\"]))\n elif char == \"u\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"u\"]))\n elif char == \"v\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"v\"]))\n elif char == \"w\":\n final_string = final_string + ascii_braille[char] \n print(char + \" \" + str(charToArray[\"w\"]))\n elif char == \"x\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"x\"]))\n elif char == \"y\": \n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"y\"]))\n elif char == \"z\":\n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\"z\"]))\n elif char == \" \":\n final_string = final_string + ascii_braille[char]\n print(char + \" \" + str(charToArray[\" \"]))\n print(final_string)\n\ndef brailleToTextArray(array):\n new_chars = ''\n for key in array:\n for a_key in charToArray:\n if charToArray[a_key] == key:\n new_chars = new_chars + str(a_key)\n\n# if __name__==\"__main__\":\n#"
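\n# Minimal usage sketch (hypothetical inputs; only the helpers defined above are used):\nif __name__ == \"__main__\":\n    writeText(\"hello\")        # prints ⠓⠑⠇⠇⠕\n    textToBraille(\"abc xyz\")  # prints each char with its dot matrix\n"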
},
{
"alpha_fraction": 0.5982339978218079,
"alphanum_fraction": 0.6048564910888672,
"avg_line_length": 28.225807189941406,
"blob_id": "49c0ec066a1af35967bb5929adba7d3dff3224f4",
"content_id": "2d179f4ecf993f8168793cfa24f27afb403c8c69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 957,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 31,
"path": "/convert_users_list.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\nОдноразовый скрипт для конвертации старого формата all_users.json в новый\n\"\"\"\nimport json\nimport uuid\n\nfn = r\"web_app\\static\\data\\all_users.json\"\n\nwith open(fn, encoding='utf-8') as f:\n all_users = json.load(f)\n\nnew_all_users = dict()\n\nfor user_key, user_dict in all_users.items():\n if \"email\" in user_dict.keys():\n new_key, new_dict = user_key, user_dict\n else:\n assert len(user_dict.keys()) == 1, user_dict\n assert list(user_dict.keys())[0] == \"name\", user_dict\n new_key = uuid.uuid4().hex\n new_dict = {\n \"name\": user_dict[\"name\"],\n \"email\": user_key\n }\n assert new_key not in new_all_users.keys(), (new_key, new_all_users)\n new_all_users[new_key] = new_dict\n\nwith open(fn, 'w', encoding='utf8') as f:\n json.dump(new_all_users, f, sort_keys=True, indent=4, ensure_ascii=False)\n"
},
{
"alpha_fraction": 0.6839622855186462,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 29.285715103149414,
"blob_id": "08e9b18558e8ed0427d06f495553ffc646ec62ff",
"content_id": "2d0022f64484b48d6f28306cea34ce7943ee26d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 7,
"path": "/local_config.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\"\"\"\nLocal settings\n\"\"\"\nfrom pathlib import Path\ndata_path = str(Path(__file__).parent) # root local data directory\nglobal_3rd_party = str(Path(__file__).parent) # root local 3rd_party directory\n"
},
{
"alpha_fraction": 0.6815816760063171,
"alphanum_fraction": 0.7081165313720703,
"avg_line_length": 25.3150691986084,
"blob_id": "63afd3c4975d69b6d67dc283b23d6492c58acc33",
"content_id": "caa9d1030efc35428e7d21795ae7592891c7fca6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2000,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 73,
"path": "/NN/RetinaNet/test_retinanet.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Ïðîâåðêà ðàáîòû íåéðîñåòè äëÿ îáëàñòåé îäèíàêîâûõ òîâàðîâ\n# ðèñîâàíèå êàðòèíîê\n# âû÷èñëåíèå symmetric_best_dice\n\n# In[1]:\n\n\nfrom ovotools.params import AttrDict\nimport sys\nsys.path.append('../..')\nimport local_config\nfrom os.path import join\nmodel_name = 'NN_results/retina_chars_b24228'\nmodel_fn = join(local_config.data_path, model_name)\n\nparams = AttrDict.load(model_fn + '.param.txt', verbose = True)\nmodel_fn += '/models/02500.t7'\n#params.data.net_hw = (416, 416) #(512,768) ###### (1024,1536) #\nparams.data.batch_size = 1 #######\n\nimport torch\nimport ignite\n\ndevice = 'cuda:0'\n\n\n# In[3]:\n\nimport DSBI_invest.data\nimport create_model_retinanet\n\nmodel, collate_fn, loss = create_model_retinanet.create_model_retinanet(params, phase='train', device=device)\nmodel = model.to(device)\nmodel.load_state_dict(torch.load(model_fn))\nmodel.eval()\nprint(\"Model loaded\")\n\ntrain_loader, (val_loader1, val_loader2) = DSBI_invest.data.create_dataloaders(params, collate_fn)\nval_loader1_it = iter(val_loader1)\nbatch = next(val_loader1_it)\ndata, target = ignite.engine._prepare_batch(batch, device=device)\n\nwith torch.no_grad():\n (loc_preds, cls_preds) = model(data.to(device))\n\nloss_val = loss((loc_preds, cls_preds), target)\nloss_val, loss.get_dict()\n\nimport PIL\nimport PIL.ImageDraw\n\nimport numpy as np\ndef TensorToPilImage(tensor, params):\n vx_np = tensor.cpu().numpy().copy()\n vx_np *= np.asarray(params.data.std)[:, np.newaxis, np.newaxis]\n vx_np += np.asarray(params.data.mean)[:, np.newaxis, np.newaxis]\n vx_np = vx_np.transpose(1,2,0)*255\n return PIL.Image.fromarray(vx_np.astype(np.uint8))\n\nimg = TensorToPilImage(data[0], params)\nw,h = img.size\nencoder = loss.encoder\nboxes, labels = encoder.decode(loc_preds[0].cpu().data, cls_preds[0].cpu().data, (w,h))\n\ndraw = PIL.ImageDraw.Draw(img)\nfor box in boxes:\n draw.rectangle(list(box), outline='red')\nimg\n\npass\n\n"
},
{
"alpha_fraction": 0.5409139394760132,
"alphanum_fraction": 0.5515409111976624,
"avg_line_length": 23.05128288269043,
"blob_id": "4f325aab0105dade6e574395ea9091d2bd0c0d4e",
"content_id": "f808c03ea0cb0f6f59545560e4a3c0d0083a1e8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 972,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 39,
"path": "/data_utils/check_labels_are_interpretable.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nСтроит гистрограмму размеров рамок\n\"\"\"\nimport json\nfrom pathlib import Path\nfrom braille_utils import label_tools as lt\n\ndef check_file(file_path):\n with open(file_path) as f:\n data = json.loads(f.read())\n rects = [s[\"points\"] for s in data[\"shapes\"]]\n labels = [s[\"label\"] for s in data[\"shapes\"]]\n n = 0\n for i, lbl in enumerate(labels):\n try:\n lt.human_label_to_int(lbl)\n except Exception as e:\n print(file_path)\n print(e)\n print(lbl, rects[i])\n\n\ndef check(dir, mask=\"\"):\n if mask == \"\":\n mask = \"**/\"\n img_files = list(Path(dir).glob(mask+\"*.json\"))\n for i, file_path in enumerate(img_files):\n if i % 100 == 99:\n print(i, \"/\", len(img_files))\n check_file(file_path)\n\n\n\nif __name__==\"__main__\":\n data_dir = r\"input/test001.jpeg\"\n mask = \"\"\n check(data_dir, mask)\n\n\n\n"
},
{
"alpha_fraction": 0.7984496355056763,
"alphanum_fraction": 0.7984496355056763,
"avg_line_length": 31.25,
"blob_id": "2df45294c553907750491f9c6c29f0f0931c6559",
"content_id": "5e7c08a864192107c0df388f7faf323689978454",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 8,
"path": "/loader.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from aiogram import Bot, Dispatcher, types\nfrom aiogram.contrib.fsm_storage.mongo import MongoStorage\n\nfrom data import config\n\nbot = Bot(token=config.BOT_TOKEN, parse_mode=types.ParseMode.HTML)\nstorage = MongoStorage()\ndp = Dispatcher(bot, storage=storage)\n"
},
{
"alpha_fraction": 0.4441007375717163,
"alphanum_fraction": 0.5183385014533997,
"avg_line_length": 28.0256404876709,
"blob_id": "6a50df77722a813a99a7c38053a4f4e308e8123f",
"content_id": "5376cc860861d0a0e9ee1929c3fc61b3dcb07d65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2312,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 78,
"path": "/model/params.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "import local_config\nfrom ovotools import AttrDict\n\nsettings = AttrDict(\n max_epochs=100000,\n tensorboard_port=6006,\n device='cuda:3',\n findLR=False,\n can_overwrite=False,\n)\n\nparams = AttrDict(\n data_root = local_config.data_path,\n model_name = 'NN_results/dsbi_lay{model_params.num_fpn_layers}',\n data = AttrDict(\n get_points = False,\n class_as_6pt=False, # классификация присутствия каждой точки в рамке отдельно\n batch_size = 12,\n net_hw = (416, 416),\n rect_margin = 0.3, # every of 4 margions to char width\n max_std = 0.1,\n train_list_file_names = [\n #r'DSBI/data/val_li2.txt',\n r'DSBI/data/train_li2.txt',\n ],\n val_list_file_names = {\n 'val' : [r'DSBI/data/val_li2.txt',],\n 'test' : [r'DSBI/data/test_li2.txt',]\n }\n ),\n augmentation = AttrDict(\n img_width_range=( 810, 890, ), # 768*0.8, 1536*1.2\n stretch_limit = 0.1,\n rotate_limit = 5,\n ),\n model = 'retina',\n model_params = AttrDict(\n num_fpn_layers=5,\n encoder_params = AttrDict(\n anchor_areas=[34*55/4,], # [22*22*0.62, 33*33*0.62, 45*45*0.62,], #[8*16., 12*24., 16*32.,], # 34*55/4\n aspect_ratios=[0.62,], # [0.62,], #[1 / 2.,],\n scale_ratios=[1.],\n iuo_fit_thr = 0, # if iou > iuo_fit_thr => rect fits anchor\n iuo_nofit_thr = 0,\n ),\n loss_params=AttrDict(\n class_loss_scale = 1,\n ),\n ),\n #load_model_from = 'NN_results/dsbi_tst1_lay5_083746/models/clr.003.t7', # retina_chars_d58e5f # retina_chars_7e1d4e\n optim = 'torch.optim.Adam',\n optim_params = AttrDict(\n lr=0.0001,\n #momentum=0.9,\n #weight_decay = 0, #0.001,\n #nesterov = False,\n ),\n lr_finder=AttrDict(\n iters_num=200,\n log_lr_start=-5,\n log_lr_end=-1,\n ),\n lr_scheduler=AttrDict(\n type='clr',\n #params=AttrDict(\n # milestones=[5000, 10000,],\n # gamma=0.1,\n #),\n ),\n clr=AttrDict(\n warmup_epochs=10,\n min_lr=1e-5,\n max_lr=0.0002,\n period_epochs=500,\n scale_max_lr=0.95,\n scale_min_lr=0.95,\n ),\n)"
},
{
"alpha_fraction": 0.4729985296726227,
"alphanum_fraction": 0.48938852548599243,
"avg_line_length": 41.88288116455078,
"blob_id": "77b10740af95ec7be8826060740117b4b5eb0e4e",
"content_id": "63048631395442870f61448ba034b64055986530",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4759,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 111,
"path": "/data_utils/dsbi.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nUtils for DSBI dataset (https://github.com/yeluo1994/DSBI)\n\"\"\"\nimport collections\nfrom braille_utils import label_tools as lt\n\nCellInfo = collections.namedtuple('CellInfo', \n ['row', 'col', # row and column in a symbol grid\n 'left', 'top', 'right', 'bottom', # symbol corner coordinates in pixels\n 'label']) # symbol label either like '246' or '010101' format\n\ndef read_txt(file_txt, binary_label = True):\n \"\"\"\n Loads Braille annotation from DSBI annotation txt file\n :param file_txt: filename of txt file\n :param binary_label: return symbol label in binary format, like '010101' (if True),\n or human readable like '246' (if False)\n :return: tuple (\n angle: value from 1st line of annotation file,\n h_lines: list of horizontal lines Y-coordinates,\n v_lines: list of vertical lines X-coordinates,,\n cells: symbols as list of CellInfo\n )\n None, None, None, None for empty annotation\n \"\"\"\n with open(file_txt, 'r') as f:\n l = f.readlines()\n if len(l) < 3:\n return None, None, None, None\n angle = eval(l[0])\n v_lines = list(map(eval, l[1].split(' ')))\n assert len(v_lines)%2 == 0, (file_txt, len(v_lines))\n h_lines = list(map(eval, l[2].split(' ')))\n assert len(h_lines)%3 == 0, (file_txt, len(h_lines))\n cells = []\n for cell_ln in l[3:]:\n cell_nums = list(cell_ln[:-1].split(' ')) # exclude last '\\n'\n assert len(cell_nums) == 8, (file_txt, cell_ln)\n row = eval(cell_nums[0])\n col = eval(cell_nums[1])\n if binary_label:\n label = ''.join(cell_nums[2:])\n else:\n label = ''\n for i, c in enumerate(cell_nums[2:]):\n if c == '1':\n label += str(i+1)\n else:\n assert c == '0', (file_txt, cell_ln, i, c)\n left = v_lines[(col-1)*2]\n right = v_lines[(col-1)*2+1]\n top = h_lines[(row-1)*3]\n bottom = h_lines[(row-1)*3+2]\n cells.append(CellInfo(row=row, col=col,\n left=left, top=top, right=right, bottom=bottom,\n label=label))\n return angle, h_lines, v_lines, cells\n\n\ndef read_DSBI_annotation(label_filename, width, height, rect_margin, get_points):\n \"\"\"\n Loads Braille annotation from DSBI annotation txt file in albumentations format\n :param label_filename: filename of txt file\n :param width: image width\n :param height: image height\n :param rect_margin:\n :param get_points: Points or Symbols mode\n :return:\n List of symbol rects if get_points==False. Each rect is a tuple (left, top, right, bottom, label) where\n left..bottom are in [0,1], label is int in [1..63]. Symbol size is extended to rect_margin*width of symbol\n in every side.\n List of points rects if get_points==True. Each point is a tuple (left, top, right, bottom, label) where\n left..bottom are in [0,1], label is 0. 
Width and height of point is 2*rect_margin*width of symbol\n    \"\"\"\n    _, _, _, cells = read_txt(label_filename, binary_label=True)\n    if cells is not None:\n        if get_points:\n            rects = []\n            for cl in cells:\n                w = int((cl.right - cl.left) * rect_margin)\n                h = w\n                for i in range(6):\n                    if cl.label[i] == '1':\n                        iy = i % 3\n                        ix = i - iy\n                        if ix == 0:\n                            xc = cl.left\n                        else:\n                            xc = cl.right\n                        left, right = xc - w, xc + w\n                        if iy == 0:\n                            yc = cl.top\n                        elif iy == 1:\n                            yc = (cl.top + cl.bottom) // 2\n                        else:\n                            yc = cl.bottom\n                        top, bottom = yc - h, yc + h\n                        rects.append([left / width, top / height, right / width, bottom / height, 0])\n        else:\n            rects = [(\n                (c.left - rect_margin * (c.right - c.left)) / width,\n                (c.top - rect_margin * (c.right - c.left)) / height,\n                (c.right + rect_margin * (c.right - c.left)) / width,\n                (c.bottom + rect_margin * (c.right - c.left)) / height,\n                lt.label010_to_int(c.label),\n            ) for c in cells if c.label != '000000']\n    else:\n        rects = []\n    return rects\n
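\n\n# Usage sketch (hypothetical file names; validate_retinanet.py calls it this way):\n# import PIL.Image\n# img = PIL.Image.open('page+recto.jpg')\n# rects = read_DSBI_annotation('page+recto.txt', img.width, img.height,\n#                              rect_margin=0.3, get_points=False)\n# each element: (left, top, right, bottom, int_label) with coords in [0, 1]\n"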
},
{
"alpha_fraction": 0.5913515686988831,
"alphanum_fraction": 0.5999547243118286,
"avg_line_length": 44.07143020629883,
"blob_id": "d13084d04f00d8ea9f1d9c7828f6a796eed371ca",
"content_id": "8eb613df869b5bd51a1718a69177de52179fd24a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4475,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 98,
"path": "/model/create_model_retinanet.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "import os\nimport torch\nimport numpy as np\n\nfrom pytorch_retinanet.loss import FocalLoss\nfrom pytorch_retinanet.retinanet import RetinaNet\nfrom pytorch_retinanet.encoder import DataEncoder\n\nimport local_config\nfrom braille_utils import label_tools\n\ndef create_model_retinanet(params, device):\n '''\n Creates model and auxiliary functions\n :param params: OvoTools.AttrDict with parameters\n :param device: 'cuda'/'cpu'\n :return: model, detection_collate function, loss function\n '''\n use_multiple_class_groups = params.data.get('class_as_6pt', False)\n num_classes = 1 if params.data.get_points else ([1]*6 if use_multiple_class_groups else 64)\n encoder = DataEncoder(**params.model_params.encoder_params)\n model = RetinaNet(num_layers=encoder.num_layers(), num_anchors=encoder.num_anchors(),\n num_classes=num_classes,\n num_fpn_layers=params.model_params.get('num_fpn_layers', 0)).to(device)\n retina_loss = FocalLoss(num_classes=num_classes, **params.model_params.get('loss_params', dict()))\n\n\n def detection_collate(batch):\n '''\n :param batch: list of (tb image(CHW float), [(left, top, right, bottom, class),...]) сcoords in [0,1], extra_params\n :return: batch: ( images (BCNHW), ( encoded_rects, encoded_labels ) )\n copied from RetinaNet, but a) accepts rects as input, b) returns (x,y) where y = (encoded_rects, encoded_labels)\n '''\n\n # t = [b for b in batch if b[1].shape[0]==0]\n # if len(t):\n # pass\n\n #device = torch.device('cpu') # commented to use settings.device\n\n boxes = [torch.tensor(b[1][:, :4], dtype = torch.float32, device=device)\n *torch.tensor(params.data.net_hw[::-1]*2, dtype = torch.float32, device=device) for b in batch]\n labels = [torch.tensor(b[1][:, 4], dtype = torch.long, device=device) for b in batch]\n if params.data.get_points:\n labels = [torch.tensor([0]*len(lb), dtype = torch.long, device=device) for lb in labels]\n elif use_multiple_class_groups:\n # классы нумеруются с 0, отсутствие класса = -1, далее в encode cls_targets=1+labels\n labels = [torch.tensor([[int(ch)-1 for ch in label_tools.int_to_label010(int_lbl.item())] for int_lbl in lb],\n dtype=torch.long, device=device) for lb in labels]\n\n original_images = [b[3] for b in batch if len(b)>3] # batch contains augmented image if not in train mode\n\n imgs = [x[0] for x in batch]\n calc_cls_mask = torch.tensor([b[2].get('calc_cls', True) for b in batch],\n dtype=torch.bool,\n device=device)\n\n h, w = tuple(params.data.net_hw)\n num_imgs = len(batch)\n inputs = torch.zeros(num_imgs, 3, h, w).to(imgs[0])\n\n loc_targets = []\n cls_targets = []\n for i in range(num_imgs):\n inputs[i] = imgs[i]\n labels_i = labels[i]\n if use_multiple_class_groups and len(labels_i.shape) != 2: # it can happen if no labels are on image\n labels_i = labels_i.reshape((0, len(num_classes)))\n loc_target, cls_target, max_ious = encoder.encode(boxes[i], labels_i, input_size=(w,h))\n loc_targets.append(loc_target)\n cls_targets.append(cls_target)\n if original_images: # inference mode\n return inputs, ( torch.stack(loc_targets), torch.stack(cls_targets), calc_cls_mask), original_images\n else:\n return inputs, (torch.stack(loc_targets), torch.stack(cls_targets), calc_cls_mask)\n\n class Loss:\n def __init__(self):\n self.encoder = encoder\n pass\n def __call__(self, pred, targets):\n loc_preds, cls_preds = pred\n loc_targets, cls_targets, calc_cls_mask = targets\n if calc_cls_mask.min(): # Ничего не пропускаем\n calc_cls_mask = None\n loss = retina_loss(loc_preds, loc_targets, cls_preds, cls_targets, 
cls_calc_mask=calc_cls_mask)\n return loss\n def get_dict(self, *kargs, **kwargs):\n return retina_loss.loss_dict\n def metric(self, key):\n def call(*kargs, **kwargs):\n return retina_loss.loss_dict[key]\n return call\n\n return model, detection_collate, Loss()\n\nif __name__ == '__main__':\n pass\n"
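# Minimal construction sketch (assumes the configuration in model.params, mirroring\n# the call made by the training script; not a training loop):\nif __name__ == '__main__':\n    from model.params import params, settings\n    model, collate_fn, loss = create_model_retinanet(params, device=settings.device)\n    print(type(model).__name__, '- model, collate_fn and loss created')\n"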
},
{
"alpha_fraction": 0.5242744088172913,
"alphanum_fraction": 0.5261213779449463,
"avg_line_length": 53.14285659790039,
"blob_id": "57f86c3db74ec1d57f0df34e670c3ddaca008568",
"content_id": "4d71f84fdb21a27f3de2a0155e2afcc327ffb05d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3790,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 70,
"path": "/run_local.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nLocal application for Brailler Reader bot local app\n\"\"\"\nimport argparse\nimport os\nfrom pathlib import Path\n\nimport local_config\nimport model.infer_retinanet as infer_retinanet\n\nmodel_weights = 'model.t7'\n\nparser = argparse.ArgumentParser(description='Angelina Braille Reader: optical Braille text recognizer .')\n\nparser.add_argument('input', type=str, help='File(s) to be processed: image, pdf or zip file or directory name')\nparser.add_argument('results_dir', nargs='?', type=str, help='(Optional) output directory. If not specified, results are placed at input location')\nparser.add_argument('-l', '--lang', type=str, default='RU', help='(Optional) Document language (RU, EN, LV, GR, UZ or UZL). If not specified, is RU')\nparser.add_argument('-o', '--orient', action='store_false', help=\"Don't find orientation, use original file orientation\")\nparser.add_argument('-2', dest='two', action='store_true', help=\"Process 2 sides\")\n\nargs = parser.parse_args()\n\nif not Path(args.input).exists():\n print('input file/path does not exist: ' + args.input)\n exit()\n\nrecognizer = infer_retinanet.BrailleInference(\n params_fn=os.path.join(local_config.data_path, 'weights', 'param.txt'),\n model_weights_fn=os.path.join(local_config.data_path, 'weights', model_weights),\n create_script=None)\n\nif Path(args.input).is_dir():\n results_dir = args.results_dir or args.input\n recognizer.process_dir_and_save(str(Path(args.input)/'**'/'*.*'), results_dir,\n lang=args.lang, extra_info=None,\n draw_refined=recognizer.DRAW_NONE,\n remove_labeled_from_filename=False,\n find_orientation=args.orient,\n align_results=True,\n process_2_sides=args.two,\n repeat_on_aligned=False,\n save_development_info=False)\nelse:\n results_dir = args.results_dir or Path(args.input).parent\n if Path(args.input).suffix == '.zip':\n recognizer.process_archive_and_save(args.input, results_dir,\n lang=args.lang, extra_info=None,\n draw_refined=recognizer.DRAW_NONE,\n remove_labeled_from_filename=False,\n find_orientation=args.orient,\n align_results=True,\n process_2_sides=args.two,\n repeat_on_aligned=False,\n save_development_info=False)\n elif Path(args.input).suffix in ('.jpg', '.jpe', '.jpeg', '.png', '.gif', '.svg', '.bmp'):\n recognizer.run_and_save(args.input, results_dir, target_stem=None,\n lang=args.lang, extra_info=None,\n draw_refined=recognizer.DRAW_NONE,\n remove_labeled_from_filename=False,\n find_orientation=args.orient,\n align_results=True,\n process_2_sides=args.two,\n repeat_on_aligned=False,\n save_development_info=False)\n else:\n print('Incorrect file extention: ' + Path(args.input).suffix + ' . Only images, .pdf and .zip files allowed')\n exit()\nprint('Done. Results are saved in ' + str(results_dir))\n"
},
{
"alpha_fraction": 0.7022900581359863,
"alphanum_fraction": 0.7022900581359863,
"avg_line_length": 22.81818199157715,
"blob_id": "308ec40dd6cadeccc2604dcc4fce264b690eea9d",
"content_id": "d493899bba02271fcdb3e485cb62e2ad2189a66f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 11,
"path": "/data/config.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from environs import Env\n\n# Теперь используем вместо библиотеки python-dotenv библиотеку environs\nenv = Env()\nenv.read_env()\n\nBOT_TOKEN = env.str(\"BOT_TOKEN\")\nADMINS = env.list(\"ADMINS\")\nIP = env.str(\"ip\")\nAPP_ID = env.str(\"APP_ID\")\nAPP_KEY = env.str(\"APP_KEY\")\n"
},
{
"alpha_fraction": 0.7707006335258484,
"alphanum_fraction": 0.7707006335258484,
"avg_line_length": 35.94117736816406,
"blob_id": "7b144158c9e7b4476795e8f5e871f8b96a6631ab",
"content_id": "1ac70555e7d3835d863e6bd412d7249ed6b027a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 17,
"path": "/handlers/users/GetSettings.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from aiogram import types\nfrom aiogram.dispatcher import FSMContext\n\nfrom keyboards.default import main_menu, set_settings\nfrom loader import dp\nfrom states.UserStats import Form\n\n\[email protected]_handler(text=\"Braille -> Text\")\nasync def set_settings(msg: types.Message):\n await msg.answer(\"Braille alifbosida yozilgan rasmni yuborish uchun bosing!\", reply_markup=main_menu)\n await Form.GetPhoto.set()\n\[email protected]_handler(text=\"Text -> Braille\")\nasync def set_settings(msg: types.Message):\n await msg.answer(\"Braillega o'tkazish kerak bo'lgan rasmni yuboring!\", reply_markup=main_menu)\n await Form.TextToBraille.set()\n"
},
{
"alpha_fraction": 0.6397393941879272,
"alphanum_fraction": 0.6416938304901123,
"avg_line_length": 39.9466667175293,
"blob_id": "c9de32cb71b28c5cb1540da216a7a64d1d016024",
"content_id": "b323d365d1b549f355a6208c8a68d6dded299ed1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3170,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 75,
"path": "/handlers/users/SendPhoto.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom uuid import uuid4\nimport os\n\nfrom keyboards.default import main_menu, set_settings\nfrom loader import dp, bot\nfrom states.UserStats import Form\nfrom braille_utils.braille import writeText\nimport requests\nfrom data import mathpix\nimport json\n\[email protected]_handler(text=\"📝Отправить картинку\", state=Form.GetPhoto)\nasync def get_photo_message(msg: types.Message):\n await msg.answer(\"Отправить изображение Брайля\", reply_markup=types.ReplyKeyboardRemove())\n\[email protected]_handler(content_types='photo', state=Form.GetPhoto)\nasync def get_photo(message: types.Message, state: FSMContext):\n text = uuid4()\n\n await message.photo[-1].download('input/{}.jpg'.format(text))\n await message.answer(\"Фото принято\\n\\n\"\n \"Подождите пожалуйста ...\")\n os.system('pwd')\n os.system('python run_local.py -l EN -o input/{}.jpg output'.format(text))\n\n with open(\"output/{}.marked.jpg\".format(text), 'rb') as photo:\n with open(\"output/{}.marked.txt\".format(text), 'r') as txt:\n marc = txt.readlines()\n listToStr = ' '.join(map(str, marc))\n await message.answer_photo(photo=photo, caption=f\"<code>{listToStr}</code>\", reply_markup=main_menu)\n\n with open(\"output/{}.marked.brl\".format(text), 'rb') as brl:\n await message.answer_document(brl)\n\n await state.finish()\n\[email protected]_handler(text=\"📝Отправить картинку\", state=Form.TextToBraille)\nasync def get_photo_message(msg: types.Message):\n await msg.answer(\"Rasm yuboring!\", reply_markup=types.ReplyKeyboardRemove())\n\[email protected]_handler(content_types='photo', state=Form.TextToBraille)\nasync def get_photo(message: types.Message, state: FSMContext):\n text = uuid4()\n await message.photo[-1].download('input/{}.jpg'.format(text))\n r = mathpix.latex({\n 'src': mathpix.image_uri('input/{}.jpg'.format(text)),\n 'ocr': ['math', 'text'],\n 'skip_recrop': True,\n 'formats': ['text', 'latex_styled', 'asciimath', 'mathml'],\n 'format_options': {\n 'text': {\n 'transforms': ['rm_spaces', 'rm_newlines'],\n 'math_delims': ['$', '$']\n },\n 'latex_styled': {'transforms': ['rm_spaces']}\n }\n })\n\n print(\"\\nResult object: \\n{}\".format(json.dumps(r, indent=4, sort_keys=True)))\n with open(f\"output/{text}.brl\", 'w+') as f:\n f.writelines(writeText(r['asciimath']))\n with open(f\"output/{text}.brl\", 'rb') as f:\n send_text = f\"<b>Photo text:</b> <code>{r['asciimath']}</code>\\n\\n\"\n send_text += f\"<b>Latex style</b>: <code>{r['latex_styled']}</code>\\n\\n\"\n send_text += f\"<b>Braille text:</b> <code>{writeText(r['asciimath'])}</code>\\n\"\n\n await message.answer_document(f, caption=send_text, reply_markup=main_menu)\n\n\[email protected]_handler(text=\"↪️ Ortga qaytish\", state='*')\nasync def back_menu(msg: types.Message, state: FSMContext):\n await msg.answer(\"Iltimos tanlang\", reply_markup=set_settings)\n await state.finish()"
},
{
"alpha_fraction": 0.5984053611755371,
"alphanum_fraction": 0.6294586658477783,
"avg_line_length": 31.20270347595215,
"blob_id": "71a9640ffba86197354ec95bb73cd4fc005c8ef0",
"content_id": "bda964128e99675cb3b6ca4a0548ba713cf129c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2383,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 74,
"path": "/DSBI_invest/debug_data.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from ovotools.params import AttrDict\nimport sys\nsys.path.append('..')\nsys.path.append('../NN/RetinaNet')\nimport local_config\nfrom os.path import join\nmodel_name = 'NN_results/retina_points_cde773'\nmodel_fn = join(local_config.data_path, model_name)\n\nparams = AttrDict.load(model_fn + '.param.txt', verbose = True)\nparams.data.net_hw = (128,128,) #(512,768) ###### (1024,1536) #\nparams.data.batch_size = 1 #######\n\nparams.data.get_points = False\nparams.model_params.encoder_params.anchor_areas=[5 * 5., ] # 6 * 6., 10 * 10.,\nparams.model_params.encoder_params.iuo_fit_thr = 0 # if iou > iuo_fit_thr => rect fits anchor\nparams.model_params.encoder_params.iuo_nofit_thr = 0\n\nparams.augmentation = AttrDict(\n img_width_range=(1100, 1100),\n stretch_limit = 0,\n rotate_limit=0,\n)\n\nimport ignite\n\n# device = 'cuda:0'\ndevice = 'cpu'\n\n# In[3]:\n\nimport torch\nimport DSBI_invest.data\nimport create_model_retinanet\n\nmodel, collate_fn, loss = create_model_retinanet.create_model_retinanet(params, phase='train', device=device)\nmodel = None\n\ntrain_loader, (val_loader1, val_loader2) = DSBI_invest.data.create_dataloaders(params, collate_fn,\n data_dir=r'D:\\Programming\\Braille\\Data\\My', fn_suffix = '', mode='inference',\n #mode = 'debug',\n verbose = 2)\nval_loader1_it = iter(train_loader)\n\nimport PIL\nimport PIL.ImageDraw\nimport numpy as np\ndef TensorToPilImage(tensor, params):\n std = (0.2,) #params.data.std\n mean = (0.5,) #params.data.mean\n vx_np = tensor.cpu().numpy().copy()\n vx_np *= np.asarray(std)[:, np.newaxis, np.newaxis]\n vx_np += np.asarray(mean)[:, np.newaxis, np.newaxis]\n vx_np = vx_np.transpose(1,2,0)*255\n return PIL.Image.fromarray(vx_np.astype(np.uint8))\n\nencoder = loss.encoder\n\nbatch = next(val_loader1_it)\ndata, target = ignite.engine._prepare_batch(batch, device=device)\n\ncls_thresh = 0.6\nnms_thresh = 0\n\nimg = TensorToPilImage(data[0], params)\nw,h = img.size\n\nlabels = target[1][0].clamp(min=0)\nly = torch.eye(65, device = labels.device) # [D,D]\ncls_preds = ly[labels][:,1:]\n\nboxest, labelst, scores = encoder.decode(target[0][0].cpu().data, cls_preds, (w,h),\n cls_thresh = cls_thresh, nms_thresh = nms_thresh)\nprint(len(boxest))\n"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 30,
"blob_id": "14a92674a832732b64f96367dbb2c3dfc5e2aea5",
"content_id": "e7a86438c195ab423e93693b7fb988cc18f666a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/keyboards/inline/__init__.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from .set_lang import set_lang\n"
},
{
"alpha_fraction": 0.48010361194610596,
"alphanum_fraction": 0.5003185272216797,
"avg_line_length": 38.977928161621094,
"blob_id": "f49466be9d48429a11c005416f568f745078b8b8",
"content_id": "bbf500952332917c180b050cfdbd4cb7fa6139ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23653,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 589,
"path": "/model/validate_retinanet.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nevaluate levenshtein distance as recognition error for dataset using various model(s)\n\"\"\"\n\n# Для отладки\ninference_width = 850\nverbose = 0\n\nmodels = [\n #('NN_results/dsbi_tst_as_fcdca3_c63909', 'models/clr.099.t7'),\n #\n ('NN_results/dsbi_lay3_c4ca62', 'models/clr.005.t7'),\n ('NN_results/dsbi_lay3_c4ca62', 'models/clr.006.t7'),\n]\n\nmodel_dirs = [\n]\n\ndatasets = {\n # 'DSBI_train': [\n # r'DSBI\\data\\train.txt',\n # ],\n # 'DSBI_test': [\n # r'DSBI\\data\\test.txt',\n # ],\n 'val': [r'DSBI/data/val_li2.txt', ],\n 'test': [r'DSBI/data/test_li2.txt', ],\n}\n\nlang = 'RU'\n\nimport os\nimport sys\nimport Levenshtein\nfrom pathlib import Path\nimport PIL\nimport torch\nsys.path.append(r'../..')\nsys.path.append('../NN/RetinaNet')\nimport local_config\nimport data_utils.data as data\nimport data_utils.dsbi as dsbi\nimport braille_utils.postprocess as postprocess\nimport model.infer_retinanet as infer_retinanet\nfrom braille_utils import label_tools\n\nrect_margin=0.3\n\nfor md in model_dirs:\n models += [\n (str(md[0]), str(Path('models')/m.name))\n for m in (Path(local_config.data_path)/md[0]).glob(md[1])\n ]\n\ndef prepare_data(datasets=datasets):\n \"\"\"\n data (datasets defined above as global) -> dict: key - list of dict (image_fn\":full image filename, \"gt_text\": groundtruth pseudotext, \"gt_rects\": groundtruth rects + label 0..64)\n :return:\n \"\"\"\n res_dict = dict()\n for key, list_file_names in datasets.items():\n data_list = list()\n res_dict[key] = data_list\n for list_file_name in list_file_names:\n list_file = os.path.join(local_config.data_path, list_file_name)\n data_dir = os.path.dirname(list_file)\n with open(list_file, 'r') as f:\n files = f.readlines()\n for fn in files:\n if fn[-1] == '\\n':\n fn = fn[:-1]\n fn = fn.replace('\\\\', '/')\n full_fn = os.path.join(data_dir, fn)\n if os.path.isfile(full_fn):\n rects = None\n lbl_fn = full_fn.rsplit('.', 1)[0] + '.json'\n if os.path.isfile(lbl_fn):\n rects = data.read_LabelMe_annotation(label_filename=lbl_fn, get_points=False)\n else:\n lbl_fn = full_fn.rsplit('.', 1)[0] + '.txt'\n if os.path.isfile(lbl_fn):\n img = PIL.Image.open(full_fn)\n rects = dsbi.read_DSBI_annotation(label_filename=lbl_fn,\n width=img.width,\n height=img.height,\n rect_margin=rect_margin,\n get_points=False\n )\n else:\n full_fn = full_fn.rsplit('.', 1)[0] + '+recto.jpg'\n lbl_fn = full_fn.rsplit('.', 1)[0] + '.txt'\n if os.path.isfile(lbl_fn):\n img = PIL.Image.open(full_fn)\n rects = dsbi.read_DSBI_annotation(label_filename=lbl_fn,\n width=img.width,\n height=img.height,\n rect_margin=rect_margin,\n get_points=False)\n if rects is not None:\n boxes = [r[:4] for r in rects]\n labels = [r[4] for r in rects]\n lines = postprocess.boxes_to_lines(boxes, labels, lang=lang)\n gt_text = lines_to_pseudotext(lines)\n data_list.append({\"image_fn\":full_fn, \"gt_text\": gt_text, \"gt_rects\": rects})\n return res_dict\n\n\ndef label_to_pseudochar(label):\n \"\"\"\n int (0..63) - str ('0' .. 
'o')\n \"\"\"\n return chr(ord('0') + label)\n\n\ndef lines_to_pseudotext(lines):\n \"\"\"\n lines (list of postprocess.Line) -> pseudotext (multiline '\\n' delimetered, int labels converted to pdeudo chars)\n \"\"\"\n out_text = []\n for ln in lines:\n if ln.has_space_before:\n out_text.append('')\n s = ''\n for ch in ln.chars:\n s += ' ' * ch.spaces_before + label_to_pseudochar(ch.label)\n out_text.append(s)\n return '\\n'.join(out_text)\n\n\ndef pseudo_char_to_label010(ch):\n lbl = ord(ch) - ord('0')\n label_tools.validate_int(lbl)\n label010 = label_tools.int_to_label010(lbl)\n return label010\n\n\ndef count_dots_lbl(lbl):\n n = 0\n label010 = label_tools.int_to_label010(lbl)\n for c01 in label010:\n if c01 == '1':\n n += 1\n else:\n assert c01 == '0'\n return n\n\ndef count_dots_str(s):\n n = 0\n for ch in s:\n if ch in \" \\n\":\n continue\n label010 = pseudo_char_to_label010(ch)\n for c01 in label010:\n if c01 == '1':\n n += 1\n else:\n assert c01 == '0'\n return n\n\n\ndef dot_metrics(res, gt):\n tp = 0\n fp = 0\n fn = 0\n opcodes = Levenshtein.opcodes(res, gt)\n for op, i1, i2, j1, j2 in opcodes:\n if op == 'delete':\n fp += count_dots_str(res[i1:i2])\n elif op == 'insert':\n fn += count_dots_str(gt[j1:j2])\n elif op == 'equal':\n tp += count_dots_str(res[i1:i2])\n elif op == 'replace':\n res_substr = res[i1:i2].replace(\" \", \"\").replace(\"\\n\", \"\")\n gt_substr = gt[j1:j2].replace(\" \", \"\").replace(\"\\n\", \"\")\n d = len(res_substr) - len(gt_substr)\n if d > 0:\n fp += count_dots_str(res_substr[-d:])\n res_substr = res_substr[:-d]\n elif d < 0:\n fn += count_dots_str(gt_substr[d:])\n gt_substr = gt_substr[:d]\n assert len(res_substr) == len(gt_substr)\n for i, res_i in enumerate(res_substr):\n res010 = pseudo_char_to_label010(res_i)\n gt010 = pseudo_char_to_label010(gt_substr[i])\n for p in range(6):\n if res010[p] == '1' and gt010[p] == '0':\n fp += 1\n elif res010[p] == '0' and gt010[p] == '1':\n fn += 1\n elif res010[p] == '1' and gt010[p] == '1':\n tp += 1\n else:\n raise Exception(\"incorrect operation \" + op)\n\n return tp, fp, fn\n\n\ndef filter_lonely_rects(boxes, labels, img):\n dx_to_h = 2.35 # расстояние от края до центра 3го символа\n res_boxes = []\n res_labels = []\n filtered = []\n for i in range(len(boxes)):\n box = boxes[i]\n cy = (box[1] + box[3])/2\n dx = (box[3]-box[1])*dx_to_h\n for j in range(len(boxes)):\n if i == j:\n continue\n box2 = boxes[j]\n if (box2[0] < box[2] + dx) and (box2[2] > box[0] - dx) and (box2[1] < cy) and (box2[3] > cy):\n res_boxes.append(boxes[i])\n res_labels.append(labels[i])\n break\n else:\n filtered.append(box)\n # if filtered:\n # draw = PIL.ImageDraw.Draw(img)\n # for b in filtered:\n # draw.rectangle(b, fill=\"red\")\n # img.show()\n\n return res_boxes, res_labels\n\ndef dot_metrics_rects(boxes, labels, gt_rects, image_wh, img, do_filter_lonely_rects):\n if do_filter_lonely_rects:\n boxes, labels = filter_lonely_rects(boxes, labels, img)\n gt_labels = [r[4] for r in gt_rects]\n gt_rec_labels = [-1] * len(gt_rects) # recognized label for gt, -1 - missed\n rec_is_false = [1] * len(labels) # recognized is false\n\n if len(gt_rects) and len(labels):\n boxes = torch.tensor(boxes)\n gt_boxes = torch.tensor([r[:4] for r in gt_rects], dtype=torch.float32) * torch.tensor([image_wh[0], image_wh[1], image_wh[0], image_wh[1]])\n\n # Для отладки\n # labels = torch.tensor(labels)\n # gt_labels = torch.tensor(gt_labels)\n #\n # _, rec_order = torch.sort(boxes[:, 1], dim=0)\n # boxes = boxes[rec_order][:15]\n # labels = 
labels[rec_order][:15]\n # _, gt_order = torch.sort(gt_boxes[:, 1], dim=0)\n # gt_boxes = gt_boxes[gt_order][:15]\n # gt_labels = gt_labels[gt_order][:15]\n #\n # _, rec_order = torch.sort(labels, dim=0)\n # boxes = boxes[rec_order]\n # labels = labels[rec_order]\n # _, gt_order = torch.sort(-gt_labels, dim=0)\n # gt_boxes = gt_boxes[gt_order]\n # gt_labels = gt_labels[gt_order]\n #\n # labels = torch.tensor(labels)\n # gt_labels = torch.tensor(gt_labels)\n\n areas = (boxes[:, 2] - boxes[:, 0])*(boxes[:, 3] - boxes[:, 1])\n gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0])*(gt_boxes[:, 3] - gt_boxes[:, 1])\n x1 = torch.max(gt_boxes[:, 0].unsqueeze(1), boxes[:, 0].unsqueeze(0))\n y1 = torch.max(gt_boxes[:, 1].unsqueeze(1), boxes[:, 1].unsqueeze(0))\n x2 = torch.min(gt_boxes[:, 2].unsqueeze(1), boxes[:, 2].unsqueeze(0))\n y2 = torch.min(gt_boxes[:, 3].unsqueeze(1), boxes[:, 3].unsqueeze(0))\n intersect_area = (x2-x1).clamp(min=0)*(y2-y1).clamp(min=0)\n iou = intersect_area / (gt_areas.unsqueeze(1) + areas.unsqueeze(0) - intersect_area)\n for gt_i in range(len(gt_labels)):\n rec_i = iou[gt_i, :].argmax()\n if iou[gt_i, rec_i] > 0:\n gt_i2 = iou[:, rec_i].argmax()\n if gt_i2 == gt_i:\n gt_rec_labels[gt_i] = labels[rec_i]\n rec_is_false[rec_i] = 0\n\n tp = 0\n fp = 0\n fn = 0\n for gt_label, rec_label in zip(gt_labels, gt_rec_labels):\n if rec_label == -1:\n fn += count_dots_lbl(gt_label)\n else:\n res010 = label_tools.int_to_label010(rec_label)\n gt010 = label_tools.int_to_label010(gt_label)\n for p in range(6):\n if res010[p] == '1' and gt010[p] == '0':\n fp += 1\n elif res010[p] == '0' and gt010[p] == '1':\n fn += 1\n elif res010[p] == '1' and gt010[p] == '1':\n tp += 1\n for label, is_false in zip(labels, rec_is_false):\n if is_false:\n fp += count_dots_lbl(label)\n return tp, fp, fn\n\n\ndef char_metrics_rects(boxes, labels, gt_rects, image_wh, img, do_filter_lonely_rects):\n if do_filter_lonely_rects:\n boxes, labels = filter_lonely_rects(boxes, labels, img)\n gt_labels = [r[4] for r in gt_rects]\n gt_is_correct = [0] * len(gt_rects) # recognized label for gt, -1 - missed\n rec_is_false = [1] * len(labels) # recognized is false\n\n tp = 0\n fp = 0\n fn = 0\n if len(gt_rects) and len(labels):\n boxes = torch.tensor(boxes)\n gt_boxes = torch.tensor([r[:4] for r in gt_rects], dtype=torch.float32) * torch.tensor([image_wh[0], image_wh[1], image_wh[0], image_wh[1]])\n\n # Для отладки\n # labels = torch.tensor(labels)\n # gt_labels = torch.tensor(gt_labels)\n #\n # _, rec_order = torch.sort(boxes[:, 1], dim=0)\n # boxes = boxes[rec_order][:15]\n # labels = labels[rec_order][:15]\n # _, gt_order = torch.sort(gt_boxes[:, 1], dim=0)\n # gt_boxes = gt_boxes[gt_order][:15]\n # gt_labels = gt_labels[gt_order][:15]\n #\n # _, rec_order = torch.sort(labels, dim=0)\n # boxes = boxes[rec_order]\n # labels = labels[rec_order]\n # _, gt_order = torch.sort(-gt_labels, dim=0)\n # gt_boxes = gt_boxes[gt_order]\n # gt_labels = gt_labels[gt_order]\n #\n # labels = torch.tensor(labels)\n # gt_labels = torch.tensor(gt_labels)\n\n areas = (boxes[:, 2] - boxes[:, 0])*(boxes[:, 3] - boxes[:, 1])\n gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0])*(gt_boxes[:, 3] - gt_boxes[:, 1])\n x1 = torch.max(gt_boxes[:, 0].unsqueeze(1), boxes[:, 0].unsqueeze(0))\n y1 = torch.max(gt_boxes[:, 1].unsqueeze(1), boxes[:, 1].unsqueeze(0))\n x2 = torch.min(gt_boxes[:, 2].unsqueeze(1), boxes[:, 2].unsqueeze(0))\n y2 = torch.min(gt_boxes[:, 3].unsqueeze(1), boxes[:, 3].unsqueeze(0))\n intersect_area = (x2-x1).clamp(min=0)*(y2-y1).clamp(min=0)\n 
iou = intersect_area / (gt_areas.unsqueeze(1) + areas.unsqueeze(0) - intersect_area)\n for gt_i in range(len(gt_labels)):\n rec_i = iou[gt_i, :].argmax()\n if iou[gt_i, rec_i] > 0.5:\n if labels[rec_i] == gt_labels[gt_i]:\n gt_is_correct[gt_i] = 1\n for rec_i in range(len(labels)):\n gt_i = iou[:, rec_i].argmax()\n if iou[gt_i, rec_i] > 0.5:\n if labels[rec_i] == gt_labels[gt_i]:\n rec_is_false[rec_i] = 0\n tp = sum(gt_is_correct)\n fp = sum(rec_is_false)\n fn = len(gt_is_correct) - tp\n\n # if fp or fn:\n # draw = PIL.ImageDraw.Draw(img)\n # for i, is_correct in enumerate(gt_is_correct):\n # if not is_correct:\n # draw.rectangle(gt_boxes[i].tolist(), outline=\"red\")\n # draw.text((gt_boxes[i][0]-20, gt_boxes[i][3]), label_tools.int_to_label123(gt_labels[i]), fill=\"red\")\n # else:\n # draw.rectangle(gt_boxes[i].tolist(), outline=\"green\")\n # for i, is_false in enumerate(rec_is_false):\n # if is_false:\n # draw.rectangle(boxes[i].tolist(), outline=\"blue\")\n # draw.text((boxes[i][0]+20, boxes[i][3]), label_tools.int_to_label123(labels[i]), fill=\"blue\")\n # img.show()\n\n return tp, fp, fn\n\n\ndef validate_model(recognizer, data_list, do_filter_lonely_rects, metrics_for_lines = False):\n \"\"\"\n :param recognizer: infer_retinanet.BrailleInference instance\n :param data_list: list of (image filename, groundtruth pseudotext)\n :return: (<distance> avg. by documents, <distance> avg. by char, <<distance> avg. by char> avg. by documents>)\n \"\"\"\n sum_d = 0\n sum_d1 = 0.\n sum_len = 0\n # по тексту\n tp = 0\n fp = 0\n fn = 0\n # по rect\n tp_r = 0\n fp_r = 0\n fn_r = 0\n\n # по символам\n tp_c = 0\n fp_c = 0\n fn_c = 0\n\n for gt_dict in data_list:\n img_fn, gt_text, gt_rects = gt_dict['image_fn'], gt_dict['gt_text'], gt_dict['gt_rects']\n res_dict = recognizer.run(img_fn,\n lang=lang,\n draw_refined=infer_retinanet.BrailleInference.DRAW_NONE,\n find_orientation=False,\n process_2_sides=False,\n align_results=False,\n repeat_on_aligned=False,\n gt_rects=gt_rects)\n\n lines = res_dict['lines']\n if do_filter_lonely_rects:\n lines, filtered_chars = postprocess.filter_lonely_rects_for_lines(lines)\n if filtered_chars and show_filtered:\n img = res_dict['labeled_image']\n draw = PIL.ImageDraw.Draw(img)\n for b in filtered_chars:\n draw.rectangle(b.refined_box, fill=\"red\")\n img.show()\n\n if metrics_for_lines:\n boxes = []\n labels = []\n for ln in lines:\n boxes += [ch.refined_box for ch in ln.chars]\n labels += [ch.label for ch in ln.chars]\n else:\n boxes = res_dict['boxes']\n labels = res_dict['labels']\n\n tpi, fpi, fni = dot_metrics_rects(boxes = boxes, labels = labels,\n gt_rects = res_dict['gt_rects'], image_wh = (res_dict['labeled_image'].width, res_dict['labeled_image'].height),\n img=res_dict['labeled_image'], do_filter_lonely_rects=do_filter_lonely_rects)\n tp_r += tpi\n fp_r += fpi\n fn_r += fni\n\n res_text = lines_to_pseudotext(lines)\n d = Levenshtein.distance(res_text, gt_text)\n sum_d += d\n if len(gt_text):\n sum_d1 += d/len(gt_text)\n sum_len += len(gt_text)\n tpi, fpi, fni = dot_metrics(res_text, gt_text)\n tp += tpi\n fp += fpi\n fn += fni\n\n tpi, fpi, fni = char_metrics_rects(boxes = boxes, labels = labels,\n gt_rects = res_dict['gt_rects'], image_wh = (res_dict['labeled_image'].width, res_dict['labeled_image'].height),\n img=res_dict['labeled_image'], do_filter_lonely_rects=do_filter_lonely_rects)\n tp_c += tpi\n fp_c += fpi\n fn_c += fni\n\n\n # precision = tp/(tp+fp)\n # recall = tp/(tp+fn)\n precision_r = tp_r/(tp_r+fp_r) if tp_r+fp_r != 0 else 0.\n recall_r 
= tp_r/(tp_r+fn_r) if tp_r+fn_r != 0 else 0.\n precision_c = tp_c/(tp_c+fp_c) if tp_c+fp_c != 0 else 0.\n recall_c = tp_c/(tp_c+fn_c) if tp_c+fn_c != 0 else 0.\n return {\n # 'precision': precision,\n # 'recall': recall,\n # 'f1': 2*precision*recall/(precision+recall),\n 'precision_r': precision_r,\n 'recall_r': recall_r,\n 'f1_r': 2*precision_r*recall_r/(precision_r+recall_r) if precision_r+recall_r != 0 else 0.,\n 'precision_c': precision_c,\n 'recall_c': recall_c,\n 'f1_c': 2*precision_c*recall_c/(precision_c+recall_c) if precision_c+recall_c != 0 else 0.,\n 'd_by_doc': sum_d/len(data_list),\n 'd_by_char': sum_d/sum_len,\n 'd_by_char_avg': sum_d1/len(data_list)\n }\n\ndef evaluate_accuracy(params_fn, model, device, data_list, do_filter_lonely_rects = False, metrics_for_lines = True):\n \"\"\"\n :param recognizer: infer_retinanet.BrailleInference instance\n :param data_list: list of (image filename, groundtruth pseudotext)\n :return: (<distance> avg. by documents, <distance> avg. by char, <<distance> avg. by char> avg. by documents>)\n \"\"\"\n # по символам\n recognizer = infer_retinanet.BrailleInference(\n params_fn=params_fn,\n model_weights_fn=model,\n create_script=None,\n inference_width=inference_width,\n device=device,\n verbose=verbose)\n\n tp_c = 0\n fp_c = 0\n fn_c = 0\n for gt_dict in data_list:\n img_fn, gt_text, gt_rects = gt_dict['image_fn'], gt_dict['gt_text'], gt_dict['gt_rects']\n res_dict = recognizer.run(img_fn,\n lang=lang,\n draw_refined=infer_retinanet.BrailleInference.DRAW_NONE,\n find_orientation=False,\n process_2_sides=False,\n align_results=False,\n repeat_on_aligned=False,\n gt_rects=gt_rects)\n lines = res_dict['lines']\n if do_filter_lonely_rects:\n lines, filtered_chars = postprocess.filter_lonely_rects_for_lines(lines)\n if metrics_for_lines:\n boxes = []\n labels = []\n for ln in lines:\n boxes += [ch.refined_box for ch in ln.chars]\n labels += [ch.label for ch in ln.chars]\n else:\n boxes = res_dict['boxes']\n labels = res_dict['labels']\n tpi, fpi, fni = char_metrics_rects(boxes = boxes, labels = labels,\n gt_rects = res_dict['gt_rects'], image_wh = (res_dict['labeled_image'].width, res_dict['labeled_image'].height),\n img=None, do_filter_lonely_rects=do_filter_lonely_rects)\n tp_c += tpi\n fp_c += fpi\n fn_c += fni\n precision_c = tp_c/(tp_c+fp_c) if tp_c+fp_c != 0 else 0.\n recall_c = tp_c/(tp_c+fn_c) if tp_c+fn_c != 0 else 0.\n return {\n 'precision': precision_c,\n 'recall': recall_c,\n 'f1': 2*precision_c*recall_c/(precision_c+recall_c) if precision_c+recall_c != 0 else 0.,\n }\n\ndef main(table_like_format):\n # make data list\n for m in models:\n print(m)\n data_set = prepare_data()\n prev_model_root = None\n\n if table_like_format:\n print('model\\tweights\\tkey\\t'\n 'precision\\trecall\\tf1\\t'\n 'precision_c\\trecall_C\\tf1_c\\t'\n 'd_by_doc\\td_by_char\\td_by_char_avg')\n for model_root, model_weights in models:\n if model_root != prev_model_root:\n if not table_like_format:\n print('model: ', model_root)\n else:\n print()\n prev_model_root = model_root\n if verbose:\n print('evaluating weights: ', model_weights)\n params_fn = Path(local_config.data_path) / model_root / 'param.txt'\n if not params_fn.is_file():\n params_fn = Path(local_config.data_path) / (model_root + '.param.txt') # старый вариант\n assert params_fn.is_file(), str(params_fn)\n recognizer = infer_retinanet.BrailleInference(\n params_fn=params_fn,\n model_weights_fn=os.path.join(local_config.data_path, model_root, model_weights),\n create_script=None,\n 
inference_width=inference_width,\n verbose=verbose)\n for key, data_list in data_set.items():\n res = validate_model(recognizer, data_list, do_filter_lonely_rects=do_filter_lonely_rects, metrics_for_lines = metrics_for_lines)\n # print('{model_weights} {key} precision: {res[precision]:.4}, recall: {res[recall]:.4} f1: {res[f1]:.4} '\n # 'precision_r: {res[precision_r]:.4}, recall_r: {res[recall_r]:.4} f1_r: {res[f1_r]:.4} '\n # 'd_by_doc: {res[d_by_doc]:.4} d_by_char: {res[d_by_char]:.4} '\n # 'd_by_char_avg: {res[d_by_char_avg]:.4}'.format(model_weights=model_weights, key=key, res=res))\n if table_like_format:\n print('{model}\\t{weights}\\t{key}\\t'\n '{res[precision_r]:.4}\\t{res[recall_r]:.4}\\t{res[f1_r]:.4}\\t'\n '{res[precision_c]:.4}\\t{res[recall_c]:.4}\\t{res[f1_c]:.4}\\t'\n '{res[d_by_doc]:.4}\\t{res[d_by_char]:.4}\\t'\n '{res[d_by_char_avg]:.4}'.format(model=model_root, weights=model_weights, key=key, res=res))\n else:\n print('{model_weights} {key} '\n 'precision_r: {res[precision_r]:.4}, recall_r: {res[recall_r]:.4} f1_r: {res[f1_r]:.4} '\n 'd_by_doc: {res[d_by_doc]:.4} d_by_char: {res[d_by_char]:.4} '\n 'd_by_char_avg: {res[d_by_char_avg]:.4}'.format(model_weights=model_weights, key=key, res=res))\n\nif __name__ == '__main__':\n import timeit\n infer_retinanet.nms_thresh = 0.02\n postprocess.Line.LINE_THR = 0.6\n do_filter_lonely_rects = False\n metrics_for_lines = True # was False\n show_filtered = False\n t0 = timeit.default_timer()\n # for thr in (0.5, 0.6, 0.7, 0.8):\n # postprocess.Line.LINE_THR = thr\n # print(thr)\n main(table_like_format=True)\n print(timeit.default_timer() - t0)\n"
},
{
"alpha_fraction": 0.4488849639892578,
"alphanum_fraction": 0.4864208400249481,
"avg_line_length": 26.101797103881836,
"blob_id": "8bdc1942a554abecc4650b2c914f5fb1daac2f86",
"content_id": "6ee95557d39e79084197a6d97dc5a0c01d7cb7c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4560,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 167,
"path": "/data_utils/size_statistics.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nСтроит гистрограмму размеров рамок\n\"\"\"\nimport json\nimport numpy as np\nfrom pathlib import Path\n\nH_RANGE = (0, 100)\nW2H_RANGE = (0, 2, 0.01)\n\nclass Hist:\n def __init__(self, x1, x2, step = 1):\n self.x1 = x1\n self.x2 = x2\n self.step = step\n n = int((x2-x1)/step + 1)\n self.hist = np.zeros((n,), dtype=np.float)\n def add(self, v):\n v = np.clip(v, self.x1, self.x2)\n i = int((v - self.x1)/self.step)\n self.hist[i] += 1.\n def add_hist(self, h, scale = 1.):\n assert h.x1 == self.x1\n assert h.x2 == self.x2\n assert h.step == self.step\n self.hist += h.hist*scale\n def scale(self, scale):\n self.hist *= scale\n def bin_val(self, i):\n decimals = max(int(-np.log10(self.step) + 1), 0)\n return np.round(self.x1 + self.step*i, decimals=decimals)\n def total_sum(self):\n return self.hist.sum()\n def print_hist(self):\n all_s = \"\"\n s = \"\"\n for i in range(len(self.hist)):\n if all_s == \"\" and self.hist[i] == 0:\n continue\n s += \"{}\\t{}\\n\".format(self.bin_val(i), self.hist[i])\n if self.hist[i] != 0:\n all_s += s\n s = \"\"\n return all_s\n def quantiles(self, qs):\n iq = 0\n total = self.hist.sum()\n s = 0\n res = []\n max_i = 0\n for i in range(len(self.hist)):\n if self.hist[i]:\n max_i = i\n s += self.hist[i]/total\n if s > qs[iq]:\n res.append(self.bin_val(i))\n iq += 1\n if iq >= len(qs):\n break\n assert qs[iq] > qs[iq-1]\n if iq < len(qs) and qs[iq] >= 1.:\n res.append(self.bin_val(max_i))\n iq += 1\n assert iq == len(qs)\n return res\n\n\ndef init_hist():\n return Hist(*H_RANGE, 1), Hist(*W2H_RANGE)\n\ndef process_file(file_path):\n hh, w2hh = init_hist()\n with open(file_path) as f:\n data = json.loads(f.read())\n rects = [s[\"points\"] for s in data[\"shapes\"]]\n n = 0\n for r in rects:\n assert len(r) == 2\n for pt in r:\n assert len(pt) == 2\n w = r[1][0] - r[0][0]\n h = r[1][1] - r[0][1]\n assert w > 0 and h > 0\n hh.add(h)\n w2hh.add(w/h)\n n += 1\n if n > 0:\n hh.scale(1/n)\n w2hh.scale(1/n)\n assert hh.hist.sum() > 0.95 and hh.hist.sum() < 1.05\n assert w2hh.hist.sum() > 0.95 and w2hh.hist.sum() < 1.05\n return hh, w2hh\n else:\n return None, None\n\n\ndef process_dir_recursive(dir, mask=\"\"):\n hh, w2hh = init_hist()\n if mask == \"\":\n mask = \"**/\"\n img_files = list(Path(dir).glob(mask+\"*.json\"))\n for i, file_path in enumerate(img_files):\n if i % 100 == 99:\n print(i, \"/\", len(img_files))\n hhi, w2hhi = process_file(file_path)\n if hhi is not None:\n hh.add_hist(hhi)\n w2hh.add_hist(w2hhi)\n return hh, w2hh\n\n\ndef dir_statistics(data_dir, mask):\n hh, w2hh = process_dir_recursive(data_dir, mask)\n print(data_dir+mask, \"S:\", int(hh.total_sum()), \"H: \", hh.quantiles((0, 0.05, 0.25, 0.5, 0.75, 0.95, 1)), \"W2H:\", w2hh.quantiles((0, 0.05, 0.25, 0.5, 0.75, 0.95, 1)))\n #\n # print()\n # print(hh.print_hist())\n # print()\n # print(w2hh.print_hist())\n\ndef check_file(file_path, what, min_v, max_v):\n hhi, w2hhi = process_file(file_path)\n if what == \"h\":\n h = hhi\n elif what == \"w2h\":\n h = w2hhi\n else:\n assert False, what\n if h is None:\n return None\n q = h.quantiles([0.25, 0.75])\n if q[0] < min_v:\n return q[0]\n elif q[1] > max_v:\n return q[1]\n else:\n return None\n\ndef select_outliers(dir, mask, what, min_v, max_v):\n if mask == \"\":\n mask = \"**/\"\n img_files = list(Path(dir).glob(mask+\"*.json\"))\n for i, file_path in enumerate(img_files):\n # if i % 100 == 99:\n # print(i, \"/\", len(img_files))\n v = check_file(file_path, what, min_v, max_v)\n if v is not 
None:\n print(v, file_path)\n\n\n\nif __name__==\"__main__\":\n data_dir = r\"D:\\Programming.Data\\Braille\\web_uploaded\\re-processed200823\"\n mask = \"\"\n dir_statistics(data_dir, mask)\n\n what = \"h\"\n min_v = 20 #25\n max_v = 7000 #50\n\n # what = \"w2h\"\n # min_v = 0.57\n # max_v = 0.76 #50\n\n select_outliers(data_dir, mask, what, min_v, max_v)\n\n\n\n"
},
{
"alpha_fraction": 0.7832699418067932,
"alphanum_fraction": 0.7832699418067932,
"avg_line_length": 23,
"blob_id": "c0cf32a088c13bca1b2a424a2e3861c51a8bce02",
"content_id": "5ddeeec53294231ada4c08e70c84eb95ffbff231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 263,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/utils/db_api/mongo.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from pymongo import MongoClient\nfrom aiogram.contrib.fsm_storage.mongo import MongoStorage\nfrom data.config import IP\n\nclient = MongoClient(IP)\nstorage = MongoStorage()\n\ndatabase = client[\"breille\"]\n\nusers_db = database[\"users\"]\nprofiles_db = database[\"profiles\"]"
},
{
"alpha_fraction": 0.5213304162025452,
"alphanum_fraction": 0.6082791090011597,
"avg_line_length": 31.35087776184082,
"blob_id": "19117bdb364be2e9000e66d17f843b16f6aab45d",
"content_id": "e9138b140e46adff1b31baf78c6bad35f6e7c0ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5569,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 171,
"path": "/braille_utils/label_tools.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n'''\nUtilities to handle braille labels in various formats:\n int_label: label as int [0..63]\n label010: label as str of six 0 and 1s: '010101' etc.\n label123: label as str like '246'\n human_labels: labels in a manual annotation\n'''\nfrom collections import defaultdict\nfrom . import letters\n\ndef validate_int(int_label):\n '''\n Validate int_label is in [0..63]\n Raise exception otherwise\n '''\n assert isinstance(int_label, int)\n assert int_label >= 0 and int_label < 64, \"Ошибочная метка: \" + str(int_label)\n\ndef label010_to_int(label010):\n '''\n Convert label in label010 format to int_label\n '''\n v = [1,2,4,8,16,32]\n r = sum([v[i] for i in range(6) if label010[i]=='1'])\n validate_int(r)\n return r\n\ndef label_vflip(int_lbl):\n '''\n convert int_label in case of vertical flip\n '''\n validate_int(int_lbl)\n return ((int_lbl&(1+8))<<2) + ((int_lbl&(4+32))>>2) + (int_lbl&(2+16))\n\ndef label_hflip(int_lbl):\n '''\n convert int_label in case of horizontal flip\n '''\n validate_int(int_lbl)\n return ((int_lbl&(1+2+4))<<3) + ((int_lbl&(8+16+32))>>3)\n\ndef int_to_label010(int_lbl):\n int_lbl = int(int_lbl)\n v = [1,2,4,8,16,32]\n r = ''.join([ '1' if int_lbl&v[i] else '0' for i in range(6)])\n return r\n\ndef int_to_label123(int_lbl):\n int_lbl = int(int_lbl)\n v = [1,2,4,8,16,32]\n r = ''.join([ str(i+1) for i in range(6) if int_lbl&v[i]])\n return r\n\ndef int_to_unicode(int_lbl):\n return chr(0x2800 + int_lbl)\n\ndef label123_to_int(label123):\n v = [1,2,4,8,16,32]\n try:\n r = sum([v[int(ch)-1] for ch in label123])\n except:\n raise ValueError(\"incorrect label in 123 format: \" + label123)\n validate_int(r)\n return r\n\n\n# acceptable strings for manual labeling -> output chars in letters.py (acceptable synonyms)\nlabeling_synonyms = {\n \"xx\": \"XX\",\n \"хх\": \"XX\", # russian х on the left\n \"cc\": \"CC\",\n \"сс\": \"CC\", # russian с on the left\n \"<<\": \"«\",\n \">>\": \"»\",\n \"((\": \"()\",\n \"))\": \"()\",\n \"№\": \"н\",\n \"&&\": \"§\",\n}\n\n\ndef human_label_to_int(label):\n '''\n Convert label from manual annotations to int_label\n '''\n label = label.lower()\n if label[0] == '~':\n label123 = label[1:]\n if label123[-1] == '~':\n label123 = label123[:-1]\n else:\n label = labeling_synonyms.get(label, label)\n ch_list = reverce_dict.get(label, None)\n if not ch_list:\n raise ValueError(\"unrecognized label: \" + label)\n if len(ch_list) > 1:\n raise ValueError(\"label: \" + label + \" has more then 1 meanings: \" + str(ch_list))\n label123 = list(ch_list)[0]\n return label123_to_int(label123)\n\n\ndef int_to_letter(int_lbl, langs):\n '''\n Gets letter corresponding to int_lbl in a first language dict that contains it\n :param int_lbl:\n :param langs: list of language dict codes (see letters.letter_dicts)\n :return: letter or string (for special symbols that need postprocessing) or None\n '''\n label123 = int_to_label123(int_lbl)\n for lang in langs:\n d = letters.letter_dicts[lang]\n res = d.get(label123, None)\n if res is not None:\n return res\n return None\n\n\n# global dict: letter (or spec. 
string) -> set of labels123 from different dicts from letters.letter_dicts\nreverce_dict = defaultdict(set)\nfor d in letters.letter_dicts.values():\n for lbl123, char in d.items():\n reverce_dict[char].add(lbl123)\n\n# global list of 64 bools indicating what labels are valid in most common language dicts\nlabel_is_valid = [\n True if (int_to_letter(int_label, ['SYM','RU', 'EN', 'NUM']) is not None) else False\n for int_label in range(64)\n]\n\n\nif __name__ == '__main__':\n assert label010_to_int('100000') == 1\n assert label010_to_int('101000') == 1+4\n assert label010_to_int('000001') == 32\n\n assert label_hflip(label010_to_int('111000')) == label010_to_int('000111')\n assert label_hflip(label010_to_int('000011')) == label010_to_int('011000')\n assert label_hflip(label010_to_int('001100')) == label010_to_int('100001')\n\n assert label_vflip(label010_to_int('111100')) == label010_to_int('111001')\n assert label_vflip(label010_to_int('001011')) == label010_to_int('100110')\n\n assert int_to_label010(label010_to_int('001011')) == '001011'\n assert int_to_label123(label010_to_int('001011')) == '356'\n\n assert int_to_letter(label010_to_int('110110'),['EN']) == 'g'\n assert int_to_letter(label010_to_int('000000'),['EN']) is None\n\n assert int_to_label010(label123_to_int('124')) == '110100'\n assert int_to_label010(label123_to_int('26')) == '010001'\n assert int_to_label010(label123_to_int('')) == '000000'\n #assert int_to_label010(label123_to_int('8')) == '000000'\n\n assert int_to_label010(human_label_to_int('1')) == '100000'\n assert int_to_label010(human_label_to_int('CC')) == '000110'\n assert int_to_label010(human_label_to_int('xx')) == '111111'\n assert int_to_label010(human_label_to_int('Хх')) == '111111' # русский\n assert int_to_label010(human_label_to_int('##')) == '001111'\n assert int_to_label010(human_label_to_int('а')) == '100000'\n assert int_to_label010(human_label_to_int('Б')) == '110000'\n assert int_to_label010(human_label_to_int('2')) == '110000'\n\n print([\n (label_is_valid[int_lbl], int_to_label123(int_lbl), int_to_letter(int_lbl, ['RU']))\n for int_lbl in range(64)\n ])\n print(sum(label_is_valid))\n\n print('OK')\n"
},
{
"alpha_fraction": 0.6055470108985901,
"alphanum_fraction": 0.6055470108985901,
"avg_line_length": 33.21052551269531,
"blob_id": "6d75da3ddac39ca3b4e5df39899acfe80353aeb6",
"content_id": "64149c375b1f77c8a6712afc036bf1645949a300",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/keyboards/inline/set_lang.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\n\nset_lang = InlineKeyboardMarkup(\n keyboard=[\n [\n InlineKeyboardButton(text=\"Russian\", callback_data='RU'),\n InlineKeyboardButton(text=\"English\", callback_data='EN'),\n ],\n [\n InlineKeyboardButton(text=\"Uzbek (cyrillic)\", callback_data='UZ'),\n InlineKeyboardButton(text=\"Uzbek (latin)\", callback_data='UZL'),\n ],\n [\n InlineKeyboardButton(text=\"Latvian\", callback_data='LV'),\n InlineKeyboardButton(text=\"Greek\", callback_data='GR'),\n ],\n ],\n resize_keyboard=True,\n)"
},
{
"alpha_fraction": 0.6570155620574951,
"alphanum_fraction": 0.6592427492141724,
"avg_line_length": 32.67499923706055,
"blob_id": "0a3dac34016dd34dfeaf2714a4abe0c500de10ae",
"content_id": "af56dccf12b15a84f268af0926a628bc40c3dbed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1347,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 40,
"path": "/braille_utils/json_to_text.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os.path\nimport glob\nimport sys\nsys.path.insert(1, '..')\nimport braille_utils.postprocess as postprocess\nimport train.data as data\n\n\ndef annonation_to_text(json_filename, lang):\n rects = data.read_LabelMe_annotation(label_filename = json_filename, get_points = False)\n boxes = [r[:4] for r in rects]\n labels = [r[4] for r in rects]\n lines = postprocess.boxes_to_lines(boxes, labels, lang=lang)\n return postprocess.lines_to_text(lines)\n\n\ndef process_json_annotation(json_filename, lang='RU'):\n print('processing ' + json_filename)\n txt = annonation_to_text(json_filename, lang)\n if json_filename.endswith('.labeled.json'):\n out_fn = json_filename[:-len('.labeled.json')]+'.marked.txt'\n else:\n raise Exception('incorrect filename')\n with open(out_fn, 'wt') as f:\n f.write(txt)\n\nif __name__ == \"__main__\":\n parcer = argparse.ArgumentParser()\n parcer.add_argument('file')\n args = parcer.parse_args()\n print(args.file)\n if os.path.isfile(args.file) and args.file.lower().endswith('.json'):\n process_json_annotation(args.file)\n elif os.path.isdir(args.file):\n files = glob.glob(os.path.join(args.file, '*.json'))\n for fn in files:\n process_json_annotation(fn)\n else:\n raise Exception('incorrect argument: ' + args.file)\n"
},
{
"alpha_fraction": 0.5725811123847961,
"alphanum_fraction": 0.592341423034668,
"avg_line_length": 43.42856979370117,
"blob_id": "ef14f9d77fa1cff7a89914a89d46fd90d822011d",
"content_id": "ba3c88c337033e7ca1b605da2b47ccccd7685225",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17356,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 385,
"path": "/data_utils/data.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n'''\nclass BrailleDataset: loads indexed dataset from DSBI dataset or LabelMe annotated dataset\n(see https://github.com/IlyaOvodov/labelme labelling tool)\n'''\nimport os\nimport random\nimport json\nimport PIL\nimport numpy as np\nimport albumentations\nimport albumentations.augmentations.transforms as T\nimport albumentations.augmentations.functional as albu_f\nimport torch\nimport torchvision.transforms.functional as F\nimport cv2\n\nfrom data_utils import dsbi\nfrom braille_utils import label_tools as lt\nimport local_config\n\n\ndef rect_vflip(b):\n '''\n Flips symbol box converting label\n :param b: tuple (left, top, right, bottom, label)\n :return: converted tuple (left, top, right, bottom, label)\n '''\n return b[:4] + (lt.label_vflip(b[4]),)\n\ndef rect_hflip(b):\n '''\n Flips symbol box converting label\n :param b: tuple (left, top, right, bottom, label)\n :return: converted tuple (left, top, right, bottom, label)\n '''\n return b[:4] + (lt.label_hflip(b[4]),)\n\n\ndef common_aug(mode, params):\n '''\n :param mode: 'train', 'test', 'inference'\n :param params:\n '''\n #aug_params = params.get('augm_params', dict())\n augs_list = []\n assert mode in {'train', 'debug', 'inference'}\n if mode == 'train':\n augs_list.append(albumentations.PadIfNeeded(min_height=params.data.net_hw[0], min_width=params.data.net_hw[1],\n border_mode=cv2.BORDER_REPLICATE,\n always_apply=True))\n augs_list.append(albumentations.RandomCrop(height=params.data.net_hw[0], width=params.data.net_hw[1], always_apply=True))\n if params.augmentation.rotate_limit:\n augs_list.append(T.Rotate(limit=params.augmentation.rotate_limit, border_mode=cv2.BORDER_CONSTANT, always_apply=True))\n # augs_list.append(T.OpticalDistortion(border_mode=cv2.BORDER_CONSTANT)) - can't handle boundboxes\n elif mode == 'debug':\n augs_list.append(albumentations.CenterCrop(height=params.data.net_hw[0], width=params.data.net_hw[1], always_apply=True))\n if mode != 'inference':\n if params.augmentation.get('blur_limit', 4):\n augs_list.append(T.Blur(blur_limit=params.augmentation.get('blur_limit', 4)))\n if params.augmentation.get('RandomBrightnessContrast', True):\n augs_list.append(T.RandomBrightnessContrast())\n #augs_list.append(T.MotionBlur())\n if params.augmentation.get('JpegCompression', True):\n augs_list.append(T.JpegCompression(quality_lower=30, quality_upper=100))\n #augs_list.append(T.VerticalFlip())\n if params.augmentation.get('HorizontalFlip', True):\n augs_list.append(T.HorizontalFlip())\n\n return albumentations.ReplayCompose(augs_list, p=1., bbox_params = {'format':'albumentations', 'min_visibility':0.5})\n\n\nclass ImagePreprocessor:\n '''\n Preprocess image and it's annotation\n '''\n def __init__(self, params, mode):\n assert mode in {'train', 'debug', 'inference'}\n self.params = params\n self.albumentations = common_aug(mode, params)\n\n def preprocess_and_augment(self, img, rects=[]):\n aug_img = self.random_resize_and_stretch(img,\n new_width_range=self.params.augmentation.img_width_range,\n stretch_limit=self.params.augmentation.stretch_limit)\n aug_res = self.albumentations(image=aug_img, bboxes=rects)\n aug_img = aug_res['image']\n aug_bboxes = aug_res['bboxes']\n aug_bboxes = [b for b in aug_bboxes if\n b[0] > 0 and b[0] < 1 and\n b[1] > 0 and b[1] < 1 and\n b[2] > 0 and b[2] < 1 and\n b[3] > 0 and b[3] < 1]\n if not self.params.data.get('get_points', False):\n for t in aug_res['replay']['transforms']:\n if t['__class_fullname__'].endswith('.VerticalFlip') and 
t['applied']:\n aug_bboxes = [rect_vflip(b) for b in aug_bboxes]\n if t['__class_fullname__'].endswith('.HorizontalFlip') and t['applied']:\n aug_bboxes = [rect_hflip(b) for b in aug_bboxes]\n return aug_img, aug_bboxes\n\n def random_resize_and_stretch(self, img, new_width_range, stretch_limit = 0):\n new_width_range = T.to_tuple(new_width_range)\n stretch_limit = T.to_tuple(stretch_limit, bias=1)\n new_sz = int(random.uniform(new_width_range[0], new_width_range[1]))\n stretch = random.uniform(stretch_limit[0], stretch_limit[1])\n\n img_max_sz = img.shape[1] #max(img.shape[0]*stretch, img.shape[1]) #img.shape[1] # GVNC - now it is resizing to max\n new_width = int(img.shape[1]*new_sz/img_max_sz)\n new_width = ((new_width+31)//32)*32\n new_height = int(img.shape[0]*stretch*new_sz/img_max_sz)\n new_height = ((new_height+31)//32)*32\n return cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_LINEAR)\n\n def to_normalized_tensor(self, img, device='cpu'):\n '''\n returns image converted to FloatTensor and normalized\n '''\n assert img.ndim == 3\n ten_img = torch.from_numpy(img.transpose((2, 0, 1))).to(device).float()\n means = ten_img.view(3, -1).mean(dim=1)\n std = torch.max(ten_img.view(3, -1).std(dim=1), torch.tensor(self.params.data.get('max_std',0)*255).to(ten_img))\n #(ten_img.view(3, -1).max(dim=1)[0] - ten_img.view(3, -1).min(dim=1)[0])/6)\n ten_img = (ten_img - means.view(-1, 1, 1)) / (3*std.view(-1, 1, 1))\n # decolorize\n ten_img = ten_img.mean(dim=0).expand(3, -1, -1)\n return ten_img\n\n\ndef unify_shape(img):\n if len(img.shape) == 2:\n img = np.tile(img[:, :, np.newaxis], (1, 1, 3))\n if img.shape[2] == 4:\n img = img[:, :, :3]\n return img\n\n\nclass BrailleDataset(torch.utils.data.ConcatDataset):\n '''\n Indexed assess to annotated images listed in a txt file\n Returns annotated images as tuple: ( img: Tensor CxHxW,\n symbols: np.array(Nx5 i.e. 
Nx(left, top, right, bottom [0..1), class [1..63]))\n If get_points mode is on, class is always 0\n '''\n def __init__(self, params, list_file_names, mode, verbose):\n '''\n :param params: params dict\n :param list_file_names: list of files with image files list (relative to local_config.data_path)\n each file should contain list of image file paths relative to that list file location\n :param mode: augmentation and output mode ('train', 'debug', 'inference')\n :param verbose: if != 0 enables debug print\n '''\n sub_datasets = []\n for list_file_name in list_file_names:\n if isinstance(list_file_name, (tuple, list)):\n list_params = list_file_name[2] if len(list_file_name) >=3 else {}\n assert isinstance(list_params, dict)\n list_file_name, sample_weight = list_file_name[:2]\n else:\n sample_weight = 1\n list_params = {}\n while sample_weight >= 1:\n sub_datasets.append(BrailleSubDataset(params, list_file_name, mode, verbose, 1, list_params))\n sample_weight -= 1\n if sample_weight > 1e-10:\n sub_datasets.append(BrailleSubDataset(params, list_file_name, mode, verbose, sample_weight, list_params))\n\n super(BrailleDataset, self).__init__(sub_datasets)\n\nclass BrailleSubDataset:\n '''\n Provides subset of data for BrailleSubDataset defined by one list file\n '''\n\n def __init__(self, params, list_file_name, mode, verbose, sample_weight, list_params):\n '''\n :param params: params dict\n :param list_file_names: list of files with image files list (relative to local_config.data_path)\n each file should contain list of image file paths relative to that list file location\n :param mode: augmentation and output mode ('train', 'debug', 'inference')\n :param verbose: if != 0 enables debug print\n :param sample_weight: при значениях больше двух - датасет повторяется. При дробных значениях - уменьшается\n видимы размер датасета за счето того, что при запросе одного индекса выдатся последовательно разные элементы.\n дробная часть долна быть кратна 1/n\n :param list_params: опиональный параметр - dict, 3-й при в списке в m param. 
Выдается в батч ввместе с данными об item.\n '''\n assert mode in {'train', 'debug', 'inference'}\n self.params = params\n self.mode = mode\n self.image_preprocessor = ImagePreprocessor(params, mode)\n\n self.image_files = []\n self.label_files = []\n\n list_file = os.path.join(local_config.data_path, list_file_name)\n data_dir = os.path.dirname(list_file)\n with open(list_file, 'r') as f:\n files = f.readlines()\n for fn in files:\n if fn[-1] == '\\n':\n fn = fn[:-1]\n fn = fn.replace('\\\\', '/')\n image_fn, labels_fn = self.filenames_of_item(data_dir, fn)\n if image_fn:\n self.image_files.append(image_fn)\n self.label_files.append(labels_fn)\n else:\n print(\"WARNING: can't load file:\", data_dir, fn)\n\n assert len(self.image_files) > 0, list_file\n\n self.images = [None] * len(self.image_files)\n self.rects = [None] * len(self.image_files)\n self.aug_images = [None] * len(self.image_files)\n self.aug_bboxes = [None] * len(self.image_files)\n self.REPEAT_PROBABILITY = 0.6\n self.verbose = verbose\n assert sample_weight <= 1\n if sample_weight < 1:\n assert mode == 'train'\n self.denominator = int(1/sample_weight)\n self.call_count = 0\n self.list_params = list_params\n\n def __len__(self):\n return len(self.image_files) // self.denominator\n\n def __getitem__(self, item):\n if self.denominator > 1:\n self.call_count = (self.call_count + 1) % self.denominator\n item = item * self.denominator + self.call_count\n img = self.images[item]\n if img is None:\n img_fn = self.image_files[item]\n img = PIL.Image.open(img_fn)\n img = np.asarray(img)\n img= unify_shape(img)\n assert len(img.shape) == 3 and img.shape[2] == 3, (img_fn, img.shape)\n self.images[item] = img\n width = img.shape[1]\n height = img.shape[0]\n rects = self.rects[item]\n if rects is None:\n lbl_fn = self.label_files[item]\n rects = self.read_annotation(lbl_fn, width, height)\n rects = [ (min(r[0], r[2]), min(r[1], r[3]), max(r[0], r[2]), max(r[1], r[3])) + r[4:] for r in rects]\n rects = [ r for r in rects if r[0] < r[2] and r[1] < r[3]]\n self.rects[item] = rects\n\n if (self.aug_images[item] is not None) and (random.random() < self.REPEAT_PROBABILITY):\n aug_img = self.aug_images[item]\n aug_bboxes = self.aug_bboxes[item]\n else:\n aug_img, aug_bboxes = self.image_preprocessor.preprocess_and_augment(img, rects)\n self.aug_images[item] = aug_img\n self.aug_bboxes[item] = aug_bboxes\n\n if self.verbose >= 2:\n print('BrailleDataset: preparing file '+ self.image_files[item] + '. 
Total rects: ' + str(len(aug_bboxes)))\n\n if self.mode == 'train':\n return self.image_preprocessor.to_normalized_tensor(aug_img), np.asarray(aug_bboxes).reshape(-1, 5), self.list_params\n else:\n return self.image_preprocessor.to_normalized_tensor(aug_img), np.asarray(aug_bboxes).reshape(-1, 5), self.list_params, aug_img\n\n def filenames_of_item(self, data_dir, fn):\n '''\n Finds appropriate image and label full filenames for list item and validates these files exists\n :param data_dir: dir base for filename from list\n :param fn: filename of image file relative to data_dir\n :return: image filename, label filename or None, None if no label file exists\n '''\n def check_label_ext(image_fn, ext):\n if not os.path.isfile(image_fn):\n return None\n lbl_fn = image_fn.rsplit('.',1)[0]+ext\n if os.path.isfile(lbl_fn):\n return lbl_fn\n return None\n\n full_fn = os.path.join(data_dir, fn)\n lbl_fn = check_label_ext(full_fn, '.json')\n if lbl_fn:\n return full_fn, lbl_fn\n lbl_fn = check_label_ext(full_fn, '.txt')\n if lbl_fn:\n return full_fn, lbl_fn\n full_fn = full_fn.rsplit('.', 1)[0] + '+recto.jpg'\n lbl_fn = check_label_ext(full_fn, '.txt')\n if lbl_fn:\n return full_fn, lbl_fn\n return None, None\n\n def read_annotation(self, label_filename, width, height):\n '''\n Reads annotation file (DSBI or LabelMe)\n :param label_filename: annotation file (txt for DSBI or JSON for LabelMe\n :return: list: [(left,top,right,bottom,label), ...] where coords are (0..1), label is int [1..63]\n '''\n ext = label_filename.rsplit('.', 1)[-1]\n if ext == 'txt':\n return dsbi.read_DSBI_annotation(label_filename, width, height,\n self.params.data.get('rect_margin', 0.3),\n self.params.data.get('get_points', False))\n elif ext == 'json':\n return read_LabelMe_annotation(label_filename, self.params.data.get('get_points', False))\n else:\n raise ValueError(\"unsupported label file type: \" + ext)\n\n\ndef limiting_scaler(source, dest):\n '''\n Creates function to convert coordinates from source scale to dest with limiting to [0..dest)\n :param source: source scale\n :param dest: dest scale\n :return: function f(x) for linear conversion [0..sousce)->[0..dest) so that\n f(0) = 0, f(source-1) = (source-1)/source*dest, f(x<0)=0, f(x>=source) = (source-1)/source*dest\n '''\n def scale(x):\n return int(min(max(0, x), source-1)) * dest/source\n return scale\n\n\ndef read_LabelMe_annotation(label_filename, get_points):\n '''\n Reads LabelMe (see https://github.com/IlyaOvodov/labelme labelling tool) annotation JSON file.\n :param label_filename: path to LabelMe annotation JSON file\n :return: list of rect objects. 
Each rect object is a tuple (left, top, right, bottom, label) where\n left..bottom are in [0,1), label is int in [1..63]\n '''\n if get_points:\n raise NotImplementedError(\"read_annotation get_point mode not implemented for LabelMe annotation\")\n with open(label_filename, 'r', encoding='cp1251') as opened_json:\n loaded = json.load(opened_json)\n convert_x = limiting_scaler(loaded[\"imageWidth\"], 1.0)\n convert_y = limiting_scaler(loaded[\"imageHeight\"], 1.0)\n rects = [(convert_x(min(xvals)),\n convert_y(min(yvals)),\n convert_x(max(xvals)),\n convert_y(max(yvals)),\n lt.human_label_to_int(label),\n ) for label, xvals, yvals in\n ((shape[\"label\"],\n [coords[0] for coords in shape[\"points\"]],\n [coords[1] for coords in shape[\"points\"]]\n ) for shape in loaded[\"shapes\"]\n )\n ]\n return rects\n\n\ndef create_dataloader(params, collate_fn, list_file_names, shuffle, mode = 'train', verbose = 0):\n '''\n :param params: params AttrDict\n :param collate_fn: converts batch from BrailleDataset output to format required by model\n :return: pytorch DataLoader\n '''\n dataset = BrailleDataset(params, list_file_names=list_file_names, mode=mode, verbose=verbose)\n loader = torch.utils.data.DataLoader(dataset, params.data.batch_size, shuffle=shuffle, num_workers=0, collate_fn=collate_fn)\n return loader\n\n\nif __name__ == '__main__':\n from ovotools import AttrDict\n\n assert rect_hflip( (0,1,2,3, lt.label010_to_int('111000'),) ) == (0,1,2,3, lt.label010_to_int('000111'),)\n assert rect_hflip( (0,1,2,3, lt.label010_to_int('000011'),) ) == (0,1,2,3, lt.label010_to_int('011000'),)\n assert rect_hflip( (0,1,2,3, lt.label010_to_int('001100'),) ) == (0,1,2,3, lt.label010_to_int('100001'),)\n\n assert rect_vflip( (0,1,2,3, lt.label010_to_int('111100'),) ) == (0,1,2,3, lt.label010_to_int('111001'),)\n assert rect_vflip( (0,1,2,3, lt.label010_to_int('001011'),) ) == (0,1,2,3, lt.label010_to_int('100110'),)\n\n params = AttrDict(data=AttrDict(\n batch_size=2,\n get_points=False,\n rect_margin=0.3\n ))\n data_loader = create_dataloader(params, collate_fn = None,\n list_file_names = [os.path.join(local_config.data_path, r\"DSBI\\data\\train.txt\")],\n shuffle=False)\n print(len(data_loader))\n\n\n print('OK')\n"
},
{
"alpha_fraction": 0.8030303120613098,
"alphanum_fraction": 0.8030303120613098,
"avg_line_length": 32,
"blob_id": "b5446ffa6b72b8845eda7a2e703cd235d1130a4d",
"content_id": "e6afcbc979a25bfab4ee08bad27f40543afc39a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 66,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 2,
"path": "/run_web_app.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from web_app import angelina_reader_app\nangelina_reader_app.run()\n"
},
{
"alpha_fraction": 0.4690265357494354,
"alphanum_fraction": 0.4851453900337219,
"avg_line_length": 26.042734146118164,
"blob_id": "16e73a51045eb0f2367bc8a0b956cf90e1eb9a60",
"content_id": "08d82ac5b4bb54a2ef43d039b520b8d839a11c25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3195,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 117,
"path": "/data_utils/size_statistics_dsbi.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nСтроит гистрограмму размеров рамок\n\"\"\"\nimport json\nimport numpy as np\nfrom pathlib import Path\n\nimport dsbi\n\nH_RANGE = (0, 100)\n\nclass Hist:\n def __init__(self, x1, x2, step = 1):\n self.x1 = x1\n self.x2 = x2\n self.step = step\n n = int((x2-x1)/step + 1)\n self.hist = np.zeros((n,), dtype=np.float)\n def add(self, v):\n v = np.clip(v, self.x1, self.x2)\n i = int((v - self.x1)/self.step)\n self.hist[i] += 1.\n def add_hist(self, h, scale = 1.):\n assert h.x1 == self.x1\n assert h.x2 == self.x2\n assert h.step == self.step\n self.hist += h.hist*scale\n def scale(self, scale):\n self.hist *= scale\n def bin_val(self, i):\n decimals = max(int(-np.log10(self.step) + 1), 0)\n return np.round(self.x1 + self.step*i, decimals=decimals)\n def total_sum(self):\n return self.hist.sum()\n def print_hist(self):\n all_s = \"\"\n s = \"\"\n for i in range(len(self.hist)):\n if all_s == \"\" and self.hist[i] == 0:\n continue\n s += \"{}\\t{}\\n\".format(self.bin_val(i), self.hist[i])\n if self.hist[i] != 0:\n all_s += s\n s = \"\"\n return all_s\n def quantiles(self, qs):\n iq = 0\n total = self.hist.sum()\n s = 0\n res = []\n max_i = 0\n for i in range(len(self.hist)):\n if self.hist[i]:\n max_i = i\n s += self.hist[i]/total\n if s > qs[iq]:\n res.append(self.bin_val(i))\n iq += 1\n if iq >= len(qs):\n break\n assert qs[iq] > qs[iq-1]\n if iq < len(qs) and qs[iq] >= 1.:\n res.append(self.bin_val(max_i))\n iq += 1\n assert iq == len(qs)\n return res\n\n\ndef process_list(xs, fn):\n hh = Hist(*H_RANGE, 1)\n n = len(xs) - 1\n if n>0:\n for i, xi in enumerate(xs[:-1]) :\n w = xs[i+1] - xi\n if w > 0:\n hh.add(w)\n else:\n print(xi, xs[i+1], w, fn)\n return hh\n\n\ndef process_file(file_path):\n angle, h_lines, v_lines, cells = dsbi.read_txt(file_path)\n if h_lines is not None:\n ww = process_list(v_lines, file_path)\n hh = process_list(h_lines, file_path)\n return hh, ww\n else:\n return None, None\n\n\ndef process_dir_recursive(dir, mask=\"\"):\n hh, ww = Hist(*H_RANGE, 1), Hist(*H_RANGE, 1)\n if mask == \"\":\n mask = \"**/\"\n img_files = list(Path(dir).glob(mask+\"*recto.txt\"))\n for i, file_path in enumerate(img_files):\n if i % 10 == 99:\n print(i, \"/\", len(img_files))\n hhi, wwi = process_file(file_path)\n if hhi is not None:\n hh.add_hist(hhi)\n ww.add_hist(wwi)\n return hh, ww\n\n\ndef dir_statistics(data_dir, mask):\n hh, ww = process_dir_recursive(data_dir, mask)\n print(hh.print_hist())\n print(ww.print_hist())\n\nif __name__==\"__main__\":\n data_dir = r\"D:\\Programming.Data\\Braille\\DSBI\\data\"\n mask = \"\"\n dir_statistics(data_dir, mask)\n"
},
{
"alpha_fraction": 0.7782942056655884,
"alphanum_fraction": 0.7903788089752197,
"avg_line_length": 42.918365478515625,
"blob_id": "7fb22aee1679ccab79a8d150388efc5e82a78ce3",
"content_id": "3e3abec4bfbd612f46a1729ea83fef8bbad7c51f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5537,
"license_type": "no_license",
"max_line_length": 594,
"num_lines": 98,
"path": "/README.md",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "## Общее описание решения\n\nПроект является цифровым решением в рамках конкурса World AI&DATA Challenge, соответствующиим задаче \"<Распознавание химических, математических, физических символов и нот в электронный шрифт Брайля>\" (https://git.asi.ru/tasks/world-ai-and-data-challenge/<url задачи>)\n\nРазработанное и представленное Ильёй Оводовым в 2020 году на конкурс World AI&Data Challenge решение Angelina Braille Reader (https://datamasters.ru/digital_solutions_library/social/sol1 : репозиторий - https://git.asi.ru/solutions/world-ai-and-data-challenge/angelina-braille-reader , ссылка на решение - from=http://angelina-reader.ru/), получившее главный приз конкурса, требует расширения своей функциональности на обработку математических, физических и химических текстов, а также нот, написанных Брайлем.Также мы улучшили модель и добавили перевод с текста на брайл.\n(https://git.asi.ru/Fazo_Team/braille-reader-bot)\n\n## Общее описание логики работы решения\nРазработка модуля распознавания химического, физического и математического языков, а также символом нотного стана с целью развития модели Angelina Braille Reader\n\nПолзователь может запустить бота с помощью Telegram : T.me/Fazouzbot\n\n\n\n## Требования к окружению для запуска продукта\nПлатформа: кроссплатформенное решение, linux, windows, macOS, иное.\n\nИспользуемый язык программирования с указанием версии(Python 3.8+), если существенно.\n\n## Сценарий сборки и запуска проекта\nПриведите пошаговую инструкцию по запуску вашего проекта.\n\n 1) Найти по ссылку : T.me/fazouzbot\n 2) Запустить и регистрироваться \n 3) Далее выбрать необходимый параметр Text --> Braille или Braille --> Text\n 4) Нажимать кнопку отправить фото и выбрать необходимую картинку для перевода \n\n\n## Сценарий компиляции и запуска\n``` \ngit clone https://git.asi.ru/Fazo_Team/braille-reader-bot\n\npip install -r requirements.txt\n\nwget -O weights/model.t7 http://angelina-reader.ovdv.ru/retina_chars_eced60.clr.008\n\npython3 app.py\n```\n## Примеры использования\n\n\nhttps://youtube.com/shorts/bN4xtTGXQhw?feature=share\n\n\n## Используемые наборы данных\n\nhttps://elib.bspu.by/bitstream/doc/4402/1/Условные обозначения по система Брайля_2010wm.pdf\n\n## Дополнительный инструментарий\n\nДополнительные инструменты, которые требуются для развёртывания решения.\nframeworks(OpenCV, Pillow, Aiogramm, Keras, Telegram Bot API)\n\n## General Solution Description\n\nThe project is the digital solution for a World AI&DATA Challenge's task \"<Recognition of chemical, mathematical, physical symbols and notes in electronic Braille>\" (https://git.asi.ru/tasks/world-ai-and-data-challenge/<task url>)\n\nDeveloped and presented by Ilya Ovodov in 2020 for the World AI&Data Challenge competition, the Angelina Braille Reader solution (https://datamasters.ru/digital_solutions_library/social/sol1 : repository - https://git.asi.ru/solutions/world-ai-and-data-challenge/angelina-braille-reader , link to the solution - from=http: / / angelina-reader. 
ru/), which received the main prize of the competition, requires expanding its functionality to process mathematical, physical and chemical texts, as well as notes written in Braille.We also improved the model and added a translation from text to braille.\n(https://git.asi.ru/Fazo_Team/braille-reader-bot)\n\n## Solution's logics general description\n\nDevelopment of a module for recognizing chemical, physical and mathematical languages, as well as the symbol of the musical notation in order to develop the Angelina Braille Reader model\n\nThe user can launch the bot using Telegram : T.me/Fazouzbot\n\n## Execution environmental requirements and setup\nPlatform: cross-platform solution, linux, windows, macOS, other.\n\nThe programming language used, indicating the version (Python 3.8+), if significant.\n\n\n\n## Compilation and launch scenario\n\n```\ngit clone https://git.asi.ru/Fazo_Team/braille-reader-bot\n\npip install -r requirements.txt\n\nwget -O weights/model.t7 http://angelina-reader.ovdv.ru/retina_chars_eced60.clr.008\n\npython3 app.py\n```\n\n\n## Use cases and examples\n\nhttps://youtube.com/shorts/bN4xtTGXQhw?feature=share\n\n\n## Used datasets \n\nhttps://elib.bspu.by/bitstream/doc/4402/1/Условные обозначения по система Брайля_2010wm.pdf\n\n## Additional instrumentation and tools\n\nframeworks(OpenCV, Pillow, Aiogramm, Keras, Telegram Bot API)"
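\n## Minimal recognition example (sketch)\n\nA minimal sketch of driving the recognizer from Python rather than through the bot. It assumes this repository is on PYTHONPATH and the weights were downloaded as shown above; the BrailleInference constructor and run() arguments mirror the validation script elsewhere in this repository, and every file name below is a placeholder:\n\n```python\nimport infer_retinanet  # module location within the repo is an assumption\n\nrecognizer = infer_retinanet.BrailleInference(\n    params_fn='weights/param.txt',        # placeholder params file\n    model_weights_fn='weights/model.t7',  # fetched by the wget step above\n    create_script=None)\n\nres = recognizer.run('page.jpg',  # placeholder input photo\n                     lang='RU',\n                     draw_refined=infer_retinanet.BrailleInference.DRAW_NONE,\n                     find_orientation=False,\n                     process_2_sides=False,\n                     align_results=False,\n                     repeat_on_aligned=False)\nprint(len(res['lines']), 'lines recognized')  # res also carries 'boxes', 'labels', 'labeled_image'\n```"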
},
{
"alpha_fraction": 0.4926108419895172,
"alphanum_fraction": 0.6995074152946472,
"avg_line_length": 16,
"blob_id": "52ed175380da47c46a0a0301e7f3740f814f65d1",
"content_id": "86b4d349a78f0e66ffccf4540be89672997954df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 12,
"path": "/requirements.txt",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "torch~=1.9.0\npillow~=8.3.1\nnumpy~=1.21.2\nsetuptools~=52.0.0\nenvirons~=8.0.0\npymongo~=3.12.0\naiogram~=2.14.3\nwtforms~=2.3.3\nwerkzeug~=2.0.1\nalbumentations~=1.0.3\ntorchvision~=0.10.0\nopencv-python~=4.5.3.5"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6103093028068542,
"avg_line_length": 25.94444465637207,
"blob_id": "382463269736adb5cd6a73510162f741308686e0",
"content_id": "35c78846fadf65865ece3052e480d5ca84ecee11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 485,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 18,
"path": "/join_marked_txt.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\n\nsource_dir = Path(r'D:\\Programming.Data\\Braille\\My\\labeled\\ASI\\Student_Book\\56-61')\nout_file = 'all.txt'\nwrite_filename = True\n\nfiles = sorted(source_dir.glob('*.marked.txt'))\n\nwith (source_dir/out_file).open('w') as f:\n for fi in files:\n if write_filename:\n f.write(fi.name)\n f.write('\\n\\n')\n text = fi.read_text(encoding=\"utf-8\")\n f.write(text)\n f.write('\\n\\n')\n\nprint((source_dir/out_file).read_text())\n"
},
{
"alpha_fraction": 0.5794392228126526,
"alphanum_fraction": 0.5794392228126526,
"avg_line_length": 20.433332443237305,
"blob_id": "0c46405c3baae8433b34678bdce5f96286cea031",
"content_id": "8d585c5e4b6e4ef4cc5abb5bf073d6437eddbe23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 30,
"path": "/keyboards/default/MainMenu.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from aiogram.types import ReplyKeyboardMarkup, KeyboardButton\n\nset_lang_def = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(text=\"🔄Tilni tanlash\"),\n ],\n ],\n resize_keyboard=True,\n)\n\nmain_menu = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(text=\"📝Отправить картинку\"),\n KeyboardButton(text=\"↪️ Ortga qaytish\")\n ],\n ],\n resize_keyboard=True,\n)\n\nset_settings = ReplyKeyboardMarkup(\n keyboard=[\n [\n KeyboardButton(text=\"Braille -> Text\"),\n KeyboardButton(text=\"Text -> Braille\")\n ]\n ],\n resize_keyboard=True\n)"
},
{
"alpha_fraction": 0.8222222328186035,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 45,
"blob_id": "ed093f345e99ca275eee7a33f84ef0d8062bc2dd",
"content_id": "c110df1ff5d21fe2076e3a1cb7938a870e4158dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 1,
"path": "/keyboards/default/__init__.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from .MainMenu import main_menu, set_settings"
},
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 21,
"blob_id": "4cc9e7e9ef17c230e849930bca461df45f4bf4fe",
"content_id": "50480f072914cf6b67b5181d5747f1aee0baf663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/data/__init__.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from . import mathpix\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 40,
"blob_id": "1aeddfb7881a5a1470fa2c4bd0b4573398c9e957",
"content_id": "2f665d964770cd34266b68c101c27884cd2ae648",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 1,
"path": "/utils/db_api/__init__.py",
"repo_name": "YoshlikMedia/Braille-reader-bot",
"src_encoding": "UTF-8",
"text": "from .mongo import users_db, profiles_db"
}
] | 32 |
davr/liblightstone | https://github.com/davr/liblightstone | bddb84e4bd0e4370728c9711c1ba58529f9c5d5c | a8676762d41f0d175920da400c07052fde4dae0d | 8e167dff32e186939c2627e2a260f147ddb883cf | refs/heads/master | 2021-01-16T23:00:53.077912 | 2012-03-06T17:58:55 | 2012-03-06T17:58:55 | 884,426 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6089321374893188,
"alphanum_fraction": 0.6149441599845886,
"avg_line_length": 36.559139251708984,
"blob_id": "31355caa5ab36a37248bee7c1829d0ac2760132a",
"content_id": "9fd74ba264a035f5f64d51ddc0e57740a94cdd03",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 3493,
"license_type": "permissive",
"max_line_length": 191,
"num_lines": 93,
"path": "/CMakeLists.txt",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "######################################################################################\n# CMake directives\n######################################################################################\n\n#Require 2.6 or higher.\nCMAKE_MINIMUM_REQUIRED(VERSION 2.6.0 FATAL_ERROR)\n\n#See if we've got our cmake modules checked out. If not, advise to do so.\nIF(EXISTS \"${CMAKE_SOURCE_DIR}/../compily_buildd/cmake\")\n SET(BUILDSYS_CMAKE_DIR \"${CMAKE_SOURCE_DIR}/../compily_buildd/cmake\")\n MESSAGE(STATUS \"Using outside compily_buildd directory ${BUILDSYS_CMAKE_DIR}\")\nELSEIF(EXISTS \"${CMAKE_SOURCE_DIR}/compily_buildd/cmake\")\n SET(BUILDSYS_CMAKE_DIR \"${CMAKE_SOURCE_DIR}/compily_buildd/cmake\")\n MESSAGE(STATUS \"Using compily_buildd git submodule ${BUILDSYS_CMAKE_DIR}\")\nELSE(EXISTS \"${CMAKE_SOURCE_DIR}/compily_buildd/cmake\")\n MESSAGE(FATAL_ERROR\n \"Cannot find compily_buildd directory for np labs project compilation functions.\\n\"\n \"Make sure you've either put the compily_buildd directory in the same root as your repository directory, or that you've used the compily_buildd submodule (git submodule update --init).\\n\"\n \"NP Labs build repository is available at git://github.com/qdot/compily_buildd.git\"\n )\nENDIF(EXISTS \"${CMAKE_SOURCE_DIR}/../compily_buildd/cmake\")\n\nLIST(APPEND CMAKE_MODULE_PATH ${BUILDSYS_CMAKE_DIR})\n\nINCLUDE( ${BUILDSYS_CMAKE_DIR}/BuildSysCMakeLib.cmake )\n\n#setting link directory policy\n\nIF(COMMAND cmake_policy)\n CMAKE_POLICY(SET CMP0003 NEW)\nENDIF(COMMAND cmake_policy)\n\n######################################################################################\n# Project declaration and options\n######################################################################################\n\n#Project declaration\n\nPROJECT(LIBLIGHTSTONE)\nINITIALIZE_BUILD()\n\n#Common Options\n\nOPTION_LIBRARY_BUILD_STATIC(ON)\nOPTION_LIBRARY_BUILD_SHARED(ON)\n\n#Project specific options\nOPTION(BUILD_EXAMPLES \"Build liblightstone examples\" ON)\n\n######################################################################################\n# Project specific package finding\n######################################################################################\n\nIF(NOT WIN32)\n FIND_PACKAGE(libusb-1.0 REQUIRED)\n INCLUDE_DIRECTORIES(${LIBUSB_1_INCLUDE_DIRS})\n SET(LIBLIGHTSTONE_DEP_LIBS ${LIBUSB_1_LIBRARIES})\nELSE()\n SET(WDK_PATH CACHE PATH \"Path to WDK Installation\")\n INCLUDE_DIRECTORIES(${WDK_PATH}/inc/api ${WDK_PATH}/inc/crt ${WDK_PATH}/inc)\n LINK_DIRECTORIES(${WDK_PATH}/lib/wxp/i386)\n SET(LIBLIGHTSTONE_DEP_LIBS \"hid.lib\" \"setupapi.lib\")\nENDIF()\n\n######################################################################################\n# Project specific globals\n######################################################################################\n\n#library definitions\n\nSET(LIBLIGHTSTONE_MAJOR_VERSION 1)\nSET(LIBLIGHTSTONE_MINOR_VERSION 5)\nSET(LIBLIGHTSTONE_BUILD_VERSION 0)\n\nINCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include)\n\nSET(LIBLIGHTSTONE_VERSION ${LIBLIGHTSTONE_MAJOR_VERSION}.${LIBLIGHTSTONE_MINOR_VERSION}.${LIBLIGHTSTONE_BUILD_VERSION})\n\n#library name definitions\n\nCREATE_LIBRARY_LINK_NAME(lightstone)\n\nINCLUDE_DIRECTORIES(${LIBLIGHTSTONE_INCLUDE_DIR})\nLINK_DIRECTORIES(${LIBRARY_OUTPUT_PATH})\n\n#If we build libusb staticly on apple, we need the proper frameworks\nIF(BUILD_STATIC AND APPLE)\n LIST(APPEND LIBLIGHTSTONE_DEP_LIBS \"-framework IOKit\" \"-framework CoreFoundation\")\nENDIF(BUILD_STATIC AND 
APPLE)\n\nADD_SUBDIRECTORY(include)\nADD_SUBDIRECTORY(src)\nADD_SUBDIRECTORY(examples)\n"
},
{
"alpha_fraction": 0.5435897707939148,
"alphanum_fraction": 0.5507692098617554,
"avg_line_length": 24,
"blob_id": "6aab042a025a2b34e5a8840b9cdaf02a2079a693",
"content_id": "86e3915dc3e0772bb2e729539fbd4a6df32f05d2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 975,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 39,
"path": "/src/CMakeLists.txt",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "######################################################################################\n# Build function for main library\n######################################################################################\n\nIF(NOT WIN32)\n INCLUDE_DIRECTORIES(.)\n SET(SRCS lightstone.c lightstone_libusb1.c)\n\n BUILDSYS_BUILD_LIB(\n NAME lightstone\n SOURCES \"${SRCS}\" \n CXX_FLAGS FALSE\n LINK_LIBS ${LIBLIGHTSTONE_DEP_LIBS}\n LINK_FLAGS FALSE\n DEPENDS FALSE\n SHOULD_INSTALL TRUE\n VERSION \"${LIBLIGHTSTONE_VERSION}\"\n )\nELSE()\n INCLUDE_DIRECTORIES(../libnputil/include/ .)\n SET(SRCS lightstone.c lightstone_win32.c)\n\n #SET(DEFS \"-DUSE_WIN32\")\n\n IF(BUILD_SHARED)\n LIST(APPEND DEFS \"-DLIGHTSTONE_DYNAMIC\")\n ENDIF()\n\n BUILDSYS_BUILD_LIB(\n NAME lightstone\n SOURCES \"${SRCS}\" \n CXX_FLAGS \"${DEFS}\"\n LINK_LIBS ${LIBLIGHTSTONE_DEP_LIBS}\n LINK_FLAGS FALSE\n DEPENDS FALSE\n SHOULD_INSTALL TRUE\n VERSION \"${LIBLIGHTSTONE_VERSION}\"\n )\nENDIF()\n"
},
{
"alpha_fraction": 0.6007533073425293,
"alphanum_fraction": 0.6205273270606995,
"avg_line_length": 19.423076629638672,
"blob_id": "b70d208458717259db4ab0401973ffd25e02cf09",
"content_id": "78439eb838047e3d99cec57ca88ecf0f3a2d5e85",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1062,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 52,
"path": "/examples/lightstone_test/lightstone_test.c",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "/***\n * @file lightstone_test.c\n * @brief Tests lightstone connection and communication\n * @author Kyle Machulis ([email protected])\n * @copyright (c) 2006-2011 Nonpolynomial Labs/Kyle Machulis\n * @license BSD License\n *\n * Project info at http://liblightstone.nonpolynomial.com/\n *\n */\n\n#include \"lightstone/lightstone.h\"\n#include <stdio.h>\n\nint main(int argc, char** argv)\n{\n\tlightstone* test = lightstone_create();\n\tlightstone_info r;\n\tint ret, count, i, j;\n\n\tcount = lightstone_get_count(test);\n\n\tif(!count)\n\t{\n\t\tprintf(\"No lightstones connected!\\n\");\n\t\treturn 1;\n\t}\n\tprintf(\"Found %d Lightstones\\n\", count);\n\tfor(i = 0; i < count; ++i)\n\t{\n\t\tret = lightstone_open(test, i);\n\t\tif(ret < 0)\n\t\t{\n\t\t\tprintf(\"Cannot open lightstone!\\n\");\n\t\t\treturn 1;\n\t\t}\n\t\tprintf(\"Opening lightstone %d\\n\", i + 1);\n\t\tfor(j = 0; j < 1000; ++j)\n\t\t{\n\t\t\tr = lightstone_get_info(test);\n\t\t\tif(r.hrv < 0) \n\t\t\t{\n\t\t\t\tprintf(\"Error reading lightstone, shutting down!\\n\");\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tprintf (\"%f %f\\n\", r.hrv, r.scl);\n\t\t}\n\t\tprintf(\"Closed lightstone %d\\n\", i + 1);\n\t}\n\tlightstone_delete(test);\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.4848156273365021,
"alphanum_fraction": 0.5075922012329102,
"avg_line_length": 29.72222137451172,
"blob_id": "3ed32117f93f6927b399638ff57b2ff79b50578b",
"content_id": "5fa8e47bf9e5c7560281ddf28f5ec22ad661c0cf",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2766,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 90,
"path": "/python/lightstone.py",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom contextlib import contextmanager, closing\nimport sys\nfrom libhid import *\nimport os\nimport re\n\nclass lightstone(object):\n # VID : PID\n LIGHTSTONE_ID_LIST = { 0x0483 : 0x0035, 0x14FA : 0x0001 } \n\n hrv = 0.0\n scl = 0.0\n hid = None\n rawMsg = None\n\n def __init__(self):\n self._init_usb()\n self.hid = hid_new_HIDInterface()\n return\n\n def _init_usb(self):\n ret = hid_init()\n if ret != HID_RET_SUCCESS:\n sys.stderr.write(\"hid_init failed with return code %d.\\n\" % ret)\n\n @classmethod\n def open(cls):\n l = lightstone()\n l._open()\n return l\n\n def _open(self):\n matcher = HIDInterfaceMatcher()\n matcher.vendor_id = 0x0483\n matcher.product_id = 0x0035\n\n ret = hid_force_open(self.hid, 0, matcher, 3)\n if ret != HID_RET_SUCCESS:\n sys.stderr.write(\"hid_force_open failed with return code %d.\\n\" % ret)\n return False\n\n @contextmanager\n def closing(self):\n self.close()\n\n def close(self):\n ret = hid_close(self.hid)\n if ret != HID_RET_SUCCESS:\n sys.stderr.write(\"hid_close failed with return code %d.\\n\" % ret)\n return False\n hid_cleanup()\n return True\n \n def get_data(self):\n ret = 0\n message_finished = False\n while not message_finished:\n InputStruct = hid_interrupt_read(self.hid,0x81,0x8,10); \n if InputStruct[0] != HID_RET_SUCCESS:\n continue\n InputReport = InputStruct[1]\n for msg_index in range(1, ord(InputReport[0]) + 1):\n if self.rawMsg is None and InputReport[msg_index] != '<':\n continue\n elif self.rawMsg is None:\n self.rawMsg = ''\n if InputReport[msg_index] != '\\r' and InputReport[msg_index] != '\\n':\n self.rawMsg += InputReport[msg_index]\n elif InputReport[msg_index] == '\\n':\n raw_re = re.compile(\"\\<RAW\\>(?P<scl>[0-9A-Fa-f]{4}) (?P<hrv>[0-9A-Fa-f]{4})\\<\\\\\\\\RAW\\>\")\n result = re.search(raw_re, self.rawMsg)\n if result:\n self.scl = int(result.group(\"scl\"), 16) * .01\n self.hrv = int(result.group(\"hrv\"), 16) * .001\n message_finished = True;\n self.rawMsg = None\n \ndef main(argv = None):\n with closing(lightstone.open()) as l:\n try:\n while(1):\n l.get_data()\n print \"%f %f\" % (l.scl, l.hrv)\n except KeyboardInterrupt, e:\n print \"Exiting\"\n\nif __name__ == '__main__':\n sys.exit(main())\n\n"
},
{
"alpha_fraction": 0.40625,
"alphanum_fraction": 0.40625,
"avg_line_length": 57.66666793823242,
"blob_id": "c762c547f03bbf1fec3e8b1065ab2bb816732ae5",
"content_id": "be9887ad676705916eaa1c9888070cdaaad33b25",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 352,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 6,
"path": "/include/CMakeLists.txt",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "######################################################################################\n# Installation of headers\n######################################################################################\n\n#Only install one of the comm headers\nINSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/lightstone/lightstone.h DESTINATION ${INCLUDE_INSTALL_DIR}/lightstone)\n"
},
{
"alpha_fraction": 0.4809619188308716,
"alphanum_fraction": 0.4809619188308716,
"avg_line_length": 30.1875,
"blob_id": "8f54e85f16a4b426058fbbd8f7f5e24ffb69c259",
"content_id": "ee7e9b896d8a68765308e9913a065a3304c3e0e8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 499,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 16,
"path": "/examples/CMakeLists.txt",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "######################################################################################\n# Build function for liblightstone_test\n######################################################################################\n\nSET(SRCS lightstone_test/lightstone_test.c)\nSET(LIBS ${liblightstone_LIBRARY} ${LIBLIGHTSTONE_DEP_LIBS})\n\nBUILDSYS_BUILD_EXE(\n NAME lightstone_test\n SOURCES \"${SRCS}\" \n CXX_FLAGS FALSE\n LINK_LIBS \"${LIBS}\"\n LINK_FLAGS FALSE \n DEPENDS lightstone_DEPEND\n SHOULD_INSTALL TRUE\n )\n"
},
{
"alpha_fraction": 0.63231360912323,
"alphanum_fraction": 0.645975649356842,
"avg_line_length": 19.04166603088379,
"blob_id": "ff235d81e0d687762020fcafb368fd2e0723d8b0",
"content_id": "54dbee5ae603bd87cd56b959076937d8736d296b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3367,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 168,
"path": "/src/lightstone_libusb1.c",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "/***\n * @file lightstone_libusb1.c\n * @brief LibUSB based implementation of lightstone communication\n * @author Kyle Machulis ([email protected])\n * @copyright (c) 2006-2011 Nonpolynomial Labs/Kyle Machulis\n * @license BSD License\n *\n * Project info at http://liblightstone.nonpolynomial.com/\n *\n */\n\n\n#include <stdlib.h>\n#include \"lightstone/lightstone.h\"\n\n\nLIGHTSTONE_DECLSPEC lightstone* lightstone_create()\n{\n\tlightstone* s = (lightstone*)malloc(sizeof(lightstone));\n\ts->_is_open = 0;\n\ts->_is_inited = 0;\n\tif(libusb_init(&s->_context) < 0)\n\t{\n\t\treturn NULL;\n\t}\n\ts->_is_inited = 1;\t\n\treturn s;\n}\n\nLIGHTSTONE_DECLSPEC int lightstone_get_count(lightstone* s)\n{\n\tstruct libusb_device **devs;\n\tstruct libusb_device *found = NULL;\n\tstruct libusb_device *dev;\n\tsize_t i = 0;\n\tint j = 0;\n\tint count = 0;\n\n\tif (!s->_is_inited)\n\t{\n\t\treturn E_LIGHTSTONE_NOT_INITED;\n\t}\n\t\n\tif (libusb_get_device_list(s->_context, &devs) < 0)\n\t{\n\t\treturn E_LIGHTSTONE_DRIVER_ERROR;\n\t}\n\n\twhile ((dev = devs[i++]) != NULL)\n\t{\n\t\tstruct libusb_device_descriptor desc;\n\t\tint dev_error_code;\n\t\tdev_error_code = libusb_get_device_descriptor(dev, &desc);\n\t\tif (dev_error_code < 0)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\tfor(j = 0; j < LIGHTSTONE_VID_PID_PAIRS_COUNT; ++j)\n\t\t{\n\t\t\tif (desc.idVendor == lightstone_vid_pid_pairs[j][0] && desc.idProduct == lightstone_vid_pid_pairs[j][1])\n\t\t\t{\n\t\t\t\t++count;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\tlibusb_free_device_list(devs, 1);\n\treturn count;\n}\n\nLIGHTSTONE_DECLSPEC int lightstone_open(lightstone* s, unsigned int device_index)\n{\n\tint ret;\n\tstruct libusb_device **devs;\n\tstruct libusb_device *found = NULL;\n\tstruct libusb_device *dev;\n\tsize_t i = 0;\n\tint j = 0;\n\tint count = 0;\n\tint device_error_code = 0;\n\n\tif (!s->_is_inited)\n\t{\n\t\treturn E_LIGHTSTONE_NOT_INITED;\n\t}\n\n\tif ((device_error_code = libusb_get_device_list(s->_context, &devs)) < 0)\n\t{\n\t\treturn E_LIGHTSTONE_DRIVER_ERROR;\n\t}\n\n\twhile ((dev = devs[i++]) != NULL)\n\t{\n\t\tstruct libusb_device_descriptor desc;\n\t\tdevice_error_code = libusb_get_device_descriptor(dev, &desc);\n\t\tif (device_error_code < 0)\n\t\t{\n\t\t\tlibusb_free_device_list(devs, 1);\n\t\t\treturn E_LIGHTSTONE_NOT_INITED;\n\t\t}\n\t\tfor(j = 0; j < LIGHTSTONE_VID_PID_PAIRS_COUNT; ++j)\n\t\t{\n\t\t\tif (desc.idVendor == lightstone_vid_pid_pairs[j][0] && desc.idProduct == lightstone_vid_pid_pairs[j][1])\n\t\t\t{\n\t\t\t\tif(count == device_index)\n\t\t\t\t{\n\t\t\t\t\tfound = dev;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t++count;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif(found) break;\n\t\t}\n\t\tif(found) break;\n\t}\n\n\tif (found)\n\t{\n\t\tdevice_error_code = libusb_open(found, &s->_device);\n\t\tif (device_error_code < 0)\n\t\t{\n\t\t\tlibusb_free_device_list(devs, 1);\n\t\t\treturn E_LIGHTSTONE_NOT_INITED;\n\t\t}\n\t}\n\telse\n\t{\n\t\treturn E_LIGHTSTONE_NOT_INITED;\t\t\n\t}\n\ts->_is_open = 1;\n\n\tif(libusb_kernel_driver_active(s->_device, 0))\n\t{\n\t\tlibusb_detach_kernel_driver(s->_device, 0);\n\t}\n\tret = libusb_claim_interface(s->_device, 0);\n\n\treturn ret;\n}\n\nLIGHTSTONE_DECLSPEC int lightstone_close(lightstone* s)\n{\n\tif(!s->_is_open)\n\t{\n\t\treturn E_LIGHTSTONE_NOT_OPENED;\n\t}\n\tif (libusb_release_interface(s->_device, 0) < 0)\n\t{\n\t\treturn E_LIGHTSTONE_NOT_INITED;\t\t\t\t\n\t}\n\tlibusb_close(s->_device);\n\ts->_is_open = 0;\n\treturn 0;\n}\n\nLIGHTSTONE_DECLSPEC void lightstone_delete(lightstone* 
dev)\n{\n\tfree(dev);\n}\n\nint lightstone_read(lightstone* dev, unsigned char* input_report)\n{\n\tint trans;\n\tint ret = libusb_bulk_transfer(dev->_device, LIGHTSTONE_IN_ENDPT, input_report, 8, &trans, 0x10);\n\treturn trans;\n}\n"
},
{
"alpha_fraction": 0.7612144351005554,
"alphanum_fraction": 0.7683260440826416,
"avg_line_length": 39.17582321166992,
"blob_id": "0859e05ed7157f2d9f8311a825b06f25509e8134",
"content_id": "b2a3420e59f50e9c2977dc40298ac8459d2d853e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "AsciiDoc",
"length_bytes": 3656,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 91,
"path": "/README.asciidoc",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "= liblightstone =\n\nby Kyle Machulis <[email protected]>\n\nNonpolynomial Labs - http://www.nonpolynomial.com\n\n== Description ==\n\nThis is a C-based development library for the Lightstone, a Biometric\nUSB device distributed with the Journey to Wild Divine Relaxation\nSoftware (http://www.wilddivine.com)\n\nliblightstone is released under the BSD License.\n\n== Package Information ==\n\nProject Information @ http://liblightstone.nonpolynomial.com\n\nSource repo @ http://www.github.com/qdot/liblightstone\n\nReleases @ http://www.sourceforge.net/projects/liblightstone\n\n== A Tale of Two Libraries ==\n\nliblightstone comes in two flavors: Win32 and libusb-1.0. The Win32\nversion uses direct Win32 DDK calls to access the windows HID system,\nand reads raw reports from there. libusb-1.0 works for all platforms\ncurrently supporting the library. As of this writing (late December\n2009), the libusb-1.0 version of liblightstone has been tested on OS X\nand linux.\n\n== Library Requirements (For Compilation) ==\n\n\n- CMake (Required on all platforms) - http://www.cmake.org\n- WDK (Windows Only) - http://www.microsoft.com/whdc/devtools/WDK/default.mspx\n- libusb-1.0 (All non-windows platforms) - http://www.libusb.org\n\n== Build Notes ==\n\nTo build liblightstone, make a subdirectory in the source dir (called\nwhatever you want), then go into that directory and run\n\"cmake ..\". This will generate the project files for your platform\n(makefiles for OS X and linux, visual studio for windows). For more\ninformation on project generators, look at the cmake documentation.\n\n== Notes on Compilation and Usage ==\n\n=== Mac OS X ===\n\nAssuming you have not installed the Journey to Wild Divine software\nand are not installing the dmg version of liblightstone, you will need\nto install the \"Lightstone OS X Extension\" in order to use the\nlightstone on OS X. \n\nSimply copy the LightstoneNullDriver.kext bundle included with the\npackage to the /System/Library/Extensions directory and reboot.\n\n=== Linux ===\n\nThe VID/PID pair of the lightstone will need to be blacklisted on\nlinux in order to use liblightstone there.\n\n== License ==\n\n---------------------\nCopyright (c) 2007-2011, Kyle Machulis/Nonpolynomial Labs\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of the Kyle Machulis/Nonpolynomial Labs nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY Kyle Machulis/Nonpolynomial Labs ''AS IS'' AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL Kyle Machulis/Nonpolynomial Labs BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\n---------------------\n"
},
{
"alpha_fraction": 0.5277073383331299,
"alphanum_fraction": 0.563020646572113,
"avg_line_length": 26.412370681762695,
"blob_id": "2b40e543ff87405b0a8b0df5076262b90dbd88b2",
"content_id": "750e978e7dfc8fd406974670d2746d7afd623aed",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5522,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 194,
"path": "/python/test.py",
"repo_name": "davr/liblightstone",
"src_encoding": "UTF-8",
"text": "import ctypes, pygame, sys\r\nfrom pygame.locals import *\r\nfrom ctypes import *\r\n\r\n# Contains lightstone device readings\r\nclass lightstone_info(Structure):\r\n _fields_ = [(\"hrv\", c_float), (\"scl\", c_float)]\r\n\r\nclass lightstone():\r\n def __init__(self):\r\n\r\n # Open DLL\r\n self.dll = ctypes.CDLL(\"d:\\\\lightstone\\py\\lightstone.dll\")\r\n\r\n # Define argument types & return types for all methods\r\n self._create = self.dll.lightstone_create\r\n self._create.argtypes = []\r\n self._create.restype = c_void_p\r\n\r\n self._delete = self.dll.lightstone_delete\r\n self._delete.argtypes = [c_void_p] \r\n \r\n self._count = self.dll.lightstone_get_count\r\n self._count.restype = c_int\r\n self._count.argtypes = [c_void_p]\r\n\r\n self._open = self.dll.lightstone_open\r\n self._open.restype = c_int\r\n self._open.argtypes = [c_void_p, c_uint]\r\n\r\n self._get_info = self.dll.lightstone_get_info\r\n self._get_info.restype = lightstone_info\r\n self._get_info.argtypes = [c_void_p]\r\n\r\n self._valid = self.dll.lightstone_valid\r\n self._valid.restype = c_int\r\n self._valid.argtypes = [c_void_p]\r\n\r\n self._close = self.dll.lightstone_close\r\n self._close.restype = c_int\r\n self._close.argtypes = [c_void_p]\r\n\r\n # Call lightstone_create\r\n self.ptr = self._create()\r\n\r\n # Destructor, calls lightstone_delete\r\n def __del__(self):\r\n self._delete(self.ptr) \r\n\r\n # Return count of lightstone devices\r\n def count(self):\r\n return self._count(self.ptr)\r\n\r\n # Open given lightstone device\r\n def open(self, idx):\r\n return self._open(self.ptr, idx)\r\n\r\n # Return device readings\r\n def get_info(self):\r\n return self._get_info(self.ptr)\r\n\r\n # Returns > 0 if device is initalized, 0 otherwise\r\n def valid(self):\r\n return self._valid(self.ptr)\r\n\r\n # Close opened lightstone device\r\n def close(self):\r\n return self._close(self.ptr)\r\n\r\nWID=1500\r\nHGT=480\r\npygame.init()\r\nfpsClock = pygame.time.Clock()\r\nwindowSurfaceObj = pygame.display.set_mode((WID,HGT))\r\npygame.display.set_caption('Lightstone')\r\nred = pygame.Color(255,0,0)\r\nblue = pygame.Color(0,0,255)\r\nwhite = pygame.Color(255,255,255)\r\ngrey = pygame.Color(200,200,200)\r\n \r\nstone = lightstone()\r\nprint \"Counted lightstones: \"+str(stone.count())\r\nprint \"Opening lightstone: \"\r\nres = stone.open(0)\r\nif res == 0:\r\n print \"Success!\"\r\nelse:\r\n print \"Failure: \"+str(res)\r\n\r\nval = val2 = stone.get_info()\r\nx2 = 0\r\nx = 1\r\n\r\nwindowSurfaceObj.fill(white)\r\n\r\nfont = pygame.font.Font('freesansbold.ttf', 16)\r\n\r\nhearts = list()\r\nrate=0\r\navgbeat = 0\r\n\r\nfor i in range(200):\r\n hearts.append((i/1000, i))\r\n\r\nprint [xx[0] for xx in hearts[i-3:i+3]]\r\n#sys.exit(0)\r\nx=0\r\nirate=irate2=0\r\nrate2=0\r\nFWD=14\r\nREV=14\r\nwhile True:\r\n \r\n val = stone.get_info()\r\n while val.hrv == 0.0:\r\n val = stone.get_info()\r\n\r\n hearts.append((val.hrv, pygame.time.get_ticks()))\r\n s = 0\r\n for i in hearts:\r\n s = s + i[0]\r\n avg = s / len(hearts)\r\n #avg = sum(hearts) / len(hearts)\r\n t0 = 0\r\n beats = list()\r\n for i in range(REV, len(hearts)-FWD):\r\n v = hearts[i][0]\r\n t = hearts[i][1]\r\n if v > avg and v >= max([xx[0] for xx in hearts[i-REV:i+FWD]]):\r\n if t0 > 0:\r\n beats.append(t-t0)\r\n t0 = t\r\n \r\n if len(beats) > 0 and sum(beats) > 0:\r\n weights = list()\r\n for i in range(len(beats)):\r\n weights.append(i)\r\n top = sum(beats[ii] * weights[ii] for ii in range(len(beats)))\r\n bot = sum(weights[ii] for ii in 
range(len(beats)))\r\n print str(top)+\" / \"+str(bot)\r\n avgbeat = float(top) / float(bot) if bot != 0 else 0\r\n rate = 60000.0 / float(avgbeat) if avgbeat != 0 else 0\r\n \r\n hearts.pop(0)\r\n\r\n pygame.draw.rect(windowSurfaceObj, white, (20, 10, WID, 50))\r\n \r\n msg = font.render(str(round(rate)), False, red)\r\n msgRect = msg.get_rect()\r\n msgRect.topleft = (20, 10)\r\n windowSurfaceObj.blit(msg, msgRect)\r\n\r\n msg = font.render(str(round(irate)), False, grey)\r\n msgRect = msg.get_rect()\r\n msgRect.topleft = (80, 10)\r\n windowSurfaceObj.blit(msg, msgRect)\r\n\r\n\r\n i = len(hearts) - 15\r\n\r\n irate = 60000.0 / float(beats[len(beats)-1]) if len(beats)>0 else 0\r\n \r\n pygame.draw.line(windowSurfaceObj, blue, (x-1, 480-hearts[i-1][0]*100), (x, 480-hearts[i][0]*100))\r\n\r\n pygame.draw.line(windowSurfaceObj, grey, (x2-1, 200-irate2), (x2, 200-irate))\r\n pygame.draw.line(windowSurfaceObj, red, (x2-1, 200-rate2), (x2, 200-rate))\r\n rate2 = rate\r\n irate2= irate\r\n\r\n v = hearts[i][0]\r\n if v > avg and v >= max([xx[0] for xx in hearts[i-REV:i+FWD]]):\r\n pygame.draw.rect(windowSurfaceObj, red, (x-2, 480-(hearts[i][0]*100)-2, 4, 4)) \r\n\r\n print str(v)+\" / \"+str(avg)+\" M: \"+str(max([xx[0] for xx in hearts[i-REV:i+FWD]]))+\" B: \"+str(beats[len(beats)-1] if len(beats)>0 else 0)\r\n\r\n val2 = val\r\n \r\n x = x+1\r\n if(x>=WID):\r\n pygame.draw.rect(windowSurfaceObj, white, (0, 170,WID,HGT))\r\n x = 1\r\n\r\n x2 = x2 + 0.25\r\n if(x2>=WID):\r\n pygame.draw.rect(windowSurfaceObj, white, (0, 0, WID,170))\r\n x2 = 1\r\n\r\n for e in pygame.event.get():\r\n if e.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n pygame.display.update()\r\n fpsClock.tick(30)\r\n \r\n"
}
] | 9 |
luiz158/FluentPython | https://github.com/luiz158/FluentPython | d74454f79693c57fd8deeea7b23d57866fa2d8b1 | 06d9a5b4440418fee0e2a4a280deb7d08c9d38e3 | 684f79ad6bb3ee4222b432d45957810a673b0ed0 | refs/heads/master | 2022-03-28T07:52:01.321696 | 2020-01-11T12:07:00 | 2020-01-11T12:07:00 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.58980393409729,
"alphanum_fraction": 0.5984313488006592,
"avg_line_length": 24.459999084472656,
"blob_id": "cb58673d4032d0149a06c84187d1482aca317c39",
"content_id": "f61428d95a8a7d295a3529a53bc7a034faff43ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1491,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 50,
"path": "/chapter03/03-04.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# setdefault를 대신해서 defaultdict를 사용합시다.\n# defaultdict는 존재하지 않은 키로 검색할 때 요청에 따라\n# 자동으로 항목을 생성하도록 설정되어 있습니다.\n\nimport sys\nimport re\nimport collections\n\nWORD_RE = re.compile(r'\\w+')\nindex = collections.defaultdict(list) # 없는 키를 검색하면 list를 생성함\nwith open('zen.txt', encoding='utf-8') as fp:\n for line_no, line in enumerate(fp, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n index[word].append(location)\n\n\nfor word in sorted(index, key=str.upper):\n print(word, index[word])\n\n\nclass StrKeyDict(dict):\n\n def __missing__(self, key):\n # KeyError가 발생하면 __missing__을 실행합니다\n # key가 str인데 __missing__이라면 원래 없는 키이니 KeyError를 발생하고\n if isinstance(key, str):\n raise KeyError(key)\n # key가 str이 아니면 str으로 바꿔서 다시 한번 시도한다\n return self[str(key)]\n\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n\n def __contains__(self, key):\n return key in self.keys() or str(key) in self.keys()\n\n\nd = StrKeyDict([('2', 'two'), ('4', 'four')])\nprint(d['2'])\nprint(d[4])\nprint(d.get('2'))\nprint(d.get(4))\nprint(2 in d)\nprint(4 in d)\n\n\n"
},
{
"alpha_fraction": 0.6039823293685913,
"alphanum_fraction": 0.6183628439903259,
"avg_line_length": 17.367347717285156,
"blob_id": "ee418eb67a9c54745a21b0515d08908dbc517685",
"content_id": "68859d4213bf907b72bef583fdcdda2de331fe52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1190,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 49,
"path": "/chapter07/07-05.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n클로저 is not equal to 익명함수!\n\n클로저는 함수본체에서 정의핮 않고 참조하는 비전역변수를 포함한 확장범위를 가진 함수이다.\n함수 본체 외부에 정의되 ㄴ비전역변수에 접근할 수 잇다는 것이 중요하다.\n\n\"\"\"\nclass Average():\n\n def __init__(self):\n self.series = []\n\n def __call__(self, new_value):\n self.series.append(new_value)\n total = sum(self.series)\n return total / len(self.series)\n\navg = Average()\nprint(avg(10))\nprint(avg(11))\nprint(avg(12))\n\ndef make_averager():\n series = []\n def averager(new_value):\n series.append(new_value)\n total = sum(series)\n return total / len(series)\n\n return averager\n\navg = make_averager()\nprint(avg(10))\nprint(avg(11))\nprint(avg(12))\n\n\"\"\"\nseries는 자유변수: 지역 범위에 바인딩되어 있지 않은 변수를 말한다\n\"\"\"\n\nprint(avg.__code__.co_varnames)\nprint(avg.__code__.co_freevars)\nprint(avg.__closure__)\nprint(avg.__closure__[0].cell_contents)\n\n\"\"\"\n함수가 비전역 외부 변수를 다루는 경우는 그 함수가 다른 함수 안에 정의된\n경우뿐이라는 점에 주의하라.\n\"\"\"\n\n\n\n\n"
},
{
"alpha_fraction": 0.7408638000488281,
"alphanum_fraction": 0.7441860437393188,
"avg_line_length": 26.363636016845703,
"blob_id": "9a0a10e408e9359fca081bb7a54480429ce06a5f",
"content_id": "2d2793a4e90a573db776431944bd60886b4bd2b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 11,
"path": "/chapter03/03-05.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# collections.OrderedDict -> key가 입력된 순서대로 항목을 반복하는 순서를 예측할 수 있다\n# collections.ChainMap -> 매핑의 목록을 담고 있다\n# collections.Counter -> 모든 키가 각각 정수형 카운터가 있어서 기존 기를 갱신하면 카운터가 늘어난다\n\nimport collections\nct = collections.Counter('abracadabra')\nprint(ct)\n\nct.update('aaaaazzz')\nprint(ct)\nprint(ct.most_common(2))\n"
},
{
"alpha_fraction": 0.5668604373931885,
"alphanum_fraction": 0.5726743936538696,
"avg_line_length": 25.6842098236084,
"blob_id": "fde9f12f4b56a3ffb2d08d76d70c3787df5b9bcf",
"content_id": "e0e90dc09151924969ef12016d21584664fec0ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1088,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 38,
"path": "/chapter05/05-08.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "import bobo\n\[email protected]('/')\ndef hello(person):\n return 'Hello %s' % person\n\ndef clip(text, max_len=80):\n \"\"\"\n max_len 앞이나 뒤의 마지막 공백에서 잘라낸 텍스트를 반환한다.\n \"\"\"\n end = None\n if len(text) > max_len:\n # text.rfind returns index of sub from right side of text\n # rfind(sub, start, end)\n space_before = text.rfind(' ', 0, max_len)\n if space_before >= 0: end = space_before\n else:\n space_after = text.rfind(' ', 0, max_len)\n if space_after >= 0:\n end = space_after\n if end is None: # 공백이 없다.\n end = len(text)\n return text[:end].rstrip()\n\n\nprint(clip.__defaults__)\nprint(clip.__code__)\nprint(clip.__code__.co_varnames)\n\nfrom inspect import signature\nsig = signature(clip)\nprint(sig)\nfor name, param in sig.parameters.items():\n print(param.kind, ':', name, '=', param.default)\n\nmy_tag = {'name': 'img', 'title': 'Sunset Boulevard', 'src': 'sunset.jpg', 'cls': 'framed'}\nbound_args = sig.bind(**my_tag)\nprint(bound_args)\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6138952374458313,
"alphanum_fraction": 0.6400911211967468,
"avg_line_length": 30.35714340209961,
"blob_id": "b0189f7f1ab6c4ea68ab534e14e36c1529e7e1da",
"content_id": "cacc64bd532b234324e252b0ad1fc9eea13623f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 28,
"path": "/chapter04/04-09.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "import re\nre_numbers_str = re.compile(r'\\d+')\nre_words_str = re.compile(r'\\w+')\nre_numbers_byte = re.compile(rb'\\d+')\nre_words_byte = re.compile(rb'\\w+')\n\ntext_str = text_str = (\"Ramanujan saw \\u0be7\\u0bed\\u0be8\\u0bef\" # <3>\n \" as 1729 = 1³ + 12³ = 9³ + 10³.\")\nprint(text_str)\ntext_bytes = text_str.encode('utf_8')\nprint(text_bytes)\n\nprint('Text', repr(text_str), sep='\\n ')\nprint('Numbers')\nprint(' str :', re_numbers_str.findall(text_str))\nprint(' bytes :', re_numbers_byte.findall(text_bytes))\nprint('Words')\nprint(' str :', re_words_str.findall(text_str))\nprint(' bytes :', re_words_byte.findall(text_bytes))\n\nimport os\nprint(os.listdir('.'))\nprint(os.listdir(b'.'))\n\npi_name_bytes = os.listdir(b'.')[1]\npi_name_str = pi_name_bytes.decode('ascii', 'surrogateescape')\nprint(pi_name_str)\nprint(pi_name_str.encode('ascii', 'surrogateescape'))\n"
},
{
"alpha_fraction": 0.5954670310020447,
"alphanum_fraction": 0.6112637519836426,
"avg_line_length": 18.58108139038086,
"blob_id": "2d210c0335d0676d8ada5032556d496de804bf2a",
"content_id": "8bf432e2e6c93492ddd08d6fcf5a69d46f679319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1794,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 74,
"path": "/chapter08/08-04.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# 파이썬 함수의 매개변수는 call by sharing으로 전달됩니다.\n# 다른 말로 하면 함수 안의 매개변수는 실제 인수의 별명이다.\n\ndef f(a, b):\n a += b\n return a # 이렇게 하면 새로 정의된 함수의 reference를 return 하는 것임\n\nx = 1\ny = 2\nprint(f(x, y))\nprint(x, y)\n\na = [1, 2]\nb = [3, 4]\nprint(f(a, b))\nprint(a, b)\n\nt = (10, 20)\nu = (30, 40)\nprint(f(t, u))\nprint(t, u)\n\nclass HauntedBus:\n\n def __init__(self, passengers=[]):\n self.passengers = passengers\n\n def pick(self, name):\n self.passengers.append(name)\n\n def drop(self, name):\n self.passengers.remove(name)\n\n def __repr__(self):\n return '<Bus {}>'.format(repr(self.passengers))\n\n\nbus1 = HauntedBus(['Alice', 'Bill'])\nprint(bus1)\nbus1.pick('Charlie')\nprint(bus1)\n\nbus2 = HauntedBus()\nbus2.pick('Charlie')\nprint(bus2)\n\n# 함수가 로드될 때 []의 참조가 기본값으로 정해지고 그 참조가 계속 사용된다.\nbus3 = HauntedBus()\nprint(bus3)\n\n# 가변값을 받는 매개변수의 기본값으로 None을 사용하는 것이 좋다.\n# None이 아닌 경우 인수의 사본을 오브젝트에 저장하는 것이 좋다.\n\n\nclass TwilightBus:\n\n def __init__(self, passengers=None):\n if passengers is None:\n self.passengers = [] # 늘 새로운 []을 사용한다.\n else:\n self.passengers = list(passengers) # 모든 반복 가능한 객체를 받으므로 list를 사용할 수 있다.\n\n def pick(self, name):\n self.passengers.append(name)\n\n def drop(self, name):\n self.passengers.remove(name)\n\nbasketball_team = ['Sue', 'Tina', 'Maya', 'Diana', 'Pat']\nbus = TwilightBus(basketball_team)\nbus.drop('Tina')\nbus.drop('Pat')\nprint(basketball_team)\nprint(bus.passengers)\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6501429677009583,
"alphanum_fraction": 0.6577693223953247,
"avg_line_length": 21.826086044311523,
"blob_id": "95d4646e8e8fbf2242d057629446ef46104fd267",
"content_id": "2f79dc6a08c6f20685a710c60d6a9df1e4929d0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1325,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 46,
"path": "/chapter02/02-02.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "symbols = '$¢£¥€¤'\ncodes = []\nfor symbol in symbols:\n codes.append(ord(symbol))\n\nprint(codes)\n\ncodes = [ord(symbol) for symbol in symbols]\nprint(codes)\n\nbeyond_ascii = [ord(s) for s in symbols if ord(s) > 127]\nprint(beyond_ascii)\n\nbeyond_ascii = list(filter(lambda c: c > 127, map(ord, symbols)))\nprint(beyond_ascii)\n\ncolors = ['black', 'white']\nsizes = ['S', 'M', 'L']\ntshrits = [(color, size) for color in colors for size in sizes]\nprint(tshrits)\n\nfor color in colors:\n for size in sizes:\n print((color, size))\n\n'''\ngenerator expression을 사용하면 list나 tuple을 만들 필요없이\n다음 값을 하나씩 만들 수 있어 list전체를 만드는데 필요한 메모리를\n절약할 수 있습니다.\n'''\n\nprint(tuple(ord(symbol) for symbol in symbols))\nimport array\n\n# 배열생성자는 인수를 두 개 받으며 제너레이터 표현식 앞뒤에 반드시 괄호를\n# 넣어야 한다.\nprint(array.array('I', (ord(symbol) for symbol in symbols)))\n\n\n# 예제 2-6 제너레이터 표현식에서의 데카르트 곱\ncolors = ['black', 'white']\nsizes = ['S', 'M', 'L']\nfor tshrits in ('{} {}'.format(c, s) for c in colors for s in sizes):\n # generator는 괄호로 표현됩니다\n # 한 번에 하나씩 항목을 생성하며 리스트를 생성하지 않습니다\n print(tshrits)"
},
{
"alpha_fraction": 0.5852915644645691,
"alphanum_fraction": 0.6436031460762024,
"avg_line_length": 23.967391967773438,
"blob_id": "953a5d66a8cf054849324b7bed0b23e119e19279",
"content_id": "01f97e9c362f0c9344db2cbdf62d5b15a2cafcff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2327,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 92,
"path": "/chapter05/05-10.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "from functools import reduce\nfrom operator import mul\n\n\ndef fact(n):\n return reduce(lambda a, b: a * b, range(1, n + 1))\n\n\ndef fact2(n):\n return reduce(mul, range(1, n+1))\n\n\nmetro_areas = [\n ('Tokyo', 'JP', 36.933, (35.689722, 139.691667)), # <1>\n ('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),\n ('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),\n ('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),\n ('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),\n]\n\nfrom operator import itemgetter\n\ncc_name = itemgetter(1, 0)\n\nfor city in sorted(metro_areas, key=itemgetter(1)):\n print(city)\n\nfor city in metro_areas:\n print(cc_name(city))\n\nfrom collections import namedtuple\nLatLong = namedtuple('LatLong', 'lat long')\nMetropolis = namedtuple('Metropolis', 'name cc pop coord')\nmetro_areas = [Metropolis(name, cc, pop, LatLong(lat, long)) for name, cc, pop, (lat, long) in metro_areas]\nprint(metro_areas[0])\nprint(metro_areas[0].coord.lat)\n\nfrom operator import attrgetter\n\nname_lat = attrgetter('name', 'coord.lat')\nfor city in sorted(metro_areas, key=attrgetter('coord.lat')):\n print(name_lat(city))\n\nfrom operator import methodcaller\ns = 'The time has come'\nupcase = methodcaller('upper') # str의 upper method\nprint(upcase(s))\n\nhiphenate = methodcaller('replace', ' ', '-')\nprint(hiphenate(s)) # str의 replace method\n\nprint(s.replace(' ', '-'))\n\nfrom operator import mul\nfrom functools import partial\ntriple = partial(mul, 3)\nprint(triple(7))\n\nprint(list(map(triple, range(1, 10))))\n\nimport unicodedata, functools\nnfc = partial(unicodedata.normalize, 'NFC')\ns1 = 'café'\ns2 = 'cafe\\u0301'\n\nprint(s1, s2)\nprint(s1 == s2)\nprint(nfc(s1) == nfc(s2))\n\n\ndef tag(name, *content, cls=None, **attrs):\n '''하나 이상의 HTML 태그를 생성한다.'''\n if cls is not None:\n attrs['class'] = cls\n if attrs:\n attr_str = ''.join(' %s=\"%s\"' % (attr, value) for attr, value in sorted(attrs.items()))\n else:\n attr_str = ''\n if content:\n return '\\n'.join('<%s%s>%s</%s>' % (name, attr_str, c, name) for c in content)\n\n else:\n return '<%s%s />' % (name, attr_str)\n\n\nprint(tag)\npicture = partial(tag, 'img', cls='pic-frame')\nprint(picture(src='wumpus.jpeg'))\nprint(picture)\nprint(picture.func)\nprint(picture.args)\nprint(picture.keywords )\n\n"
},
{
"alpha_fraction": 0.49485597014427185,
"alphanum_fraction": 0.5493826866149902,
"avg_line_length": 18.019607543945312,
"blob_id": "39e94c487d59906c335da4c585d51c84566dbf44",
"content_id": "e790199a7ed1c2f9ca1b1e43bd22c399ffbdefc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1076,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 51,
"path": "/chapter02/02-04.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "import collections\n\nl = [10, 20, 30, 40, 50, 60]\nprint(l[:2]) # 마지막 인덱스 다음 혹은 개수 혹은 2번 인덱스에서 분할\nprint(l[2:])\nprint(l[:3])\nprint(l[3:])\n\n# s[a:b:c] -> a에서 시작해 b까지 c보폭만큼\ns = 'bicycle'\nprint(s[::3])\nprint(s[::-1])\nprint(s[::-2])\n\nCard = collections.namedtuple('Card', ['rank', 'suit'])\n\nclass FrenchDeck:\n ranks = [str(n) for n in range(2, 11)] + list('JQKA')\n suits = 'spades diamonds clubs hearts'.split()\n\n def __init__(self):\n self._cards = [Card(rank, suit) for suit in self.suits\n for rank in self.ranks]\n\n def __len__(self):\n return len(self._cards)\n\n def __getitem__(self, position):\n return self._cards[position]\n\ndeck = FrenchDeck()\nprint(deck[12::13])\n\n# 다차원 슬라이싱을 할 때 __getitem__()은\n# tuple을 인풋으로 받는다\n\nl = list(range(10))\nprint(l)\nl[2:5] = [20, 30]\nprint(l)\n\ndel l[5:7]\nprint(l)\nl[3::2] = [11, 22]\ntry:\n l[2:5] = 100\n print(l)\nexcept:\n print(l)\n l[2:5] = [100]\n print(l)\n\n\n"
},
{
"alpha_fraction": 0.6455696225166321,
"alphanum_fraction": 0.6518987417221069,
"avg_line_length": 38.5,
"blob_id": "4ad5b5c1579d1a55d7debc916c05351d9c14a772",
"content_id": "fef57b1af6460f590cfc559bbd2ab2de11f56df6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 4,
"path": "/chapter05/05-03.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# anonymous function\nfruits = ['strawberry', 'fig', 'apple', 'cherry', 'raspberry', 'banana'] # fig = 무화과\n\nprint(sorted(fruits, key=lambda word: word[::-1]))\n"
},
{
"alpha_fraction": 0.7371273636817932,
"alphanum_fraction": 0.7398374080657959,
"avg_line_length": 29.83333396911621,
"blob_id": "8bb86eb9bbfce7de6e2211a596ddb164999a59da",
"content_id": "7f1cddc7e58da0ffe78cc2f3f8cc56276300bdf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 12,
"path": "/chapter04/04-07.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "fruits = ['caju', 'atemoia', 'cajá', 'açaí', 'acerola']\nprint(sorted(fruits))\nimport locale\n# locale을 정의하는 거라서 처음 프로세스를 돌릴 때 실행합니다\nprint(locale.setlocale(locale.LC_COLLATE, 'pt_BR.UTF-8'))\nsorted_fruits = sorted(fruits, key=locale.strxfrm)\nprint(sorted_fruits)\n\nimport pyuca\ncoll = pyuca.Collator()\nsorted_fruits = sorted(fruits, key=coll.sort_key)\nprint(sorted_fruits)"
},
{
"alpha_fraction": 0.6389862298965454,
"alphanum_fraction": 0.6527366042137146,
"avg_line_length": 22.037267684936523,
"blob_id": "eee65aba6d9e3da9799005aecd3ef7314bc988d0",
"content_id": "af431d7c6fb74097a9a4b245d56be6a508e7599f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4358,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 161,
"path": "/chapter04/04-06.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# unicode에는 발음 구별기호(diacritical mark)가 있어\n# 인쇄할 때 앞 문자와 하나로 결합되어 출력된다.\n\ns1 = 'café'\ns2 = 'cafe\\u0301'\n\nprint(s1, s2)\nprint(len(s1))\nprint(len(s2))\nprint(s1 == s2)\n\n# 유니코드 표준에서는 이 두 개의 시퀸스를 규범적으로 동일하독 하며, 어플리케이션은\n# 이 두 시퀸스를 동일하게 처리해야 한다.\n# 하지만 파이썬은 서로 다른 두 개의 코드 포인트 시퀸스로 보고 이 둘을 서로\n# 동일하지 않다고 판단한다.\n\n# 해결책은 유니코드 정규화\n# 방법 1\n# Normalization Form C 문자와 코드포잉트를 조합해 가장 짧은 동일문자열로 구성\n# Normalization Form D 무조합된 문자를 기본문자와 별도의 결합문자로 분리한다.\nfrom unicodedata import normalize\n\nprint(len(normalize('NFC', s1)), len(normalize('NFC', s2)))\nprint(len(normalize('NFD', s1)), len(normalize('NFD', s2)))\n\nprint(normalize('NFC', s1) == normalize('NFC', s2))\nprint(normalize('NFD', s1) == normalize('NFD', s2))\n\n# 키보드는 일반적으로 NFC\n# 하지만 조심하자\n\nfrom unicodedata import normalize, name\n\nohm = '\\u2126'\nprint(ohm)\nprint(name(ohm))\nohm_c = normalize('NFC', ohm)\nprint(ohm_c)\nprint(name(ohm_c))\nprint(ohm == ohm_c)\nprint(normalize('NFC', ohm) == normalize('NFC', ohm_c))\n\n# 하나의 문자가 하나의 규범적인 코드를 가지는 것이 유니코드의 목표이지만\n# 예외도 있습니다.\n\nfrom unicodedata import normalize, name\n\nhalf = '½'\nprint(half)\nprint(normalize('NFKC', half))\nprint(len(normalize('NFKC', half)))\nfour_squared = '4²'\nprint(normalize('NFKC', four_squared))\nmicro = 'µ'\nmicro_kc = normalize('NFKC', micro)\nprint(micro, micro_kc)\nprint(ord(micro), ord(micro_kc))\nprint(name(micro), name(micro_kc))\n\n# 쿼리할 때는 NFKC, NFKD 정규화가 도움이 될 것입니다. 저장하거나 영구보관하는 것은 권하지 않습니다.\n\n# case folding\n# 모든 텍스트를 소문자로 변환하는 연산\nmicro = 'µ'\nprint(name(micro))\nmicro_cf = micro.casefold()\nprint(name(micro_cf))\nprint(micro, micro_cf)\n# eszett = '\\u00DF'\n# print(name(eszett))\n# eszett_cf = eszett.casefold()\n# print(name(eszett_cf))\n\nfrom unicodedata import normalize\n\n\ndef nfc_equal(str1, str2):\n return normalize('NFC', str1) == normalize('NFC', str2)\n\n\ndef fold_equal(str1, str2):\n return normalize('NFC', str1.casefold()) == normalize('NFC', str2.casefold())\n\n\ns3 = 'Straße'\ns4 = 'strasse'\n\nprint(s3 == s4)\nprint(nfc_equal(s3, s4))\nprint(fold_equal(s3, s4))\nprint(fold_equal(s1, s2))\nprint(fold_equal('A', 'a'))\n\nimport unicodedata\nimport string\n\n\ndef shave_marks(txt):\n norm_txt = unicodedata.normalize('NFD', txt)\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n return unicodedata.normalize('NFC', shaved)\n\n\norder = '“Herr Voß: • ½ cup of Œtker™ caffè latte • bowl of açaí.”'\nprint(shave_marks(order))\nGreek = 'Ζέφυρος, Zéfiro'\nprint(shave_marks(Greek))\n\n\ndef shave_marks_latin(txt):\n norm_txt = unicodedata.normalize('NFD', txt)\n latin_base = False\n keepers = []\n for c in norm_txt:\n if unicodedata.combining(c) and latin_base:\n continue\n keepers.append(c)\n\n if not unicodedata.combining(c):\n latin_base = c in string.ascii_letters\n shaved = ''.join(keepers)\n return unicodedata.normalize('NFC', shaved)\n\n\nprint(shave_marks_latin(order))\nprint(shave_marks_latin(Greek))\n\n'''\nPython string method translate() returns a copy of the string in which\nall characters have been translated using table\n(constructed with the maketrans() function in the string module),\noptionally deleting all characters found in the string deletechars.\n'''\n\nsingle_map = str.maketrans(\"\"\"‚ƒ„†ˆ‹‘’“”•–—˜›\"\"\", # <1>\n \"\"\"'f\"*^<''\"\"---~>\"\"\")\n\nmulti_map = str.maketrans({ # <2>\n '€': '<euro>',\n '…': '...',\n 'Œ': 'OE',\n '™': '(TM)',\n 'œ': 'oe',\n '‰': '<per mille>',\n '‡': '**',\n})\n\nmulti_map.update(single_map)\n\n\ndef dewinize(txt):\n return txt.translate(multi_map)\n\n\ndef 
asciize(txt):\n no_marks = shave_marks_latin(dewinize(txt))\n no_marks = no_marks.replace('ß', 'ss')\n return unicodedata.normalize('NFKC', no_marks)\n\nprint(dewinize(order))\nprint(asciize(order))\n"
},
{
"alpha_fraction": 0.5495049357414246,
"alphanum_fraction": 0.5643564462661743,
"avg_line_length": 12.266666412353516,
"blob_id": "800a0f492a190b461102081cc214d61923d86aed",
"content_id": "2bb82f1ccdd8986d818ef783e1bd48e4920b3efd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 202,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 15,
"path": "/chapter05/05-06.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "def factorial(n):\n '''returns n!'''\n return 1 if n < 2 else n * factorial(n-1)\n\nprint(dir(factorial))\n\nclass C:\n pass\n\nobj = C()\n\ndef func():\n pass\n\nprint(set(dir(func)) - set(dir(obj)))\n\n\n\n"
},
{
"alpha_fraction": 0.769784152507782,
"alphanum_fraction": 0.798561155796051,
"avg_line_length": 27,
"blob_id": "9ba1d77bc7908ff978a512b08da4b2f2b8e2b474",
"content_id": "851d55dd07361943e3e56de6d37e80853b4808e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 5,
"path": "/MyWin32Com/test_dispatch.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "import win32com.client\nimport os\n\nxl = win32com.client.Dispatch(\"Excel.Application\")\nxl.Application.Run(os.path.abspath(\"excelsheet.xlsm\"))"
},
{
"alpha_fraction": 0.6489831805229187,
"alphanum_fraction": 0.6763925552368164,
"avg_line_length": 19.962963104248047,
"blob_id": "c2a494ab4bc7d09301a8e267b665341a56b06661",
"content_id": "2d8835e3c6b49cc53101b2ec4948e3299cbe6203",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1433,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 54,
"path": "/chapter05/05-01.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''\n일급객체의 정의\n1) 런타임에 생성이 가능하다\n2) 데어터 구조체의 변수나 요소에 할당할 수 있다(일급객체를 하나의 묶음으로 다룰 수 있다)\n3) 함수 인수로 전달할 수 있다\n4) 함수 결과로 반환할 수 있다\n\n파이썬에서 function은 일급객체(first-class object)\n'''\n\ndef factorial(n):\n '''return n!'''\n return 1 if n < 2 else n * factorial(n-1)\n\nprint(factorial(42))\nprint(factorial.__doc__)\nprint(type(factorial))\n\n'''\n1) 함수를 변수에 할당하고\n2) 변수명을 통해 함수를 호출한다\n3) 함수를 map함수의 parameter로 전달한다\n'''\n\nfruits = ['strawberry', 'fig', 'apple', 'cherry', 'raspberry', 'banana']\nprint(sorted(fruits, key=len))\n\nfact = factorial\nprint(fact)\nprint(fact(5))\n# map(function, data)\nprint(list(map(fact, range(11))))\n\ndef reversed(word):\n return word[::-1]\n\nprint(reversed('testing'))\nprint(sorted(fruits, key=reversed))\n\n\n# 5.2.1 map(), filter(), reduce()의 대안\n# map, filter, reduce를 사용할 수도 있지만\n# 단순히 list를 만든다면 comprehensive list를 사용하세요\nprint(list(map(fact, range(6))))\nprint([fact(n) for n in range(6)])\n\nprint(list(map(factorial, filter(lambda n: n % 2, range(6)))))\nprint([factorial(n) for n in range(6) if n % 2])\n\n# reduce는 처음이라\nfrom functools import reduce\nfrom operator import add\nprint(reduce(add, range(100)))\nprint(sum(range(100)))"
},
{
"alpha_fraction": 0.6476578116416931,
"alphanum_fraction": 0.6537678241729736,
"avg_line_length": 13.028571128845215,
"blob_id": "7bc4a826fb842b21ae07af32e7d8df030dbfb2f7",
"content_id": "c8bb29fbcc115b1ad9adb2053ac3055ff70a3685",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 35,
"path": "/chapter07/07-01.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1) 파이썬이 데커레이터 구문을 평가하는 방식\n2) 변수가 지역변수인지 파이썬이 판단하는 방식\n3) 클로저의 존재 이유와 작동 방식\n을 알고 난 후\n\na) 잘 작동하는 데커레이터 구현하기\nb) 표준 라이브러리에서 제공하는 재미있는 데커레이터들\nc) 매개변수화된 데커레이터 구현하기\n를 살펴볼 것입니다.\n\"\"\"\n\n\"\"\"\n@decorator\ndef target():\n print('running target()')\n\n과 \n\ntarget = decorator(target)은 동일합니다.\n원래 정의된 target과 decorator의 반환형 target은 다른 함수객체입니다.\n\n\"\"\"\n\ndef deco(func):\n def inner():\n print('running inner()')\n return inner\n\n@deco\ndef target():\n print('running target()')\n\ntarget()\nprint(target)\n"
},
{
"alpha_fraction": 0.5447154641151428,
"alphanum_fraction": 0.5560975670814514,
"avg_line_length": 25.7391300201416,
"blob_id": "4f91a8eec2b50bfc8f0624ade8cf0cc4ba165152",
"content_id": "1b14c8bcc8a28a57f23bba8a04a815882b73c22c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 23,
"path": "/chapter05/05-09.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''함수 애너테이션은 python3부터 지원합니다.'''\n\ndef clip(text:str, max_len:'int > 0'=80) -> str:\n \"\"\"max_len 앞이나 뒤으 ㅣ마지막 공백에서 잘라낸 텍스트를 반환한다.\n \"\"\"\n end = None\n if len(text) > max_len:\n space_before = text.rfind(' ', 0, max_len)\n if space_before >= 0:\n end = space_before\n else:\n space_after = text.rfind(' ', max_len)\n if space_after >= 0:\n end = space_after\n if end is not None:\n end = len(text)\n\n return text[:end].rstrip()\n\nprint(clip.__annotations__)\nfrom inspect import signature\nsig = signature(clip)\nprint(sig.return_annotation)\n"
},
{
"alpha_fraction": 0.521327018737793,
"alphanum_fraction": 0.6270142197608948,
"avg_line_length": 24.08333396911621,
"blob_id": "0799799ac8c465484fe3d6db6b7029f427a98f9e",
"content_id": "e56d63ffa49107f869731e212c2e601093b68ea8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2244,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 84,
"path": "/chapter02/02-03.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "lax_coordinates = (33.9425, -118.408056)\ncity, year, pop, chg, area = ('Tokyo', 2003, 32450, 0.66, 8014)\n# tuple을 레코드처럼 사용하는 방법입니다\ntraveler_ids = [('USA', '311958955'), ('BRA', 'CE342567'), ('ESP', 'XDA205856')]\nfor passport in sorted(traveler_ids):\n print(\"{}/{}\".format(*passport))\n # print(\"%s/%s\" % passport) 로 사용할 수도 있습니다\n\n# tuple unpacking\nlax_coordinates = (33.9425, -118.408056)\nlatitude, longitude = lax_coordinates\n\nprint(latitude)\nprint(longitude)\n\nprint(divmod(20, 8))\n\nt = (20, 8)\nprint(divmod(*t))\n\nquotient, remainder = divmod(*t)\nprint(quotient)\nprint(remainder)\n\nimport os\n_, filename = os.path.split('/home/luciano/.ssh/idrsa.pub')\nprint(_)\nprint(filename)\n\n# 초과항목을 잡기 위해 * 사용\n\na, b, *rest = range(5)\nprint(a, b, rest)\n\na, b, *rest = range(3)\nprint(a, b, rest)\n\na, b, *rest = range(2)\nprint(a, b, rest)\n\na, *body, c, d = range(5)\nprint(a, body, c, d)\n\n*head, b, c, d = range(5)\nprint(head, b, c, d)\n\nmetro_areas = [\n ('Tokyo', 'JP', 36.933, (35.689722, 139.691667)), # <1>\n ('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),\n ('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),\n ('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),\n ('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),\n]\n# '^' -> Forces the field to be centered within the available space.\nprint(\"{:<30}\".format('left alignment'))\nprint(\"{:>30}\".format('right alignment'))\nprint(\"{:^30}\".format('center alignment'))\n\nprint(\"{:30}\".format('reserved spaces'))\n\nprint('{:15} | {:^9} | {:^9}'.format('', 'lat.', 'long.'))\nfmt = '{:15} | {:9.4f} | {:9.4f}'\n\n# f -> fixed point notation\nfor name, cc, pop, (latitude, longitude) in metro_areas:\n if longitude <= 0:\n print(fmt.format(name, latitude, longitude))\n\n# named tuple\n# named tuple은 tuple의 subclass입니다\n\nfrom collections import namedtuple\n\nCard = namedtuple('Card', (['rank', 'suit']))\n\n# Card는 factory 함수의 이름\n# subclass의 이름은 Card\n\nCity = namedtuple('City', 'name country population coordinates')\ntokyo = City('Tokyo', 'JP', 36933, (35.689722, 139.691))\nprint(tokyo)\n\n# 지금까지는 record로서의 tuple\n# 이제부터는 immutable list로서의 tuple\n\n\n\n"
},
{
"alpha_fraction": 0.5521172881126404,
"alphanum_fraction": 0.5895765423774719,
"avg_line_length": 15.131579399108887,
"blob_id": "6037167648790dfef95be44e7d23aa8dbbec15ce",
"content_id": "c6dc0957c00b9b59b4b89b359b92073ad392d942",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 38,
"path": "/chapter02/02-05.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "l = [1, 2, 3]\nprint(l * 5)\nprint(5 * 'abcd')\n# 생각해보면 list는 general container이니\n# 무슨 값이 들어가있는 줄 알고 연산을 하겠습니까\n# 그냥 길이를 늘이는 것이 안전할 따름입니다\n\n# 2.5.1 리스트의 리스트 만들기\nboard = [['_'] * 3 for i in range(3)]\nprint(board)\nboard[1][2] = 'X'\nprint(board)\n\nweird_board = [['_'] * 3] * 3\nprint(weird_board)\n\n# 동일한 리스트에 대한 참조를 가진 리스트를 수정하면\n# 원하는 결과가 나오지 않습니다\n\nweird_board[1][2] = '0'\nprint(weird_board)\n\nrow = ['_'] * 3\nboard = []\nfor i in range(3):\n board.append(row)\n\nprint(board)\n\nboard = []\nfor i in range(3):\n # 매 이터레이션마다 다른 참조를 생성합니다\n row = ['_'] * 3\n board.append(row)\n\nprint(board)\nboard[2][0] = 'X'\nprint(board)\n\n"
},
{
"alpha_fraction": 0.6743515729904175,
"alphanum_fraction": 0.690682053565979,
"avg_line_length": 21.365591049194336,
"blob_id": "45010ebb69999014fbe0e2b5c43a82791344e3c6",
"content_id": "9db48a69be257af33e75616064b054f7b7ddc4b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2698,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 93,
"path": "/chapter07/07-08.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "import functools\nfrom chapter07.clockdeco import clock\n\n@clock\ndef fibonacci(n):\n if n < 2:\n return n\n else:\n return fibonacci(n - 2) + fibonacci(n - 1)\n\n\nprint(fibonacci(6))\n\[email protected]_cache() # lru_cache()는 parameter를 받는 decorator\n@clock\ndef fibonacci2(n):\n if n < 2:\n return n\n else:\n return fibonacci2(n - 2) + fibonacci2(n - 1)\n\nprint(fibonacci2(6))\n\n\"\"\"\nlru_cache(maxsize=128, typed=False)\nmaxsize는 cache의 크기\ntyped는 type이 다르면 다른 결과로 보관할 것인지 여부\ndecorated된 함수는 hashable한 parameters를 사용해야 합니다.\n내부적으로 dictionary를 사용하기 때문입니다.\n\"\"\"\n\nimport html\n\ndef htmlize(obj):\n content = html.escape(repr(obj))\n return '<pre>{}</pre>'.format(content)\n\nprint(htmlize({1, 2, 3}))\nprint(htmlize(abs))\nprint(htmlize('Heimlich & Co. `n- a game'))\nprint(htmlize(42))\nprint(htmlize(['alpha', 66, {3, 2, 1}]))\n# 파이썬에서는 메서드나 함수의 오버로딩을 지원하지 않으므로\n# 서로 다르게 처리하고자 하는 자료형별로 서로 다른 시그너처를 가진 htmlize()를\n# 만들 수 없다.\n\nfrom functools import singledispatch\nfrom collections import abc\nimport numbers\nimport html\n\n\"\"\"\nhttps://soooprmx.com/archives/5852\nsingledispatch는 제네릭을 생성하는 표준라이브러리이다.\n\n단일 디스패치 제네릭을 구현하는 방법을 제공한다.\nsingledispatch는 함수를 패링하는 함수이고, 래핑된 결과는 각각의 타입에 대해\n어떻게 사용될 지 구체적으로 정의한다.\n\n한마디로 말하면 singledispatch는 decorator를 생성하는 decorator를 생성하는\ndecorator이다.\n\nsingledispatch를 사용해 임의의 함수 func를 만든다. func는 타입이 정의되지 않은\n함수로 기본적으로 일반 파이썬 함수와 동일하다.\n\nfunc.register(type)을 통해 특정 파라미터 타입에 대한 함수를 정의할 수 있다.\n\n\"\"\"\n\nfrom functools import singledispatch\nfrom collections import abc\nimport numbers\nimport html\n\n@singledispatch\ndef htmlize(obj):\n content = html.escape(repr(obj))\n return '<pre>{}</pre>'.format(content)\n\[email protected](str)\ndef _(text): # 특화된 함수의 이름이 필요없으므로 언더바로 함수명을 저장한다.\n content = html.escape(text).replace('\\n', '<br>\\n')\n return '<p>{}</p>'.format(content)\n\[email protected](numbers.Integral)\ndef _(n):\n return '<pre>{0} {0x{0:x})</pre>'.format(n)\n\[email protected](tuple)\[email protected](abc.MutableSequence)\ndef _(seq):\n inner ='</li>\\n<li>'.join(htmlize(item) for item in seq)\n return '<ul>\\n<li>' + inner + '</li>\\n<\\/ul>'\n\n\n"
},
{
"alpha_fraction": 0.5808823704719543,
"alphanum_fraction": 0.6029411554336548,
"avg_line_length": 12.300000190734863,
"blob_id": "4e7d14aa2d2c76967b5a1fb27316041bac6a9b23",
"content_id": "def45f8d67544983a7656e8bc350d01a5d47e15d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 10,
"path": "/chapter04/04-01.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "s = 'café'\nprint(s)\nprint(len(s))\n\nb = s.encode('utf8')\nprint(b)\nprint(len(b))\n\nprint(b.decode('utf8'))\nprint(len(b.decode('utf8')))\n\n\n\n"
},
{
"alpha_fraction": 0.6056034564971924,
"alphanum_fraction": 0.6239224076271057,
"avg_line_length": 16.846153259277344,
"blob_id": "95b4d77c12d3166e83998d117c405e6793db2199",
"content_id": "8cf06bf91cd4b16a701c2003bf9219aa5737ebdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 974,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 52,
"path": "/chapter03/03-08.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "l = ['spam', 'spam', 'egg', 'egg']\nprint(set(l))\n\nprint(list(set(l)))\n\n# intersection의 크기를 구하는 여러가지 방법\n\nneedles = set([1, 2, 3])\nhaystack = set(range(10))\n\n\nfound = len(needles & haystack)\nprint(found)\n\nfound = 0\nfor n in needles:\n for h in haystack:\n if n == h:\n found += 1\n\nprint(found)\n\nfound = len(set(needles) & set(haystack))\nprint(found)\nfound = len(set(needles).intersection(haystack))\nprint(found)\n\n# 공집합은 set()로 표기합니다\ns = {1}\nprint(type(s))\nprint(s)\nprint(s.pop())\nprint(s)\n\nfrom dis import dis\ndis('{1}')\ndis('set([1])')\nprint(frozenset(range(10)))\n\nfrom unicodedata import name\n'''\ndef name(*args, **kwargs): # real signature unknown\n \"\"\"\n Returns the name assigned to the character chr as a string.\n \n If no name is defined, default is returned, or, if not given,\n ValueError is raised.\n \"\"\"\n pass\n'''\n\nprint({chr(i) for i in range(32, 256) if 'SIGN' in name(chr(i), '')})\n"
},
{
"alpha_fraction": 0.6713460087776184,
"alphanum_fraction": 0.6787778735160828,
"avg_line_length": 17.600000381469727,
"blob_id": "04b63037c2899ee80a728ad31d943b37bd06dd38",
"content_id": "1475ee44c208bd454802102c1521cda5284de7bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2119,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 65,
"path": "/chapter06/06-02.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''\n명령 디자인 패턴\n명령패턴의 목적은 연산을 실행하는 객체(호출자 invoker)와\n연산을 구현하는 객체(수신자 receiver)를 분리하는 것이다.\n예를 들어 GUI의 메뉴항목이 호출자이고 편집되고 있는 문서나\n애플리케이션 자신이 수신자다.\n\n명령 객체를 수신자와 호출자 사이에 놓고 , 멸령은 execute()라는 단 사나의 메서드로 인터페이스를 구현한다.\nexecute()는 원하는 연산자를 수행하기 위해 수신자가 가지고 있는 메서드를 호출한다.\n\n호출자는 수신자의 인터페이스를 알 필요가 없고, 멸령의 서브클래스를 통해 서로 다른 수신자를 추가할 수 있다.\n\n초출자는 구체적인 명열으로 설정되며, 연산을 실행하기 위해 execute()메서드를 호출한다.\n\n명령은 콜백에 대한 객체지향식 대체물이다.\n\nhttps://mrw0119.tistory.com/69\nCommand 패턴은 특정 객체에 대한 명령을 캡슐화하여 처리하는 패턴이다.\n\n패턴의 주체는 Invoker, Command, Receiver이다.\n\n1) Invoker: 명령을 가지고 있으며 요청에 따라 명령을 실행시킨다.\n2) Command: 수신자에게 특정작업을 지시한다.\n3) Receiver: 명령의 지시대로 작업을 수행한다\n\n예)\n\nInvoker\n Command A\n Command B\n Command C\n\nA ~ ReceiverA\nB ~ ReceiverA\nC ~ ReceiverB\n\n예)\n\nSwitch\n On\n Off\n\nOn ~ Light\nOff ~ Light\n\nSwitch는 on, off 명령을 가지고 있는 invoker이고\nlight는 on, off 명령에 따라 작업을 수행하는 receiver이다.\n\n스위치는 온오프 명령을 받고, 해당 명령을 실행한다.\n해당 명령은 라이트에게 온오프를 지시한다.\n라이트는 명령에 따라 자신을 온오프한다.\n'''\n\nclass MacroCommand:\n \"\"\"명령 리스트를 실행하는 명령\"\"\"\n def __init__(self, commands):\n self.commands = list(commands)\n\n def __call__(self):\n for command in self.commands:\n command()\n\n'''\ninvoker가 command를 호출하면 됩니다. __call__()을 사용해 객체를 callable로 만들 수 있다.\n'''\n\n\n"
},
{
"alpha_fraction": 0.6425992846488953,
"alphanum_fraction": 0.7039711475372314,
"avg_line_length": 29.77777862548828,
"blob_id": "545eea777a1d099e1c9fae432b360ed7c0a0b07c",
"content_id": "54e0bf446a5b762d0d0f5d87c9c811526c2601c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 9,
"path": "/chapter04/04-04.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "city = 'São Paulo'\nprint(city.encode('utf_8'))\nprint(city.encode('utf_16'))\nprint(city.encode('iso8859_1'))\nprint(city.encode('cp437', errors='ignore'))\nprint(city.encode('cp437', errors='replace'))\nprint(city.encode('cp437', errors='xmlcharrefreplace'))\n\nprint('Olá, Mundo!')\n"
},
{
"alpha_fraction": 0.559183657169342,
"alphanum_fraction": 0.6244897842407227,
"avg_line_length": 15.704545021057129,
"blob_id": "9a3d85b102d4fb1595f7ecbf1b9eaba5b001e0db",
"content_id": "4752980002f37d2139ccfff79f9781d7ad152162",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 865,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 44,
"path": "/chapter07/07-02.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n데커레이터는 데커레이트된 함수가 정의된 직후에 실행된다.\n이는 일반적으로 파이썬이 로딩되는 시점, 즉 임포트 타임에 실행된다.\n\"\"\"\nregistry = list()\n\ndef register(func):\n print('running register(%s)' % func)\n registry.append(func)\n return func\n\n@register\ndef f1():\n print('running f1()')\n\n@register\ndef f2():\n print('running f2()')\n\ndef f3():\n print('running f3()')\n\ndef main():\n print('running main()')\n print('registry ->', registry)\n f1()\n f2()\n f3()\n\n\nif __name__ == '__main__':\n main()\n\n\"\"\"\nrunning register(<function f1 at 0x0388C420>)\nrunning register(<function f2 at 0x0388C588>)\nrunning main()\nregistry -> [<function f1 at 0x0388C420>, <function f2 at 0x0388C588>]\nrunning f1()\nrunning f2()\nrunning f3()\n\nmain()이 실행되기 전 decorator가 실행됩니다.\n\"\"\"\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 21,
"blob_id": "694458321b168bc933db8f12a8bba40502c9958a",
"content_id": "a64a88f691fd4580eeafb0ededac14f97bcee622",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/chapter07/07-09.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# decorator는 누적할 수 있다\n\n\n"
},
{
"alpha_fraction": 0.580322802066803,
"alphanum_fraction": 0.5895465016365051,
"avg_line_length": 23.074073791503906,
"blob_id": "322e5d66e97783253582a31d23a54e99f62c97ae",
"content_id": "be9d769756883d5e5aed2e044546cb5f97ea5a90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1371,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 54,
"path": "/chapter03/03-03.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''\nd[k]는 failed, when k is not in keys\n'''\n\nimport sys\nimport re\n\nWORD_RE = re.compile(r'\\w+')\nindex = {}\n\nwith open('zen.txt', encoding='utf-8') as fp:\n # enumerate(iterable, start=0)\n # start는 starting index를 무엇으로 할 것인지 지정한다\n # 기본값은 0\n for line_no, line in enumerate(fp, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n occurrences = index.get(word, []) # word에 해당하는 key가 없으면 none 대신 []을 return\n occurrences.append(location)\n index[word] = occurrences\n\nfor word in sorted(index, key=str.upper):\n print(word, index[word])\n\n\nimport sys\nimport re\n\nWORD_RE = re.compile(r'\\w+')\n\nindex = {}\n\nwith open('zen.txt', encoding='utf-8') as fp:\n for line_no, line in enumerate(fp, start=1): # file object는 iterator\n for match in WORD_RE.finditer(line):\n word = match.group() # group을 반환\n column_no = match.start() + 1\n location = (line_no, column_no)\n index.setdefault(word, []).append(location) #\n\nfor word in sorted(index, key=str.upper):\n print(word, index[word])\n\ncar = {\n \"brand\": \"Ford\",\n \"model\": \"Mustang\",\n \"year\": 1964\n}\n\nx = car.setdefault(\"color\", \"White\")\n\nprint(car)\n\n"
},
{
"alpha_fraction": 0.5390475988388062,
"alphanum_fraction": 0.6171428561210632,
"avg_line_length": 19.19230842590332,
"blob_id": "e4a141fb9805e4b03411df885c37392f889975c4",
"content_id": "bfeb0575ad79d1a647677810495c41d949f0d5af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 26,
"path": "/chapter08/08-02.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "charles = {'name': 'Charles L. Dodgson', 'born': 1832}\nlewis = charles\nprint(lewis is charles)\n\nprint(id(charles), id(lewis))\nlewis['balance'] = 950\nprint(charles)\nalex = {'name': 'Charles L. Dodgson', 'born': 1832, 'balance': 950}\nprint(charles == alex)\nprint( alex is not charles)\n\n# is 연산자는 id를 (객체의 정체성을)\n# == 연산자는 객체의 값을 비교한다.\n\nt1 = (1, 2, [30, 40])\nt2 = (1, 2, [30, 40])\nprint(t1 == t2)\nprint(t1[-1])\nprint(id(t1[-1]))\nt1[-1].append(99)\nprint(t1)\nprint(t1 == t2)\n\"\"\"\ntuple이 불변이라는 것은 tuple이 담고있는 객체의 reference가 변하지 않는다는 뜻이지\n객체의 값은 바뀔 수 있다.\n\"\"\"\n"
},
{
"alpha_fraction": 0.5437158346176147,
"alphanum_fraction": 0.562841534614563,
"avg_line_length": 15.222222328186035,
"blob_id": "3e124562f663749be0f8cd6f982dea998cd2c42f",
"content_id": "7ee7597a741dba5aac961d3eb06a45a3fb7f3da5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 908,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 45,
"path": "/chapter07/07-06.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "\"\"\"\nmake_averager()는 효율적이지 않다.\n합계와 항목 수를 저장한 수 평균을 계산하는 것이 더 효율적이다.\n\"\"\"\n\n\n\"\"\"\n잘못된 closure\n\ndef make_averager():\n count = 0\n total = 0\n\n def averager(new_value):\n count += 1\n total += new_value\n return total / count\n\n return averager\n\navg = make_averager()\nprint(avg(10))\n\"\"\"\n\ndef make_averager():\n total = 0\n count = 0\n\n def averager(new_value):\n # 함수 안에서 nonlocal이 선언되면\n # 변수에 새로운 값이 할당되더라도\n # free variable로 취급한다.\n # 새로운 값으로 갱신되면 closure에\n # 저장된 바인딩이 갱신된다.\n nonlocal total, count\n total += new_value\n count += 1\n return total / count\n\n return averager\n\navg = make_averager()\nprint(avg(10))\nprint(avg(11))\nprint(avg(12))\n\n\n"
},
{
"alpha_fraction": 0.494638055562973,
"alphanum_fraction": 0.5683646202087402,
"avg_line_length": 19.63888931274414,
"blob_id": "cbf0b2d102796a8b4423dcf88536099ec1f7ace3",
"content_id": "5cd9a5bbe5869c8b1e74825bdabaa98ba0c77b28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 36,
"path": "/chapter03/03-09.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''\nfrom random import randint\nhaystack = [randint(1, 10 ** 7) for i in range(10 ** 7)]\nlocation = [randint(1, 10 ** 7) for i in range(500)]\nneedles = [haystack[l] for l in location]\n\nprint(haystack[0:10])\nprint(location[0:10])\nprint(needles[0:10])\n\nfound = 0\nfor n in needles:\n if n in haystack:\n found += 1\n\nprint(found)\n'''\nDIAL_CODES = [\n (86, 'China'),\n (91, 'India'),\n (1, 'United States'),\n (62, 'Indonesia'),\n (55, 'Brazil'),\n (92, 'Pakistan'),\n (880, 'Bangladesh'),\n (234, 'Nigeria'),\n (7, 'Russia'),\n (81, 'Japan'),\n]\n\nd1 = dict(DIAL_CODES)\nprint('d1:', d1.keys())\nd2 = dict(sorted(DIAL_CODES))\nprint('d2:', d2.keys())\nd3 = dict(sorted(DIAL_CODES, key=lambda x: x[1]))\nprint('d3:', d3.keys())\n\n\n\n"
},
{
"alpha_fraction": 0.6594594717025757,
"alphanum_fraction": 0.6594594717025757,
"avg_line_length": 25.428571701049805,
"blob_id": "27898cc5f8a14fda1308af09256e4e813e889799",
"content_id": "1f62bfc9d2b0782fabc6c05fc6c0b9eb4295cdeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 7,
"path": "/chapter05/05-02.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# 함수를 인자로 받거나\n# 함수를 결과로 반환하는 함수를\n# 고위함수(high-order function)이라고 한다\n\n\nfruits = ['strawberry', 'fig', 'apple', 'cherry', 'raspberry', 'banana'] # fig = 무화과\nprint(sorted(fruits, key=len))\n"
},
{
"alpha_fraction": 0.5275362133979797,
"alphanum_fraction": 0.5681159496307373,
"avg_line_length": 10.129032135009766,
"blob_id": "d1903beac62cd6bf70b4f2fa0b09b191eee1c4c3",
"content_id": "58d3c7845f942151fb47f0b22af751ae89c3b12a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 31,
"path": "/chapter07/07-04.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "\"\"\"\nb = 6\ndef f2(a):\n print(a)\n print(b)\n b = 9\n\nprint(f2(3))\n\"\"\"\n\"\"\"\nUnboundLocalError: local variable 'b' referenced before assignment\n\"\"\"\n\n\"\"\"\n파이썬이 함수 본체를 컴파일할 때 b가 함수 안에서 할당되므로 b를 지역변수로 판단한다.\n\n함수 본체 안에서 할당된 변수는 지역변수로 판단한다.\n\"\"\"\n\nb = 6\ndef f3(a):\n global b\n print(a)\n print(b)\n b = 9\n\nf3(3)\nprint(b)\nb = 30\nf3(3)\nprint(b)\n"
},
{
"alpha_fraction": 0.4923076927661896,
"alphanum_fraction": 0.5230769515037537,
"avg_line_length": 12.066666603088379,
"blob_id": "6262c9adda269a9e6e11c61546d4f76ee0d61043",
"content_id": "56cf3beb33b28423d2cd29e27d0043672d5350e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 15,
"path": "/chapter08/08-01.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# 변수는 이름표, 상자가 아니다.\n\na = [1, 2, 3]\nb = a\na.append(4)\nprint(b)\n\n# 객체가 변수에 할당되기 전에 생성된다.\nclass Gizmo:\n\n def __init__(self):\n print('Gizmo id: %s' % id(self))\n\nx = Gizmo()\ny = Gizmo() * 10"
},
{
"alpha_fraction": 0.5948051810264587,
"alphanum_fraction": 0.6259739995002747,
"avg_line_length": 21.52941131591797,
"blob_id": "4e8c8e5fe6b02238decfbbbb48bb4ae6c915e5b0",
"content_id": "7dfff24fdaa56cb0db04d3f7060bba449c519204",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 17,
"path": "/chapter05/05-04.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''\n() 은 연산자\n() 은 호출 연산자\n()은 __call__()을 호출합니다\n파이썬은 7개의 callable object가 있습니다.\n\n1) 사용자 정의 함수\n2) 내장함수\n3) 내장 메서드\n4) 메서드\n5) 클래스: 호출하면 __new__()로 객체를 생성하고, __init__()로 초기화 한 후 object를 반환합니다\n6) 클래스 객체: 클래스가 __call__() 메서드를 구현하면 함수로 호출 가능합니다\n7) 제너레이터 함수: yield 키워드를 사용하는 함수나 메서드는 제너레이터 객체를 반환합니다\n'''\n\nprint(repr(abs), repr(str), repr(13))\nprint([callable(obj) for obj in [abs, str, 13]])\n\n\n"
},
{
"alpha_fraction": 0.5929918885231018,
"alphanum_fraction": 0.6010781526565552,
"avg_line_length": 22.935483932495117,
"blob_id": "16ebdac3f39fbf8f956123c284ff5ca9716b0a5c",
"content_id": "fa94791ba9142ecaa3a19667428ed82faa7da1db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 810,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 31,
"path": "/chapter05/05-05.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "'''\n모든 파이썬 객체를 함수처럼 사용할 수 있습니다.\n__call__() 메서드를 추가하시면 가능합니다.\n'''\n\nimport random\n\nclass BingoCage:\n\n def __init__(self, items):\n self._items = list(items)\n random.shuffle(self._items)\n \"\"\"Shuffle list x in place, and return None.\n Optional argument random is a 0-argument function returning a\n random float in [0.0, 1.0); if it is the default None, the\n standard random.random will be used.\n \"\"\"\n\n def pick(self):\n try:\n return self._items.pop()\n except IndexError:\n raise LookupError('pick from empty BignCage')\n\n def __call__(self):\n return self.pick()\n\nbingo = BingoCage(range(3))\nprint(bingo.pick())\nprint(bingo())\nprint(callable(bingo))\n"
},
{
"alpha_fraction": 0.6473214030265808,
"alphanum_fraction": 0.6651785969734192,
"avg_line_length": 16,
"blob_id": "3c07fdf7f620a3a1c9b7740ed9d1d3afef501d84",
"content_id": "343eafa7913c1e6490fb16ea7004d371bc7c1fed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 13,
"path": "/chapter03/03-07.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# 사용자가 매핑을 변경하지 못하게 제한하고 싶을 때\nfrom types import MappingProxyType\n\nd = {1: 'A'}\nd_proxy = MappingProxyType(d)\nprint(d_proxy)\nprint(d_proxy[1])\n\nd[2] = 'B'\nprint(d_proxy[2])\n\n# d_proxy는 매핑을 변경할 수 없고\n# d를 통해 간접적으로만 접근이 가능하다\n\n\n\n"
},
{
"alpha_fraction": 0.6528066396713257,
"alphanum_fraction": 0.6798336505889893,
"avg_line_length": 17.461538314819336,
"blob_id": "990528711f57af8c1aee1d6a28efdcfd9c423d61",
"content_id": "4933315b76da619a2e9cdfe73e491f8877064da8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 26,
"path": "/chapter04/04-02.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# bytes는 immutable\n# bytearray는 mutable\n\ncafe = bytes('café', encoding='utf_8')\nprint(cafe)\nprint(cafe[0])\nprint(cafe[:1])\ncafe_arr = bytearray(cafe)\nprint(cafe_arr)\nprint(cafe_arr[-1:])\n\nimport array\nnumbers = array.array('h', [-2, -1, 0, 1, 2]) # short integer\noctets = bytes(numbers)\nprint(octets)\n\nimport struct\nfmt = '<3S3SHH'\nwith open('filter.gif', 'rb') as fp:\n img = memoryview(fp.read())\n\nheader = img[:10]\nbytes(header)\nstruct.unpack(fmt, header)\ndel header\ndel img\n\n"
},
{
"alpha_fraction": 0.6104952096939087,
"alphanum_fraction": 0.6614930033683777,
"avg_line_length": 18.22857093811035,
"blob_id": "09171abedb68a88fc0d09eb2c02804255b26c1ca",
"content_id": "d82a87553d160120baa5bb6f8ddb59758ede2230",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1413,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 70,
"path": "/chapter02/02-09.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# list보다 효율적인 자료구조형이 있을 때\n# array는 같은 type의 list\n\nfrom array import array\nfrom random import random\nfloats = array('d', (random() for i in range(10**7)))\nprint(floats[-1])\n\n# file mode write and binary\nfp = open('floats.bin', 'wb')\nfloats.tofile(fp)\nfp.close()\n\nfp = open('floats.bin', 'rb')\nfloats2 = array('d')\nfloats2.fromfile(fp, 10**7)\nfp.close()\nprint(floats2[-1])\nfloats2 == floats\n\nimport array\nnumbers = array.array('h', [-2, -1, 0, 1, 2] ) #\th: signed short\nmemv = memoryview(numbers)\n\nprint(len(memv))\nprint(memv[0])\n\nmemv_oct = memv.cast('B')\nprint(memv_oct.tolist())\nmemv_oct[5] = 4\nprint(numbers)\n\nimport numpy as np\na = np.arange(12)\nprint(a)\nprint(type(a))\nprint(a.shape)\na.shape = 3, 4\nprint(a)\nprint(a[2])\nprint(a[2, 1])\nprint(a[2, 1].shape) # J의 인덱스와 매우 유사하다\nprint(a[:, 1])\nprint(a.transpose())\n\nfloats = np.array([random() for x in range(10 ** 7)], dtype='float64')\nprint(floats[-3:])\nfrom time import perf_counter as pc # performance counter\nt0 = pc()\nfloats /= 3\nprint(pc() - t0)\nnp.save('floats-10M', floats)\nfloats2 = np.load('floats-10M.npy', 'r+')\nfloats2 *= 6\nfloats2[-3:]\n\n\nfrom collections import deque # double ends queue\ndq = deque(range(10), maxlen=10)\nprint(dq)\ndq.rotate(3)\nprint(dq)\ndq.rotate(-4)\nprint(dq)\ndq.appendleft(-1)\nprint(dq)\ndq.extend([11, 22, 33])\nprint(dq)\ndq.extendleft([10, 20, 30, 40])\nprint(dq)\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5544554591178894,
"alphanum_fraction": 0.6014851331710815,
"avg_line_length": 12.862069129943848,
"blob_id": "a1a6dcae75a1b8f3701efcf178f889a91df151eb",
"content_id": "f25ba580cc64bf03f7ee9143a3cb66d5cfd37304",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 510,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 29,
"path": "/chapter02/02-06.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "# Augmented Assignment Statement\n# Inplace ADD를 mutable 객체가 호출하면 i.e.\n# a += b 는 a.__iadd__(b) 를 호출하고\n# a 의 내용을 바꿉니다\nl = [1, 2, 3]\nprint(id(l))\nprint(l)\nl *= 2\nprint(id(l))\nprint(l)\n\n# immuutable 객체에 inplace operation을 적용하면\n# 새로운 객체를 생성합니다\n\nt = (1, 2, 3)\nprint(id(t))\nprint(t)\n\nt *= 2\nprint(id(t))\nprint(t)\n\n# 이상한 inplace operation\n\nt = (1, 2, [30, 40])\nt[2] += [50, 60]\nprint(t)\n\n# 하지만 나는 error가 뜨는데?\n\n\n"
},
{
"alpha_fraction": 0.5991726517677307,
"alphanum_fraction": 0.6161550283432007,
"avg_line_length": 24.92655372619629,
"blob_id": "d5ae323d205bfa9ab1881e2aa0b9528f5a6573c1",
"content_id": "5d2b6ab86ac968a994a9cc172fe91694e1f8c64f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4907,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 177,
"path": "/chapter06/06-01.py",
"repo_name": "luiz158/FluentPython",
"src_encoding": "UTF-8",
"text": "from abc import ABC, abstractmethod\nfrom collections import namedtuple\n\nCustomer = namedtuple('Customer', 'name fidelity')\njoe = Customer('Jone Doe', 0)\nann = Customer('Ann Smith', 1100)\n\n\nclass LineItem:\n\n def __init__(self, product, quantity, price):\n self.product = product\n self.quantity = quantity\n self.price = price\n\n def total(self):\n return self.quantity * self.price\n\n\nclass Order:\n\n def __init__(self, customer, cart, promotion=None):\n self.customer = customer\n self.cart = list(cart)\n self.promotion = promotion\n\n def total(self):\n if not hasattr(self, '__total'):\n self.__total = sum(item.total() for item in self.cart)\n return self.__total\n\n def due(self):\n if self.promotion is None:\n discount = 0\n else:\n discount = self.promotion.discount(self)\n return self.total() - discount\n\n def __repr__(self):\n fmt = '<Order total: {:.2f} due: {:.2f}>'\n return fmt.format(self.total(), self.due())\n\n\nclass Promotion(ABC):\n\n @abstractmethod\n def discount(self, order):\n '''할인액을 구체적인 숫자로 반환한다.'''\n\nclass FidelityPromo(Promotion):\n\n def discount(self, order):\n return order.total() * .05 if order.customer.fidelity >= 1000 else 0\n\n\nclass BulkItemPromo(Promotion):\n\n def discount(self, order):\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * .01\n return discount\n\nclass LargeOrderPromo(Promotion):\n\n def discount(self, order):\n distinct_items = {item.product for item in order.cart}\n if len(distinct_items) >= 10:\n return order.total() * .07\n return 0\n\ncart = [LineItem('banana', 4, .5),\n LineItem('apple', 10, 1.5),\n LineItem('watermellon', 5, 5.0)]\n\nprint(Order(joe, cart, FidelityPromo()))\nprint(Order(ann, cart, FidelityPromo()))\n\nbanana_cart = [LineItem('banana', 30, .5),\n LineItem('apple', 10, 1.5)]\n\n\nprint(Order(joe, banana_cart, BulkItemPromo()))\n\nlong_order = [LineItem(str(item_code), 1, 1.0) for item_code in range(10)]\nprint(Order(joe, long_order, LargeOrderPromo()))\nprint(Order(joe, cart, LargeOrderPromo()))\n\n# 함수지향전략\n\nfrom collections import namedtuple\nCustomer = namedtuple('Customer', 'name fidelity')\n\n\nclass LineItem:\n\n def __init__(self, product, quantity, price):\n self.product = product\n self.quantity = quantity\n self.price = price\n\n def total(self):\n return self.quantity * self.price\n\n\nclass Order:\n\n def __init__(self, customer, cart, promotion=None):\n self.customer = customer\n self.cart = list(cart)\n self.promotion = promotion\n\n def total(self):\n if not hasattr(self, '__total'):\n self.__total = sum(item.total() for item in self.cart)\n return self.__total\n\n def due(self):\n if self.promotion is None:\n discount = 0\n else:\n discount = self.promotion(self)\n return self.total() - discount\n\n def __repr__(self):\n fmt = '<Order total:{:.2f} due: {:.2f}>'\n return fmt.format(self.total(), self.due())\n\n\ndef fidelity_promo(order):\n \"\"\"충성도 포인트가 100점 이상인 고객에게 전체 5% 할인 적용\"\"\"\n return order.total() * .05 if order.customer.fidelity >= 1000 else 0\n\ndef bulk_item_promo(order):\n \"\"\"20개 이산의 동일 상품을 구입하면 10% 할인 적용\"\"\"\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * 0.1\n return discount\n\ndef large_order_promo(order):\n \"\"\"10종류 이상의 상품을 구입하면 전체 7% 할인 적용\"\"\"\n distinct_items = {item.product for item in order.cart}\n if len(distinct_items) >= 10:\n return order.total() * 0.07\n return 0\n\nprint(Order(joe, cart, fidelity_promo))\nprint(Order(ann, cart, 
fidelity_promo))\nprint(Order(joe, banana_cart, bulk_item_promo))\nprint(Order(joe, long_order, large_order_promo))\nprint(Order(joe, cart, large_order_promo))\n\n\npromos = [fidelity_promo, bulk_item_promo, large_order_promo]\n\ndef best_promo(order):\n return max(promo(order) for promo in promos)\n\nprint(Order(joe, cart, best_promo))\nprint(Order(joe, banana_cart, best_promo))\nprint(Order(joe, long_order, best_promo))\nprint(Order(joe, cart, best_promo))\n\n# globals()은 딕셔너리 객체를 반환하는데 이 딕셔너리는 현재 모듈에 대한 내용,\n# 함수나 메서드 안에서 호출할 때, 함수를 호출한 모듈이 아니라 함수가 정의된\n# 모듈을 나타낸다.\n\npromos =[globals()[name] for name in globals()\n if name.endswith('_promo')\n and name != 'best_promo']\n\nprint(promos)\n\nprint('\\n'.join(globals())) # module의 전역변수 딕셔너리를 반환한다.\n\n\n\n\n"
}
] | 40 |
adrianogil/git-tools | https://github.com/adrianogil/git-tools | e5d24ea8d0beec55d21b155e036a130a303e3c8d | 9cb7a85ed8a78df0a92b2d4a22e5a6755e113758 | 3381420c945233df7458864cdd015d4a10942d04 | refs/heads/main | 2023-04-01T10:08:01.608538 | 2023-03-23T23:23:17 | 2023-03-23T23:37:00 | 122,314,442 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7025782465934753,
"alphanum_fraction": 0.7044199109077454,
"avg_line_length": 31.909090042114258,
"blob_id": "debdf9c439dc2a43bfba0306ac29997c3005f1a2",
"content_id": "ef098619d2d7124c4529271f2fe512c35a17b51c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1086,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 33,
"path": "/bashrc.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "source ${GIT_TOOLS_DIR}/git_history.sh\nsource ${GIT_TOOLS_DIR}/git_smart.sh\nsource ${GIT_TOOLS_DIR}/git_files.sh\nsource ${GIT_TOOLS_DIR}/git_config.sh\nsource ${GIT_TOOLS_DIR}/personal_aliases.sh\nsource ${GIT_TOOLS_DIR}/git_repos.sh\nsource ${GIT_TOOLS_DIR}/git_stage.sh\nsource ${GIT_TOOLS_DIR}/git_commit.sh\nsource ${GIT_TOOLS_DIR}/git_internals.sh\nsource ${GIT_TOOLS_DIR}/git_attributes.sh\nsource ${GIT_TOOLS_DIR}/git_navigation.sh\nsource ${GIT_TOOLS_DIR}/git_remote.sh\nsource ${GIT_TOOLS_DIR}/git_branch.sh\nsource ${GIT_TOOLS_DIR}/git_tag.sh\nsource ${GIT_TOOLS_DIR}/git_merge.sh\nsource ${GIT_TOOLS_DIR}/git_unity_dev.sh\nsource ${GIT_TOOLS_DIR}/analysis/git_analysis_tools.sh\nsource ${GIT_TOOLS_DIR}/git_gerrit.sh\n\nif [ -z \"$GITTOOLS_PYTHON_PATH\" ]\nthen\n export GIT_TOOLS_PYTHON_PATH=$GIT_TOOLS_DIR/python/\n export PYTHONPATH=$GIT_TOOLS_PYTHON_PATH:$PYTHONPATH\nfi\n\n# @tool gt-fz: Git Tools\nfunction gt-fz()\n{\n gitaction=$(cat ${GIT_TOOLS_DIR}/git_*.sh | grep '# gtool' | cut -c9- | default-fuzzy-finder | tr \":\" \" \" | awk '{print $1}')\n\n eval ${gitaction}\n}\nalias g=\"gt-fz\"\n"
},
{
"alpha_fraction": 0.7146464586257935,
"alphanum_fraction": 0.7146464586257935,
"avg_line_length": 25.399999618530273,
"blob_id": "ad06b23b7cba60f3849cf42fa491e34ccdd2005f",
"content_id": "7cc8917bc14e0e0e023712e5beb6a76f4b21a503",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 15,
"path": "/python/gittools/abort.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\"\"\" Module responsible for ease abort operation\"\"\"\nfrom . import gittools\nfrom . import clitools\n\n\ngit_status = gittools.get_status()\n\nnext_command = None\nif \"git cherry-pick\" in git_status:\n next_command = \"git cherry-pick --abort\"\nelif \"git rebase\" in git_status:\n next_command = \"git rebase --abort\"\n\nif next_command is not None:\n git_continue_output = clitools.run_cmd(next_command)\n"
},
{
"alpha_fraction": 0.5408560037612915,
"alphanum_fraction": 0.5927367210388184,
"avg_line_length": 37.45000076293945,
"blob_id": "ce2c0a12a563144c1db219a71e470a7c5f0488ca",
"content_id": "45da445f1d3d93914d8b8954599d0fcdbd4b8b47",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 771,
"license_type": "permissive",
"max_line_length": 358,
"num_lines": 20,
"path": "/analysis/git_analysis_tools.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\nfunction gt-analysis-productivity()\n{\n git_flags=$1\n python3 ${GIT_TOOLS_DIR}/analysis/prod_analysis.py $git_flags\n}\n\nfunction gt-analysis-time-of-day()\n{\n target_author=$1\n\n # https://gist.github.com/bessarabov/674ea13c77fc8128f24b5e3f53b7f094\n git log --author=\"$target_author\" --date=iso | perl -nalE 'if (/^Date:\\s+[\\d-]{10}\\s(\\d{2})/) { say $1+0 }' | sort | uniq -c|perl -MList::Util=max -nalE '$h{$F[1]} = $F[0]; }{ $m = max values %h; foreach (0..23) { $h{$_} = 0 if not exists $h{$_} } foreach (sort {$a <=> $b } keys %h) { say sprintf \"%02d - %4d %s\", $_, $h{$_}, \"*\"x ($h{$_} / $m * 50); }'\n}\n\n# gtool gt-stats-by-author: Stats by author\nfunction gt-stats-by-author()\n{\n target_ref=HEAD\n git shortlog ${target_ref} --numbered --summary\n}\n\n"
},
{
"alpha_fraction": 0.4404669404029846,
"alphanum_fraction": 0.45525291562080383,
"avg_line_length": 31.961538314819336,
"blob_id": "9eac91afe03853a0558fc98f25428eb0d6398768",
"content_id": "ad8d28be3c8b72a90d8b864d7a561ed37be44858",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2570,
"license_type": "permissive",
"max_line_length": 131,
"num_lines": 78,
"path": "/git_unity_dev.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "# Unity dev\n\nalias gunity-all='git add Assets/ ProjectSettings/ '\n\nfunction gunity-meta-all()\n{\n f '*.meta' $1 | xargs -I {} git add {}\n}\n\n\nfunction gunity-check-meta()\n{\n ASSETS_DIR=\"$(git config --get unity3d.assets-dir || echo \"Assets\")\"\n\n if git rev-parse --verify HEAD >/dev/null 2>&1\n then\n against=HEAD\n else\n # Initial commit: diff against an empty tree object\n against=4b825dc642cb6eb9a060e54bf8d69288fbee4904\n fi\n\n # Redirect output to stderr.\n exec 1>&2\n\n git -c diff.renames=false diff --cached --name-only --diff-filter=A -z $against -- \"$ASSETS_DIR\" | while read -d $'\\0' f; do\n ext=\"${f##*.}\"\n base=\"${f%.*}\"\n filename=\"$(basename \"$f\")\"\n\n if [ \"$ext\" = \"meta\" ]; then\n if [ $(git ls-files --cached -- \"$base\" | wc -l) = 0 ]; then\n echo \"Meta file \\`$f' is added, but \\`$base' is not in the git index.\"\n return\n fi\n elif [ \"${filename##.*}\" != '' ]; then\n p=\"$f\"\n while [ \"$p\" != \"$ASSETS_DIR\" ]; do\n if [ $(git ls-files --cached -- \"$p.meta\" | wc -l) = 0 ]; then\n echo \"Asset \\`$f' is added, but \\`$p.meta' is not in the git index.\"\n echo \"Please add \\`$p.meta' to git as well.\"\n return\n fi\n p=\"${p%/*}\"\n done\n fi\n done\n\n ret=\"$?\"\n if [ \"$ret\" != 0 ]; then\n exit \"$ret\"\n fi\n\n git -c diff.renames=false diff --cached --name-only --diff-filter=D -z $against -- \"$ASSETS_DIR\" | while read -d $'\\0' f; do\n ext=\"${f##*.}\"\n base=\"${f%.*}\"\n\n if [ \"$ext\" = \"meta\" ]; then\n if [ $(git ls-files --cached -- \"$base\" | wc -l) != 0 ]; then\n echo \"Error: Missing meta file.\"\n echo \"Meta file \\`$f' is removed, but \\`$base' is still in the git index.\"\n echo \"Please revert the beta file or remove the asset file.\"\n return\n fi\n else\n p=\"$f\"\n while [ \"$p\" != \"$ASSETS_DIR\" ]; do\n if [ $(git ls-files --cached -- \"$p\" | wc -l) = 0 ] && [ $(git ls-files --cached -- \"$p.meta\" | wc -l) != 0 ]; then\n echo \"Error: Redudant meta file.\"\n echo \"Asset \\`$f' is removed, but \\`$p.meta' is still in the git index.\"\n echo \"Please remove \\`$p.meta' from git as well.\"\n return\n fi\n p=\"${p%/*}\"\n done\n fi\n done\n}"
},
{
"alpha_fraction": 0.619113564491272,
"alphanum_fraction": 0.6260387897491455,
"avg_line_length": 17.973684310913086,
"blob_id": "6a521b6bb2f3bdae37724c7cc0bbe42ebe96caea",
"content_id": "2e07b715356758c36083a8a6b525225481e65549",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 722,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 38,
"path": "/git_stage.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "alias gs='git status '\n# gtool gt-status\nfunction gt-status()\n{\n\tgit status | less\n}\nalias gss='git status | less'\n\nalias gsu='git status -uno'\nalias ga='git add '\nalias gaf='git add -f '\n\n# gtool gt-add-interactive\nfunction gt-add-interactive()\n{\n\tgit add -i\n}\nalias gai='git add -i'\n\nfunction gs-files()\n{\n # gs-files\n # Git status files\n if [ -z \"$1\" ]\n then\n git status --porcelain | awk '{print $2}'\n else\n extension=$1\n git status --porcelain | awk '{print $2}' | grep \\.$extension\n fi\n}\n\n# gtool gt-add-default-fuzzy-finder - Add file to be staged (alias gdefault-fuzzy-finder)\nfunction gt-add-fz()\n{\n git add $(gs-files $1 | default-fuzzy-finder)\n}\nalias gak=\"gt-add-fz\"\n\n"
},
{
"alpha_fraction": 0.5751789808273315,
"alphanum_fraction": 0.5799522399902344,
"avg_line_length": 28.928571701049805,
"blob_id": "b29b3902502f5afa0f92f2ae9e07c1f35c66f6f1",
"content_id": "5108f1d8a6b66c1fb19d8c4667ea0b685a221982",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 14,
"path": "/python/gittools/config/root.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from subprocess import *\n\nimport os\n\n\ndef get_git_root(p):\n \"\"\"Return None if p is not in a git repo, or the root of the repo if it is\"\"\"\n if call([\"git\", \"branch\"], stderr=STDOUT, stdout=open(os.devnull, 'w'), cwd=p) != 0:\n return None\n else:\n root = check_output([\"git\", \"rev-parse\", \"--show-toplevel\"], cwd=p)\n root = root.decode(\"utf8\")\n root = root.strip()\n return root\n"
},
{
"alpha_fraction": 0.6489226818084717,
"alphanum_fraction": 0.6514575481414795,
"avg_line_length": 30.479999542236328,
"blob_id": "33c8d4f5bf8c6483e5c38d8337f05877efb56aed",
"content_id": "f65bb915e217811cfcf2b3e331d65ddb3c9266a5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 25,
"path": "/python/gittools/cli/removeremotename.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n\ndef remove_remote_name(complete_branch_name):\n if '/' in complete_branch_name:\n branch_index = complete_branch_name.index('/')\n complete_branch_name = complete_branch_name[branch_index + 1:]\n return complete_branch_name\n\n\ndef get_remote_name(complete_branch_name):\n if '/' in complete_branch_name:\n branch_index = complete_branch_name.index('/')\n remote_name = complete_branch_name[:branch_index]\n return remote_name\n\n\nif __name__ == '__main__':\n import sys\n\n complete_branch_name = sys.argv[1]\n\n if '--get-only-remote' in sys.argv:\n remote_name = get_remote_name(complete_branch_name)\n print(remote_name)\n else:\n complete_branch_name = remove_remote_name(complete_branch_name)\n print(complete_branch_name)\n"
},
{
"alpha_fraction": 0.6011659502983093,
"alphanum_fraction": 0.6096090078353882,
"avg_line_length": 22.801435470581055,
"blob_id": "5f6023896a95b7f70cd3884dbc87d4ebca4ab8e1",
"content_id": "94aee6ca510329aa68e6f271be3cff2112e8d202",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 9949,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 418,
"path": "/git_config.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "# !/bin/bash\n# gtool gt-continue: Continue a rebase or cherry-pick operationx\nalias gcontinue=\"python3 -m gittools.continue\"\nalias gt-continue=\"gcontinue\"\n\nalias gabort=\"python3 -m gittools.abort\"\nalias gt-abort=\"gcontinue\"\n\n# gtool gt-config-show: Show the config file related to current git file\nfunction gt-config-show()\n{\n cat $(gt-get-root-path)/.git/config\n}\n\n# gtool gt-zip-repo: Zips a git commit\nfunction gt-zip-repo()\n{\n zip_name=$1\n if [ -z \"$2\" ]\n then\n target_ref=HEAD\n else\n target_ref=$2\n fi\n git archive -o ${zip_name}.zip ${target_ref}\n}\n\nfunction gt-cktout()\n{\n target_branch=$(git branch -r | cut -c3- | default-fuzzy-finder)\n echo \"Let's track a new branch: \"$target_branch\n git checkout --track ${target_branch}\n}\n\n\nfunction gt-send-branch()\n{\n if [ -z \"$2\" ]\n then\n if [ -z \"$1\" ]\n then\n complete_branch_name=$(gt-branches-fz)\n target_branch=$(python3 -m gittools.cli.removeremotename ${complete_branch_name})\n target_remote=$(python3 -m gittools.cli.removeremotename ${complete_branch_name} --get-only-remote)\n else\n target_branch=$1\n target_remote=$(git remote | default-fuzzy-finder)\n fi\n else\n target_branch=$1\n target_remote=$2\n fi\n\n if [ -z ${GIT_TOOLS_ALLOW_PUSH_TO_MAIN_BRANCH} ]; then\n # variable not set\n if [[ \"$target_branch\" == *\"main\"* ]]; then\n echo \"For the main branch you need to push it manually (to avoid mistakes)\"\n echo \"or set the variable 'GIT_TOOLS_ALLOW_PUSH_TO_MAIN_BRANCH'\"\n return 1\n fi\n fi\n\n echo \"Sending commits to branch \"${target_branch}\" on remote \"${target_remote}\n\n current_branch=$(git rev-parse --abbrev-ref HEAD)\n git push ${target_remote} ${current_branch}:${target_branch}\n}\nalias gp='gt-send-branch'\n\nfunction gt-send-branch-force()\n{\n if [ -z \"$1\" ]\n then\n target_branch=$(gbko)\n else\n target_branch=$1\n fi\n\n if [ -z ${GIT_TOOLS_ALLOW_PUSH_TO_MAIN_BRANCH} ]; then\n # variable not set\n if [[ \"$target_branch\" == *\"main\"* ]]; then\n echo \"For the main branch you need to push it manually (to avoid mistakes)\"\n echo \"or set the variable 'GIT_TOOLS_ALLOW_PUSH_TO_MAIN_BRANCH'\"\n return 1\n fi\n fi \n\n echo \"Sending commits to branch \"${target_branch}\n\n current_branch=$(git rev-parse --abbrev-ref HEAD)\n git push origin --force ${current_branch}:${target_branch}\n}\n\nfunction gt-branches-fz()\n{\n if [[ $(git branch -r | grep -v \"/HEAD \" | wc -l) -le 1 ]]; then\n git branch -r | grep -v \"/HEAD \" | cut -c3- | head -1\n else\n git branch -r | grep -v \"/HEAD \" | cut -c3- | default-fuzzy-finder\n fi\n \n}\nalias gbk='gt-branches-fz'\n\nfunction gt-branches-origin-fz()\n{\n complete_branch_name=$(gt-branches-fz)\n only_branch_name=$(python3 -m gittools.cli.removeremotename ${complete_branch_name})\n echo ${only_branch_name}\n}\nalias gbko='gt-branches-origin-fz'\n\nTMP_BUFFER_LAST_FETCH=/tmp/last_fetch\nfunction gt-fetch-save-buffer()\n{\n target_buffer_file=${TMP_BUFFER_LAST_FETCH}_$(basename $PWD).txt\n echo \"\" > ${target_buffer_file}\n git fetch $1 -v >& ${target_buffer_file}\n cat ${target_buffer_file}\n echo \"Saving git remote \"$(date +%F-%H:%M)\":\" >> ${target_buffer_file}\n echo \"\" >> ${target_buffer_file}\n echo \"Log saved at \"${target_buffer_file}\n}\nfunction gt-fetch-last()\n{\n cat $TMP_BUFFER_LAST_FETCH$(basename $PWD).txt\n}\n\n# gtool gt-fetch: Fetch new commits\nfunction gt-fetch()\n{\n if [ -z \"$1\" ]\n then\n if [[ $(git remote | wc -l) -le 1 ]]; then\n target_remote=$(git remote | head -1)\n else\n 
target_remote=$(git remote | default-fuzzy-finder)\n fi\n else\n target_remote=$1\n fi\n\n git remote update ${target_remote}\n\n # Updating tracking everytime the repo is updated\n gt-tracking-update\n}\nalias gr='gt-fetch'\nalias gro='gt-fetch origin'\nalias gr-last='gt-fetch-last'\n\nalias gt-get-root-path='git rev-parse --show-toplevel'\n# gtool gt-root: Go to root level of current repo\nfunction gt-root()\n{\n cd $(gt-get-root-path)\n}\nalias groot=\"gt-root\"\n\nfunction gs-count()\n{\n echo $(gs-files $1 | wc -l)\n}\n\nfunction ghard-reset()\n{\n # ghard-reset $target_commit\n if [ -z \"$1\" ]\n then\n target_commit=$(git for-each-ref --format='%(upstream:short)' $(git symbolic-ref -q HEAD))\n else\n target_commit=$1\n fi\n\n echo 'Git hard reset to ref '$target_commit\n git reset --hard $target_commit\n}\n\nfunction ghard-reset-fz()\n{\n target_commit=$(git branch -a | cut -c3- | default-fuzzy-finder)\n\n echo 'Git hard reset to ref '${target_commit}\n git reset --hard ${target_commit}\n}\nalias ghrk=\"ghard-reset-fz\"\n\nfunction ghard-reset-tags()\n{\n # ghard-reset $target_commit\n ghard-reset $(git tag -l | default-fuzzy-finder)\n}\nalias ghrt=\"ghard-reset-tags\"\n\nfunction ghard-reset-flog()\n{\n target_commit=$(gflog | default-fuzzy-finder | awk '{print $1}')\n ghard-reset ${target_commit}\n}\n\n# Based on http://scriptedonachip.com/git-sparse-checkout\nfunction gsparse-checkout()\n{\n git_url=$1\n target_folder=$2\n total_commits=$3\n\n git init\n git remote add origin $git_url\n git config core.sparsecheckout true\n echo $target_folder\"/*\" >> .git/info/sparse-checkout\n git pull --depth=$total_commits origin master\n}\n\nfunction ghard-reset-head()\n{\n ghard-reset HEAD\n}\n\nalias gupdate-hard=\"gr && ghard-reset\"\n\n# gtool gol: git clone and enter repo directory\nfunction gol()\n{\n # git clone and enter repo directory\n git_url=$1\n\n if [ -z \"$2\" ]\n then\n git_repo=$(basename $git_url)\n git_repo=${git_repo%.*}\n git clone $git_url\n cd $git_repo\n else\n git_repo=$2\n git clone $git_url $git_repo\n cd $git_repo\n fi\n}\n\nfunction golp()\n{\n # git clone with depth 1 and enter repo directory\n git_url=$1\n git_repo=$(basename $git_url)\n git_repo=${git_repo%.*}\n\n git clone $git_url --depth 1\n cd $git_repo\n}\n\nfunction gnew-commits()\n{\n if [ -z \"$1\" ]\n then\n target_commit=$(git for-each-ref --format='%(upstream:short)' $(git symbolic-ref -q HEAD))\n else\n target_commit=$1\n fi\n\n new_commits=$(git log HEAD..$target_commit --pretty=oneline| wc -l)\n\n echo $new_commits\" new commits\"\n}\n\n# gtool gcount: count commits in current ref\nfunction gcount()\n{\n total_commits=$(gh $1 | wc -l)\n echo 'There are'$total_commits' commits in current local branch'\n}\n\nfunction gcount-today()\n{\n total_commits=$(gh --since=\"1am\" | wc -l)\n echo 'Today, there are'$total_commits' commits in current local branch'\n}\n\n# gtool gcount-commits: count commits between two refs\nfunction gcount-commits()\n{\n old_commit=$1\n new_commit=$2\n\n number_commits=$(($(git rev-list --count $old_commit..$new_commit) - 1))\n\n echo 'There are '$number_commits' commits of difference between revisions'\n}\n\nfunction gcountbranches()\n{\n python3 $GIT_TOOLS_DIR/python/gcount_branch.py $1 $2\n}\n\n# gtool gstats-short: get commit stats\nfunction gstats-short()\n{\n git log --author=\"$1\" --oneline --shortstat $2\n}\n\nfunction random-commit-msg()\n{\n # generate a random commit message\n curl -s whatthecommit.com/index.txt\n}\n\n# gtool gcreate-random-commits: create random 
commits\nfunction gcreate-random-commits()\n{\n \n if [ -z \"$1\" ]\n then\n number_commits=1\n else\n number_commits=$1\n fi\n\n for i in `seq 1 ${number_commits}`;\n do\n number_files=$(( ( RANDOM % 20 ) + 1 ))\n\n for i in `seq 1 ${number_commits}`;\n do\n text_name_n1=$(( ( RANDOM % 10 ) + 1 ))\n text_name_n2=$(( ( RANDOM % 10 ) + 1 ))\n text_name_n3=$(( ( RANDOM % 10 ) * $text_name_n1 + $text_name_n2 ))\n text_file_name='text_file_'$text_name_n3'.txt'\n echo $text_file_name >> $text_file_name\n git add $text_file_name\n done\n # generate random messages\n git commit -m \"$(random-commit-msg)\"\n done\n}\n\n# Git Internals\n# function gstats-repo()\n# {\n# echo $1\n# }\n\n# gtool gremove-from-tree: remote file from git tree\nfunction gremove-from-tree()\n{\n remove_target=$1\n git filter-branch -f --tree-filter \"rm -rf $remove_target\" --prune-empty HEAD\n}\n\n# gtool gignore-file: add file to .gitignore file\nfunction gignore-file()\n{\n if [ -z \"$1\" ]\n then\n target_file=$(git ls-files --others --exclude-standard | default-fuzzy-finder)\n else\n target_file=$1\n fi\n\n python3 ${GIT_TOOLS_DIR}/python/gignore_file.py $(abspath $target_file)\n}\n\n# gtool gt-list-untracked-files: list untracked files\nfunction gt-list-untracked-files()\n{\n git ls-files --others --exclude-standard\n}\n\n# gtool gopen-commit-files-in-sublime: open commit files in sublime (alias gts)\nfunction gopen-commit-files-in-sublime()\n{\n if [ -z \"$1\" ]\n then\n target_ref=HEAD\n else\n target_ref=$1\n fi\n\n current_dir=$PWD\n\n gt-root\n\n for file_name in `git diff-tree --no-commit-id --name-only -r ${target_ref}`;\n do\n s $file_name\n done\n\n cd ${current_dir}\n}\nalias gts=\"gopen-commit-files-in-sublime\"\n\n# gtool gt-config-user: configure user name and email\nfunction gt-config-user()\n{\n username=$1\n email=$2\n\n if [ -z \"$username\" ]\n then\n echo \"Enter your Git username (default: gituser):\"\n read username\n username=${username:-gituser}\n fi\n\n if [ -z \"$email\" ]\n then\n echo \"Enter your Git email (default: [email protected]):\"\n read email\n email=${email:[email protected]}\n fi\n\n echo \"name: \"${username}\n echo \"email: \"${email}\n\n git config --global user.name \"$username\"\n git config --global user.email \"$email\"\n\n echo \"Git user name and email set successfully!\"\n}\n"
},
{
"alpha_fraction": 0.7130681872367859,
"alphanum_fraction": 0.7130681872367859,
"avg_line_length": 24.071428298950195,
"blob_id": "1d25d444c9129e67bd0b27ba50633d1a767a345d",
"content_id": "ec4f2e17d93304e03e0f14f49aaf3a62151658ae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 352,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 14,
"path": "/python/gittools/continue.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\nfrom . import gittools\nfrom . import clitools\n\n\ngit_status = gittools.get_status()\n\nnext_command = None\nif \"git cherry-pick\" in git_status:\n next_command = \"git cherry-pick --continue\"\nelif \"git rebase\" in git_status:\n next_command = \"git rebase --continue\"\n\nif next_command is not None:\n git_continue_output = clitools.run_cmd(next_command)\n"
},
{
"alpha_fraction": 0.5448979735374451,
"alphanum_fraction": 0.5489795804023743,
"avg_line_length": 19.375,
"blob_id": "1d14ba55373e8392f2b2c68e92d6218044c1e047",
"content_id": "b9011d85672626bdc3110078d313c3978cf6cb0e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 490,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 24,
"path": "/git_branch.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# gtool gt-delete-branch: Delete a target branch (local and remotely)\nfunction gt-delete-branch()\n{\n if [ -z \"$1\" ]\n then\n target_branch=$(gbko)\n else\n target_branch=$1\n fi\n\n if [ -z \"$target_branch\" ]\n then\n echo \"Branch to be deleted:\"\n read target_branch\n fi\n\n if [ -z \"$target_branch\" ]\n then\n echo \"No branch selected\"\n else\n git push origin :${target_branch}\n git branch -d ${target_branch}\n fi\n}\n"
},
{
"alpha_fraction": 0.5974025726318359,
"alphanum_fraction": 0.5974025726318359,
"avg_line_length": 20,
"blob_id": "09512deef78876afdd6be8697e778fe762b2864c",
"content_id": "a4a5318bbdbc77919ba3062d69e96412bfd6cba9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 231,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 11,
"path": "/python/gittools/history/log.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from gittools.clitools import run_cmd\n\n\ndef get_hash_log(args=None):\n if args is None:\n args = []\n\n hashes = run_cmd('git log --pretty=format:\"%h\" ' + \" \".join(args))\n hashes = hashes.split(\"\\n\")\n\n return hashes\n"
},
{
"alpha_fraction": 0.6282973885536194,
"alphanum_fraction": 0.6291966438293457,
"avg_line_length": 34.11579132080078,
"blob_id": "12406483a443f27978365a5df17d07c6bd1cc937",
"content_id": "f4e4db0075dea27006d4dcb1edb0548fd6ce5d3d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3336,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 95,
"path": "/python/gittools/commits/tracking/update.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\"\"\" \"\"\"\nimport datetime\n\nfrom gittools import clitools\nfrom gittools.commits import get_diverge_commits, get_total_commits\nfrom gittools.config.root import get_git_root\n\nimport json\nimport os\n\n\ndef update_tracking():\n current_path = os.getcwd()\n root_path = get_git_root(current_path)\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n tracking_json_path = os.environ[\"GIT_TOOLS_TRACKING_JSON\"]\n tracking_branches_data = {}\n tracking_data = {\n root_path: tracking_branches_data\n }\n\n # Update tracking data according to saved JSON file\n if os.path.exists(tracking_json_path):\n with open(tracking_json_path, 'r') as json_file:\n tracking_data = json.load(json_file)\n if root_path in tracking_data:\n tracking_branches_data = tracking_data[root_path]\n\n # Get list of remote branches\n branches = clitools.run_cmd(\"git branch -r \")\n branches = branches.split('\\n')\n\n remote_branches = []\n for b in branches:\n if '->' not in b:\n remote_branches.append(b.strip())\n\n # print(remote_branches) # For debug purposes\n\n current_hashes_by_branch = {}\n\n # Get current hash for each remote branch\n for remote_branch_name in remote_branches:\n try:\n git_hash = clitools.run_cmd(\"git rev-parse \" + remote_branch_name)\n current_hashes_by_branch[remote_branch_name] = git_hash\n except Exception as exception:\n print(\"error\" + str(exception))\n\n new_hashes_by_branch = {}\n last_hashes_by_branch = {}\n\n # Update tracking data\n for remote_branch_name in current_hashes_by_branch:\n current_hash = current_hashes_by_branch[remote_branch_name]\n\n tracking_branch_history = []\n if remote_branch_name in tracking_branches_data:\n tracking_branch_history = tracking_branches_data[remote_branch_name]\n\n # Check if hash is new in branch history\n if tracking_branch_history:\n last_branch_data = tracking_branch_history[-1]\n last_branch_hash = last_branch_data['hash']\n last_hashes_by_branch[remote_branch_name] = last_branch_hash\n if last_branch_hash == current_hash:\n continue\n\n new_hashes_by_branch[remote_branch_name] = current_hash\n # Add hash to branch history\n current_branch_data = {\n 'date': current_date,\n 'hash': current_hash\n }\n tracking_branch_history.append(current_branch_data)\n tracking_branches_data[remote_branch_name] = tracking_branch_history\n\n if new_hashes_by_branch:\n tracking_data[root_path] = tracking_branches_data\n\n print(\"Updates since last tracking:\")\n for branch in new_hashes_by_branch:\n if branch not in last_hashes_by_branch or last_hashes_by_branch[branch] is None:\n print(\"%s - %s commits\" % (branch, get_total_commits(new_hashes_by_branch[branch])))\n else:\n total_diverge = get_diverge_commits(new_hashes_by_branch[branch], last_hashes_by_branch[branch])\n print(\"%s - %s commits\" % (branch, int(total_diverge) + 1))\n\n with open(tracking_json_path, 'w') as json_file:\n json.dump(tracking_data, json_file, indent=4)\n\n\nif __name__ == '__main__':\n update_tracking()\n"
},
{
"alpha_fraction": 0.6085754036903381,
"alphanum_fraction": 0.658367931842804,
"avg_line_length": 21.59375,
"blob_id": "972c56de9319f723de5738e19a6fecbca60d05ae",
"content_id": "d9743931a23caf60d2b2fdbe14ad82945de04661",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 723,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 32,
"path": "/python/gittools/history/commitsdiff.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from gittools.history.log import get_hash_log\n\n\ndef get_commits_diff(ref1, ref2):\n\thashes_ref1 = get_hash_log([ref1])\n\thashes_ref2 = get_hash_log([ref2])\n\n\tdiff_hash1 = []\n\tfor hash1 in hashes_ref1:\n\t\tif hash1 not in hashes_ref2:\n\t\t\tdiff_hash1.append(hash1)\n\t\n\tprint(\"Missing commits from \" + str(ref1) + \" (%s commits)\" % (len(diff_hash1)))\n\tfor hash1 in diff_hash1:\n\t\tprint(hash1)\n\n\tdiff_hash2 = []\n\tfor hash2 in hashes_ref2:\n\t\tif hash2 not in hashes_ref1:\n\t\t\tdiff_hash2.append(hash2)\n\tprint(\"Missing commits from \" + str(ref1) + \" (%s commits)\" % (len(diff_hash2)))\n\tfor hash2 in diff_hash2:\n\t\tprint(hash2)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\tref1 = sys.argv[1]\n\tref2 = sys.argv[2]\n\n\tget_commits_diff(ref1, ref2)\n"
},
{
"alpha_fraction": 0.55524080991745,
"alphanum_fraction": 0.5637393593788147,
"avg_line_length": 21,
"blob_id": "dfd81ff78dd1ac1c6b436f8136fa34bb2b35cff8",
"content_id": "880a9e34dad6dbddfcf491f0d238f5d109431502",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 353,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 16,
"path": "/git_remote.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# gtool gt-rename-url - Rename url of a target remote\nfunction gt-rename-url()\n{\n if [ -z \"$2\" ]\n then\n echo \"Type target remote name: \"\n read remote_name\n echo \"Type new remote URL: \"\n read remote_url\n else\n remote_name=$1\n remote_url=$2\n fi\n\n git remote set-url ${remote_name} ${remote_url}\n}\n"
},
{
"alpha_fraction": 0.6711339950561523,
"alphanum_fraction": 0.6783505082130432,
"avg_line_length": 26.714284896850586,
"blob_id": "af9dbdb9d65ffbe32fff38ab39d12d3b3a357fbf",
"content_id": "5b413b2b6525f02a8143c431b9872a17f3b0ba3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1940,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 70,
"path": "/personal_aliases.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "alias gbupstream='git branch --set-upstream-to=origin/master'\nalias gbranch='git branch'\nalias gc='git commit '\nalias gm='git commit -m '\nalias gca='git commit --amend '\nalias gcg=\"git commit --author='Adriano Gil <[email protected]>'\"\nalias gd='git diff '\nalias gdc='git diff --cached'\nalias gk='gitk --all&'\nalias gx='gitx --all'\nalias gr-tags='git fetch --tags'\nalias gw='git whatchanged --pretty=oneline'\nalias greset='git reset '\nalias gsoft-reset='git reset '\nalias ghrme='git reset --hard HEAD'\nalias gshow='git show '\nalias gcereja='git cherry-pick '\nalias gflog=\"git reflog --format='%C(auto)%h %<|(17)%gd %C(blue)%ci%C(reset) %s'\"\n\nalias grb=\"gr && gb\"\nalias grbp=\"gr && gb && gp\"\n\nalias gco='git checkout '\nalias gckout='git checkout'\nalias gckt='git checkout --track'\n\n# alias gp='echo \"Lets push to repo\" && git push'\nalias gpupstream='git push --set-upstream origin master'\n\nalias gpick='python3 -m gittools.pick'\nalias gsquash='python3 $GIT_TOOLS_DIR/python/git_squash.py'\n\nalias gil='git '\n\nalias gl1='git log -1'\nalias gw1='git whatchanged -1 '\n\nalias gremotes=\"git remote -v\"\n\nalias gignore-file-hard='git update-index --assume-unchanged '\n\nalias git-author-update=\"gc --amend --author='Adriano Gil <[email protected]>'\"\n\n# Specific command related to my own scripts that exchange commits and CL between P4 and git repos\nalias perforce-push='git push local master:perforce-master'\n\n# Generate commit message\nfunction gcm()\n{\n commit_message=\"Updated changes at \"$(date +%F-%H:%M)\n echo \"Generating commit: \"$commit_message\n gc -m \"$commit_message\"\n}\n\nfunction gpush2gerrit()\n{\n if [ -z \"$1\" ]\n then\n target_branch=$(git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \\(.*\\)/\\1/ ')\n\n remote=$(git config \"branch.${target_branch}.remote\")\n else\n target_commit=$1\n remote=origin\n fi\n\n git push $remote HEAD:refs/for/$target_branch\n}\n\nalias gfind-big-files=$HOME'/Softwares/git/findbig/git_find_big.sh'\n"
},
{
"alpha_fraction": 0.7180851101875305,
"alphanum_fraction": 0.7234042286872864,
"avg_line_length": 22.5,
"blob_id": "e66a55adf40618ce9042083d37490b980ac07cf5",
"content_id": "eb1d604b593a11f7bdc6c78b750a0670028a3d07",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 188,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 8,
"path": "/git_internals.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "alias gdetails-obj-count='git count-objects -v '\nalias gt-internals-obj-count='git count-objects -v '\n\nfunction gdetails()\n{\n git cat-file -p $1\n}\nalias gt-internals-details=\"gdetails\"\n"
},
{
"alpha_fraction": 0.7012194991111755,
"alphanum_fraction": 0.7012194991111755,
"avg_line_length": 19.5,
"blob_id": "b94d99d4194b41d888cfc913630067ef84b9a10c",
"content_id": "b064ff8e6e2f93f2bd7443a53eefd4a9f0da00b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 8,
"path": "/python/gittools/gittools.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "import gittools.clitools as clitools\n\n\ndef get_status():\n cmd = \"git status\"\n get_git_status_output = clitools.run_cmd(cmd)\n\n return get_git_status_output\n"
},
{
"alpha_fraction": 0.6903553009033203,
"alphanum_fraction": 0.6964467167854309,
"avg_line_length": 24.921052932739258,
"blob_id": "ddc3b06541522d830ad9f81d096fa7b141f54617",
"content_id": "3b4c5c3df937e1017365266be7eff89ff2836787",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 985,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 38,
"path": "/git_tag.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# I have the habit of creating in each git workspace a local tag 'local/props'\n# with my local modification. So I can use this command to quickly load all\n# my private settings\n# gtool gt-load-local-properties: Load my local properties\nalias gt-load-local-properties='git cherry-pick local/props && git reset HEAD~1'\n\n# gtool gt-save-local-properties: Save my local properties\nfunction gt-save-local-properties()\n{\n git cherry-pick local/props\n}\n\n# gtool gt-bkp: Generate a backup tag\nfunction gt-bkp()\n{\n tag_sufix=$1\n bkp_tag=bkp-$(date +%F)${tag_sufix}\n echo \"Generating git tag BKP: \"$bkp_tag\n git tag $bkp_tag\n}\n\n# gtool gt-list-bkp: List backup tags\nfunction gt-tags-bkp()\n{\n git tag -l 'bkp-*' -n1\n}\n\n# gtool gt-tags: List all tags\nfunction gt-tags()\n{\n git tag -l -n1\n}\n\n# gtool gt-tags-by-date: List all tags ordered by date\nfunction gt-tags-by-date()\n{\n git for-each-ref --sort=creatordate --format '%(refname) %(creatordate)' refs/tags | cut -c11-\n}"
},
{
"alpha_fraction": 0.6203333139419556,
"alphanum_fraction": 0.6273333430290222,
"avg_line_length": 31.96703338623047,
"blob_id": "2a4badec7f31de028e7438a9ab132277fd9da658",
"content_id": "f778df59906f26bb8cf81f83f372325fc0c0d8be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3000,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 91,
"path": "/analysis/prod_analysis.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "import sys, os\nimport subprocess\nfrom git_tools import git_tools\nimport matplotlib.pylab as pylab\n\ncurrent_dir = os.getcwd()\n\nauthors_data = {}\n\ndef commit_analysis(commit_hash, initial_date):\n # print(\"commit: \" + commit_hash)\n\n author_name_cmd = \"git log -1 --pretty=format:'%an' \" + commit_hash\n author_name_output = subprocess.check_output(author_name_cmd, shell=True)\n author_name_output = author_name_output.decode(\"utf8\")\n author_name_output = author_name_output.strip()\n\n author_name = author_name_output\n\n commit_data = {}\n\n # total_line_changed_cmd = \"git log \" + commit_hash + \" -1 --pretty=tformat: --numstat | awk '{ loc += $1 + $2 } END { printf \\\"%s\\\", loc }'\"\n # total_line_changed_output = subprocess.check_output(total_line_changed_cmd, shell=True)\n # total_line_changed_output = total_line_changed_output.decode(\"utf8\")\n # total_line_changed_output = total_line_changed_output.strip()\n\n # commit_data['commit_size'] = int(total_line_changed_output)\n date_diff = (git_tools.get_commit_date(current_dir, commit_hash) - initial_date)\n commit_data['mins'] = date_diff.seconds / 60.0 + date_diff.days * 24 * 60\n\n # print(str(git_tools.get_commit_date(current_dir, commit_hash) - initial_date))\n # print(str(git_tools.get_commit_date(current_dir, commit_hash)))\n # print(str(initial_date))\n\n if author_name in authors_data:\n authors_data[author_name].append(commit_data)\n else:\n authors_data[author_name] = [commit_data]\n\n\ndef plot_repo_data(repo_data):\n\n colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', \"xkcd:crimson\", \"xkcd:lavender\"]\n cindex = 0\n for author in repo_data.keys():\n days = []\n commit_size = []\n\n total_size = 0\n\n repo_data[author] = sorted(repo_data[author], key=lambda x: x['mins'], reverse=False)\n\n for c in repo_data[author]:\n # total_size += c['commit_size']\n total_size += 1\n days.append(c['mins'])\n commit_size.append(total_size)\n\n pylab.plot(days, commit_size, '-o', color=colors[cindex % len(colors)], label=author)\n cindex += 1\n\n pylab.legend(loc='upper left')\n pylab.show()\n\n\nif __name__ == \"__main__\":\n # print(str(sys.argv))\n\n git_flags = \"\"\n if len(sys.argv) > 1:\n for a in range(1, len(sys.argv)):\n git_flags += sys.argv[a] + \" \"\n else:\n git_flags = \"HEAD\"\n\n git_hashes_cmd = \"git rev-list \" + git_flags\n\n print(git_hashes_cmd)\n\n git_hashes_output = subprocess.check_output(git_hashes_cmd, shell=True)\n git_hashes_output = git_hashes_output.decode(\"utf8\")\n git_hashes_output = git_hashes_output.strip()\n\n git_hashes = git_hashes_output.split(\"\\n\")\n\n initial_date = git_tools.get_commit_date(current_dir, git_hashes[-1])\n\n for h in reversed(git_hashes):\n commit_analysis(h, initial_date)\n\n plot_repo_data(authors_data)\n"
},
{
"alpha_fraction": 0.6607687473297119,
"alphanum_fraction": 0.6668700575828552,
"avg_line_length": 31.156862258911133,
"blob_id": "62583e45808616f89c3a11fb05e58f8ce51b12b8",
"content_id": "4a642480c0efa2e27283e6b5009f9046816e67d1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1639,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 51,
"path": "/python/git_squash.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport sys\nimport subprocess\n\ndef is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n return False\n\ncommits_order = []\n\nmin_commit_backtrack = subprocess.check_output(\"git log --oneline | wc -l\", shell=True)\nmin_commit_backtrack = min_commit_backtrack.decode(\"utf8\").strip()\nmax_commit_backtrack = 0\n\nfor i in range(1, len(sys.argv)):\n if is_int(sys.argv[i]):\n commits_order_number = int(sys.argv[i])\n if commits_order_number > max_commit_backtrack:\n max_commit_backtrack = commits_order_number\n if commits_order_number < min_commit_backtrack:\n min_commit_backtrack = commits_order_number\n\n commit = subprocess.check_output(\"git rev-parse --short HEAD~\" + sys.argv[i], shell=True)\n commit = commit.decode(\"utf8\").strip()\n commits_order.append(commit)\n print('debug: ' + commit)\n\nmax_commit_backtrack = max_commit_backtrack + 1\n\ncommits = []\nfor i in range(0, max_commit_backtrack):\n commit = subprocess.check_output(\"git rev-parse --short HEAD~\" + str(i), shell=True)\n commit = commit.decode(\"utf8\").strip()\n commits.append(commit)\n\nif min_commit_backtrack > 0:\n subprocess.check_output(\"git reset --hard HEAD~\" + str(min_commit_backtrack), shell=True)\nsubprocess.check_output(\"git reset HEAD~\" + str(max_commit_backtrack), shell=True)\n\nprint('max_commit_backtrack: ' + str(max_commit_backtrack))\n\nfor c in range(0, len()):\n subprocess.check_output(\"git cherry-pick \" + c, shell=True)\n\nfor c in reversed(commits_order):\n subprocess.check_output(\"git cherry-pick \" + c, shell=True)"
},
{
"alpha_fraction": 0.5496045351028442,
"alphanum_fraction": 0.5554802417755127,
"avg_line_length": 28.31125831604004,
"blob_id": "e22ed5f45c2606b0c9555f246411b244dc025961",
"content_id": "65cd3c0497eacec5195efa9222a43f4e6a974382",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4425,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 151,
"path": "/python/gcount_branch.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "import sys, subprocess\n\nimport utils\n\n\ndef count_commits_in_branch(args=[], extra_args=[]):\n branches = subprocess.check_output(\"git branch -r \", shell=True)\n branches = branches.decode(\"utf8\").strip().split('\\n')\n\n remote_branches = []\n for b in branches:\n if not '->' in b:\n remote_branches.append(b.strip())\n\n # print(remote_branches)\n\n for b in remote_branches:\n try:\n total_commits = subprocess.check_output(\"git log --oneline --graph \" + b + \" | wc -l\", shell=True)\n total_commits = total_commits.decode(\"utf8\").strip()\n\n extra_info = \"\"\n\n if '-t' in extra_args:\n first_commit = subprocess.check_output(\"git rev-list --max-parents=0 HEAD --abbrev-commit\", shell=True)\n first_commit = first_commit.decode(\"utf8\").strip()\n\n last_commit = subprocess.check_output(\"git rev-list HEAD --abbrev-commit -1\", shell=True)\n last_commit = last_commit.decode(\"utf8\").strip()\n\n first_commit_date = subprocess.check_output(\"git log -1 --format=\\%ai \" + first_commit, shell=True)\n first_commit_date = first_commit_date.decode(\"utf8\").strip()\n\n last_commit_date = subprocess.check_output(\"git log -1 --format=\\%ai \" + last_commit, shell=True)\n last_commit_date = last_commit_date.decode(\"utf8\").strip()\n\n extra_info = '[' + first_commit_date + ' - ' + last_commit_date + ']' \n\n\n print(b + \": \" + total_commits + ' commits ' + extra_info)\n except:\n pass\n\ndef count_commits_in_branch_and_sort(args=[], extra_args=[]):\n branches = subprocess.check_output(\"git branch -r \", shell=True)\n branches = branches.decode(\"utf8\").strip().split('\\n')\n\n remote_branches = []\n for b in branches:\n if not '->' in b:\n remote_branches.append(b.strip())\n\n branches_count = []\n\n for b in remote_branches:\n try:\n total_commits = subprocess.check_output(\"git log --oneline --graph \" + b + \" | wc -l\", shell=True)\n total_commits = total_commits.decode(\"utf8\").strip()\n\n branches_count.append((b,total_commits))\n # print(b + \": \" + total_commits)\n except:\n pass\n\n def get_key(item):\n return int(item[1])\n\n sorted(branches_count, key=get_key)\n\n for b in branches_count:\n print(b[0] + \": \" + b[1])\n\ndef count_commits_in_branch_and_reversesort(args=[], extra_args=[]):\n branches = subprocess.check_output(\"git branch -r \", shell=True)\n branches = branches.decode(\"utf8\").strip().split('\\n')\n\n remote_branches = []\n for b in branches:\n if not '->' in b:\n remote_branches.append(b.strip())\n\n branches_count = []\n\n for b in remote_branches:\n try:\n total_commits = subprocess.check_output(\"git log --oneline --graph \" + b + \" | wc -l\", shell=True)\n total_commits = total_commits.decode(\"utf8\").strip()\n\n branches_count.append((b,total_commits))\n # print(b + \": \" + total_commits)\n except:\n pass\n\n def get_key(item):\n return int(item[1])\n\n branches_count = sorted(branches_count, key=get_key, reverse=True)\n\n for b in branches_count:\n print(b[0] + \": \" + b[1])\n\n\ndef handle_no_args():\n # print(\"Default mode\\n\")\n count_commits_in_branch()\n\ncommands_parse = {\n '-rs' : count_commits_in_branch_and_reversesort,\n '-s' : count_commits_in_branch_and_sort,\n 'no-args' : handle_no_args,\n}\n\ndef parse_arguments():\n\n args = {}\n\n last_key = ''\n\n if len(sys.argv) == 1:\n handle_no_args()\n return None\n\n for i in range(1, len(sys.argv)):\n a = sys.argv[i]\n if a[0] == '-' and not utils.is_float(a):\n last_key = a\n args[a] = []\n elif last_key != '':\n arg_values = args[last_key]\n arg_values.append(a)\n 
args[last_key] = arg_values\n\n return args\n\ndef parse_commands(args):\n if args is None:\n return\n\n parse_count = 0\n\n # print('DEBUG: Parsing args: ' + str(args))\n for a in args:\n if a in commands_parse:\n commands_parse[a](args[a], args)\n parse_count = parse_count + 1\n\n if parse_count == 0:\n count_commits_in_branch([], args)\n\nargs = parse_arguments()\nparse_commands(args)"
},
{
"alpha_fraction": 0.6630630493164062,
"alphanum_fraction": 0.6684684753417969,
"avg_line_length": 25.33333396911621,
"blob_id": "d73332f727f6fe83c9495d333879526ea9612320",
"content_id": "5ca2a8db97051f7cdd8923c5e7bf630a3ea8ebfc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 555,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 21,
"path": "/git_merge.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# Rebase-related aliases\nalias gb='git rebase'\nalias gbc='git rebase --continue'\n\n# gtool gb-fz: rebase from a remote branch\nalias gb-fz='git rebase $(gbk)'\n\n\nfunction gt-rebase-local-branch()\n{\n target_branch=$(git branch -a | cut -c3- | sed 's/origin//g' | cut -c2- | default-fuzzy-finder)\n echo \"Let's merge branch: \"$target_branch\n git rebase ${target_branch}\n}\n\nfunction gt-merge-branch()\n{\n target_branch=$(git branch -a | cut -c3- | default-fuzzy-finder)\n echo \"Let's merge branch: \"$target_branch\n git merge ${target_branch}\n}\n\n"
},
{
"alpha_fraction": 0.6010830402374268,
"alphanum_fraction": 0.6055956482887268,
"avg_line_length": 29.77777862548828,
"blob_id": "af5dd42e36a20cf2551f3a8652282571ea1520a4",
"content_id": "97a59e6a49b22070a8c1ed9edc457f1044a1f98b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1108,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 36,
"path": "/python/gignore_file.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from subprocess import *\nimport subprocess\n\nimport os\nimport sys\n\n\ndef get_git_root(p):\n \"\"\"Return None if p is not in a git repo, or the root of the repo if it is\"\"\"\n if call([\"git\", \"branch\"], stderr=STDOUT, stdout=open(os.devnull, 'w'), cwd=p) != 0:\n return None\n else:\n root = check_output([\"git\", \"rev-parse\", \"--show-toplevel\"], cwd=p)\n root = root.decode(\"utf8\")\n root = root.strip()\n return root\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Error: you should provide a file to be ignored\")\n exit()\n\n target_file = sys.argv[1]\n target_path = os.path.dirname(target_file)\n git_repo = get_git_root(target_path)\n\n print(\"Let's ignore file: \" + target_file)\n\n if git_repo is None:\n print(\"Error: file is not inside a valid git repo\")\n exit()\n\n add_to_gitignore_cmd = \"echo '\" + target_file[len(git_repo)+1:] + \"' >> '\" + git_repo + \"/.gitignore'\"\n add_to_gitignore_output = subprocess.check_output(add_to_gitignore_cmd, shell=True)\n add_to_gitignore_output = add_to_gitignore_output.strip()\n"
},
{
"alpha_fraction": 0.7081966996192932,
"alphanum_fraction": 0.7114754319190979,
"avg_line_length": 36.875,
"blob_id": "a70b01b87f548f9aac51bf2bae688681781b0c69",
"content_id": "759a59ad6163cfd62fc03ff9a7437c95af603b69",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 305,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 8,
"path": "/git_navigation.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# gtool gt-navigate-to-local-branch: Change current branch\nfunction gt-navigate-to-local-branch()\n{\n target_branch=$(git branch -a | cut -c3- | default-fuzzy-finder)\n echo \"Let's checkout to branch: \"$target_branch\n git checkout ${target_branch}\n}\nalias gt-nav-go=\"gt-navigate-to-local-branch\"\n\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 27,
"blob_id": "db41f35aa6b621db81cc72346e08d99447c1c4c8",
"content_id": "3c3800cbc9e07e6d06221176c530df3e1748ddfd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 168,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 6,
"path": "/git_commit.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "# gtool gt-commit: Create a new commit for author '[email protected]'\nfunction gt-commit()\n{\n # Commit as Adriano Gil (personal email)\n git commit --author='Adriano Gil <[email protected]>'\n}\n"
},
{
"alpha_fraction": 0.6337579488754272,
"alphanum_fraction": 0.6369426846504211,
"avg_line_length": 23.153846740722656,
"blob_id": "6b4bd7ba29913e3deac92f6d9b6fe9b30ed9926a",
"content_id": "43c3c29ebc4ca92137c8137cdf91b5676da29c44",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 26,
"path": "/python/gittools/commits/commit.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from . import clitools\nfrom datetime import datetime\n\n\ndef get_commit_date(repo_dir, ref):\n cmd = \"git show -s --format=%ci \" + ref\n commit_date_output = clitools.run_cmd(cmd)\n\n commit_date = datetime.strptime(commit_date_output[:-9], \"%Y-%m-%d %H:%M\")\n\n return commit_date\n\n\ndef get_commits_with(file=\"\"):\n cmd = \"git log --pretty=format:'%%h' %s\" % (file,)\n get_hashes_output = clitools.run_cmd(cmd)\n\n hash_list = get_hashes_output.split(\"\\n\")\n\n return hash_list\n\n\ndef get_commit_files(ref):\n commit_info = clitools.run_cmd('git log --name-status --oneline -1 %s ' % (ref,))\n\n return commit_info\n"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 23,
"blob_id": "dbe14386d582737c598b461f5d4cc55f8a151651",
"content_id": "8fb3477d504bedbfbf0212d6d81c4e31e15671ac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 1,
"path": "/python/gittools/__init__.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from .gittools import *"
},
{
"alpha_fraction": 0.7265415787696838,
"alphanum_fraction": 0.7319034934043884,
"avg_line_length": 27.69230842590332,
"blob_id": "d020260eceb33df9d142c75ac7a5575658c00f85",
"content_id": "5bb681db51b4af30524d0b94492a082170aafb9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 373,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 13,
"path": "/git_gerrit.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# gtool gt-gerrit-patches: Show gerrit patches for current repo\nfunction gt-gerrit-patches()\n{\n\tgerrit patches\n}\n\n# gtool gt-gerrit-checkout: Select a gerrit patch and checkout it\nfunction gt-gerrit-checkout()\n{\n\ttarget_patch=$(gerrit patches | tail -n +2 | default-fuzzy-finder | awk '{print $1}')\n\techo \"Checkout patch \"${target_patch}\n\tgerrit checkout ${target_patch}\n}"
},
{
"alpha_fraction": 0.6353829503059387,
"alphanum_fraction": 0.6396979689598083,
"avg_line_length": 22.769229888916016,
"blob_id": "e3e5453a05c0360af335ef50beb1d45d1da4a1a5",
"content_id": "6487bdd08e3f1f2f3b59f70c54f766ba0204a439",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 927,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 39,
"path": "/python/gittools/pick.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nfrom . import clitools\n\nimport sys\n\n\ndef is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n return False\n\n\ncommits_order = []\n\nmax_commit_backtrack = 0\n\nfor i in range(1, len(sys.argv)):\n if is_int(sys.argv[i]):\n commits_order_number = int(sys.argv[i])\n if commits_order_number > max_commit_backtrack:\n max_commit_backtrack = commits_order_number\n\n # Get ref for each commit\n commit = clitools.run_cmd(\"git rev-parse --short HEAD~\" + sys.argv[i])\n commits_order.append(commit)\n print('debug: ' + commit)\n\nmax_commit_backtrack = max_commit_backtrack + 1\nclitools.run_cmd(\"git reset --hard HEAD~\" + str(max_commit_backtrack))\n\nprint('max_commit_backtrack: ' + str(max_commit_backtrack))\n\n# Apply each ref in the correct order\nfor c in reversed(commits_order):\n clitools.run_cmd(\"git cherry-pick \" + c)\n"
},
{
"alpha_fraction": 0.775086522102356,
"alphanum_fraction": 0.7785466909408569,
"avg_line_length": 31.11111068725586,
"blob_id": "5b5ae465bb8b1da982060c170f7b2b31ae926840",
"content_id": "573c9607df15638e8ff75e51adfecdb3e785a93a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 289,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 9,
"path": "/git_attributes.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "function gcreate-attributes-python()\n{\n path=$1\n\n py_attributes_from_github=https://raw.githubusercontent.com/alexkaratarakis/gitattributes/master/Python.gitattributes\n\n curl $py_attributes_from_github >> $path/.attributes\n}\nalias gt-attributes-python=\"gcreate-attributes-python\"\n"
},
{
"alpha_fraction": 0.5288640856742859,
"alphanum_fraction": 0.5363128781318665,
"avg_line_length": 28.83333396911621,
"blob_id": "35d9add4d31687cd2638dd49a1f29251e1d42dc7",
"content_id": "fbfd8c77090cb313ff62681bf1027865fa780293",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 537,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 18,
"path": "/git_smart.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "function gsmart-add()\n{\n min_py_proj_dir=2\n\n if [ \"$(ls *.py 2> /dev/null | wc -l)\" -gt \"$min_py_proj_dir\" ]; then\n echo 'Python project identified'\n git add *.py\n elif [ \"$(ls *.sh 2> /dev/null | wc -l)\" -gt \"$min_py_proj_dir\" ]; then\n echo 'Shell project identified'\n git add *.sh\n elif [ \"$(ls *.tex 2> /dev/null | wc -l)\" -gt \"$min_py_proj_dir\" ]; then\n echo 'LaTeX project identified'\n git add *.tex *.bib\n else\n echo 'Unknown project'\n fi\n}\nalias gas=\"gsmart-add\"\n"
},
{
"alpha_fraction": 0.53644859790802,
"alphanum_fraction": 0.5413084030151367,
"avg_line_length": 31.22891616821289,
"blob_id": "a59d784c2e50d1480f03be8f560bf5c5bc085019",
"content_id": "5ce530de7d6568ff097466c40f88d4fca33962d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2675,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 83,
"path": "/python/gittools/history/changes.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "from gittools.history.log import get_hash_log\nfrom gittools.commits.commit import get_commit_files\nimport sys\n\nhashes = get_hash_log(sys.argv[1:])\n\nfor git_hash in hashes:\n git_hash = git_hash.strip()\n commit_change_info = get_commit_files(ref=git_hash)\n # print(commit_change_info)\n\n commit_change_info = commit_change_info.split(\"\\n\")\n\n commit_info = commit_change_info[0].strip()\n\n commit_change = {\n \"added\": {\n \"files\": [],\n \"by_extension\": {}\n },\n \"updated\": {\n \"files\": [],\n \"by_extension\": {}\n }\n }\n\n files_added = []\n files_deleted = []\n files_updated = []\n\n for file_info in commit_change_info[1:]:\n file_info = file_info.strip()\n\n if file_info == \"\":\n continue\n\n if file_info[0] in [\"M\", \"R\"]:\n commit_change[\"updated\"][\"files\"].append(file_info.split(\"\\t\")[1])\n elif file_info[0] == \"A\":\n commit_change[\"added\"][\"files\"].append(file_info.split(\"\\t\")[1])\n\n # Added Files\n for file in commit_change[\"added\"][\"files\"]:\n file_extension = file.strip().split(\".\")[-1]\n\n if file_extension not in commit_change[\"added\"][\"by_extension\"]:\n commit_change[\"added\"][\"by_extension\"][file_extension] = []\n commit_change[\"added\"][\"by_extension\"][file_extension].append(file)\n msg_str = \"\"\n for extension in commit_change[\"added\"][\"by_extension\"]:\n if msg_str == \"\":\n msg_str = \"(Added: \"\n else:\n msg_str += \", \"\n msg_str += str(len(commit_change[\"added\"][\"by_extension\"][extension])) + \" \" + extension\n if msg_str != \"\":\n msg_str += \"; \"\n\n # Updated Files:\n for file in commit_change[\"updated\"][\"files\"]:\n file_extension = file.strip().split(\".\")[-1]\n\n if file_extension not in commit_change[\"updated\"][\"by_extension\"]:\n commit_change[\"updated\"][\"by_extension\"][file_extension] = []\n commit_change[\"updated\"][\"by_extension\"][file_extension].append(file)\n update_msg_str = \"\"\n for extension in commit_change[\"updated\"][\"by_extension\"]:\n if update_msg_str == \"\":\n update_msg_str = \"Updated: \"\n else:\n update_msg_str += \", \"\n update_msg_str += str(len(commit_change[\"updated\"][\"by_extension\"][extension])) + \" \" + extension\n # if update_msg_str != \"\":\n # update_msg_str += \") \"\n\n if msg_str == \"\":\n msg_str = \"(\" + update_msg_str + \")\"\n else:\n msg_str = msg_str + update_msg_str + \")\"\n\n if len(commit_info) > 60:\n commit_info = commit_info[:50] + \"...\"\n print(commit_info + \" \" + msg_str)\n"
},
{
"alpha_fraction": 0.5668683052062988,
"alphanum_fraction": 0.5793010592460632,
"avg_line_length": 26.80373764038086,
"blob_id": "524e348055f9683e300cc716b1f890220bbd5f7d",
"content_id": "26a9711c6e38c8dbfec112d9cd8ea1da68416464",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2976,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 107,
"path": "/git_files.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# gtool gt-file-previous-version: select a commit and then a file to see its previous version\nfunction gt-file-previous-version()\n{\n if [ -z \"$1\" ]\n then\n target_commit=$(gt-hist-pick-commit)\n else\n target_commit=$1\n fi\n\n target_file=$(git diff-tree --no-commit-id --name-only -r ${target_commit} | default-fuzzy-finder)\n\n echo \"Let's see previous version of \"${target_file}\" in ref \"${target_commit}\n\n git show ${target_commit}:${target_file}\n}\n\n# gtool gt-file-history: see change log for a specific file\nfunction gt-file-history()\n{\n if [ -z \"$1\" ]\n then\n target_file=$(find . -not -path '*/\\.*' | default-fuzzy-finder | cut -c3-)\n else\n target_file=$1\n fi\n\n gh ${target_file}\n}\n\n# gtool gt-file-history-version: select a file and then a commit to see its previous version\nfunction gt-file-history-version()\n{\n if [ -z \"$1\" ]\n then\n target_file=$(find . -not -path '*/\\.*' | default-fuzzy-finder | cut -c3-)\n else\n target_file=$1\n fi\n\n target_commit=$(gh ${target_file} | default-fuzzy-finder | cut -c3- | awk '{print $1}')\n\n echo \"Let's see previous version of \"${target_file}\" in ref \"${target_commit}\"\\n\"\n\n git show ${target_commit}:${target_file}\n}\n\n# gtool gt-file-checkout-version: change a target file to its version in a target commit\nfunction gt-file-checkout-version()\n{\n if [ -z \"$1\" ]\n then\n target_file=$(find . -not -path '*/\\.*' | default-fuzzy-finder | cut -c3-)\n else\n target_file=$1\n fi\n\n if [ -z \"$2\" ]\n then\n target_commit=$(gh ${target_file} | default-fuzzy-finder | cut -c3- | awk '{print $1}')\n else\n target_commit=$2\n fi\n\n echo \"Change file \"${target_file}\" to its previous version in ref \"${target_commit}\"\\n\"\n\n git checkout ${target_commit} ${target_file}\n}\n\n# gtool gls-files: see most recent commit that changed each file in a target directory\nfunction gls-files()\n{\n # gls-files $target_commit $target_directory\n # Github-like vision of repo, shows last commit that changed each\n # file in $2 directory\n\n if [ -z \"$1\" ]\n then\n target_directory=''\n target_commit='HEAD'\n else\n target_directory=$1\n\n if [ -z \"$2\" ]\n then\n target_commit='HEAD'\n else\n target_commit=$2\n fi\n fi\n\n target_files=$(git show $target_commit:$target_directory | tail -n +3)\n # target_files=$(echo -e $target_files | tr '\\n' ' ')\n\n line='----------------------------------------'\n\n if [ -z \"$target_directory\" ]\n then\n echo ${target_files} | xargs -I {} git log -n 1 --pretty=format:\"\"{}\" - %h%x09[%><(35,trunc)%s]%x09%ar\" -- ${target_commit} {}\n else\n echo ${target_files} | xargs -I {} git log -n 1 --pretty=format:\"\"{}\" - %h%x09[%><(35,trunc)%s]%x09%ar\" -- ${target_commit} $target_directory/{} \n fi\n\n total_files=$(echo $target_files | wc -l)\n echo ${total_files}\" files\"\n\n}\n"
},
{
"alpha_fraction": 0.586725652217865,
"alphanum_fraction": 0.595575213432312,
"avg_line_length": 27.9743595123291,
"blob_id": "7f308a9620b6f75987bf4139876d58c99085339c",
"content_id": "80d172428b477615480886c67d1ef4cad994e4fb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1130,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 39,
"path": "/python/git_update_track.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "import sys\nimport subprocess\n\n\ndef get_branches(line):\n branches = []\n print(line)\n\n for i in range(0, len(line)):\n if line[i:i + 7] == 'origin/':\n for j in range(i + 7, len(line)):\n if line[j] in '), ':\n branches.append(line[i + 7:j])\n break\n\n return branches\n\n\nget_hashes_cmd = \"git --no-pager log \"\nget_hashes_cmd += \"--simplify-by-decoration \"\nget_hashes_cmd += \"--tags --branches --remotes \"\nget_hashes_cmd += \"--date-order \"\nget_hashes_cmd += \"--decorate \"\nget_hashes_cmd += '--pretty=tformat:\"%Cblue %h %C(auto)%d%Creset\"'\nget_hashes_output = subprocess.check_output(get_hashes_cmd, shell=True)\nget_hashes_output = get_hashes_output.decode(\"utf8\")\nget_hashes_output = get_hashes_output.strip()\n\n# print(get_hashes_output)\n\nhashes_lines = get_hashes_output.split(\"\\n\")\n\nfor line in hashes_lines:\n hash_data = line.split(\" \")\n # print(str(hash_data))\n if len(hash_data) >= 2 and 'origin/' in hash_data[1]:\n branches = get_branches(hash_data[1])\n print(str(branches))\n # print(hash_data[0] + \" \" + hash_data[1])\n"
},
{
"alpha_fraction": 0.715575635433197,
"alphanum_fraction": 0.7200902700424194,
"avg_line_length": 25.84848403930664,
"blob_id": "c09daf04cb29bce92e08b0fbcab37d6b1befa15e",
"content_id": "89496c177a48fe8ab0fff5796c138cf1a51e7885",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1772,
"license_type": "permissive",
"max_line_length": 217,
"num_lines": 66,
"path": "/README.md",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "# git-tools\nA collection of aliases and tools for git\n\n## Commands\n\nAliases to basic commands (All defined at personal_aliases.sh):\n```\nga # git add\nga # git add -i\ngc # git commit\ngca # git commit --amend\ngflog # git reflog with a pretty format\n```\n\nCommands related to log history\n```\ngh # show commit graph, similar to git log --oneline --graph\n```\n\nCommands related to push commits\n```\ngp # alias to git push\ngt-send-to-branch # uses a fuzzy-finder to select a branch\n```\n\n## Planned features\n- Save and track each branch update\n- Suggest commit message from \"git diff --cached\"\n\n## Installation\n\n### Recommended setup\nInstall using [gil-install command](https://github.com/adrianogil/gil-tools)\n\n```\ncd /<path-to>/git-tools/\ngil-install -i\n```\n\n### Manual setup\n\nAdd the following lines to your bashrc:\n```\nexport GIT_TOOLS_DIR=/<path-to>/git-tools/\nsource $GIT_TOOLS_DIR/bashrc.sh\n```\n\nAnd you should also define an alias default-fuzzy-finder to the fuzzy-finder you want to use. For example:\n\n```bash\nalias default-fuzzy-finder='fzf'\n```\n\n## Contributing\n\nFeel free to submit PRs. I will do my best to review and merge them if I consider them essential.\n\n## Interesting Links\n\n* [awesome-git-addons](https://github.com/stevemao/awesome-git-addons): very interesting commands you should check out:\n * recent\n * git-standup\n * [git interactive rebase tool](https://github.com/MitMaro/git-interactive-rebase-tool)\n * [diff-so-fancy](https://github.com/so-fancy/diff-so-fancy)\n * [awesome-git](https://github.com/dictcp/awesome-git)\n * [method_log](https://github.com/freerange/method_log): tool to analyze the change history of methods (see more on [this blog post](https://www.urbanautomaton.com/blog/2014/09/22/tracking-method-history-in-git/))\n"
},
{
"alpha_fraction": 0.7355072498321533,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 26.600000381469727,
"blob_id": "35b9a49f2d0c67ff38a3a796075e3d02d9515727",
"content_id": "882cc81236a731f2591652ee6594165a0cf6e440",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 10,
"path": "/python/gittools/clitools.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "import subprocess\n\n\ndef run_cmd(cmd):\n subprocess_cmd = cmd\n subprocess_output = subprocess.check_output(subprocess_cmd, shell=True)\n subprocess_output = subprocess_output.decode(\"utf8\")\n subprocess_output = subprocess_output.strip()\n\n return subprocess_output\n"
},
{
"alpha_fraction": 0.6300597190856934,
"alphanum_fraction": 0.6443264484405518,
"avg_line_length": 23.104000091552734,
"blob_id": "83dbd653704886bc96c4db537a21fb6dca502d20",
"content_id": "e38d692e7060bc809c4c817f7f210c06941e9fb4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3014,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 125,
"path": "/git_history.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "\n# gtool git-hist: Show commits history\nalias git-hist=\"git log --pretty=format:'%C(red)%h%Creset %C(cyan)%ad%Creset | %s%C(magenta)%d%Creset [%C(blue)%an%Creset]' --graph --date=short\"\n\nalias gh='git-hist'\nalias gha='git-hist --all '\nalias gha-reflog='gh --decorate `git reflog | cut -d \" \" -f 1`'\n\nalias gh10='gh -10'\n\nalias gtoday='gh --since=\"1am\"'\n\nexport GIT_TOOLS_TRACKING_JSON=\"${GIT_TOOLS_DIR}/.gitdata\"\n\n#gtool gh-changes: see summarized changes of each commit\nalias gh-changes='python3 -m gittools.history.changes'\n\n# gtool gt-tracking-update: Track commits updates on branches\nalias gt-tracking-update='python3 -m gittools.commits.tracking.update'\n\n# gtool gt-hist-target-fz\nfunction gt-hist-target-fz()\n{\n target_ref=$(git branch -a | cut -c3- | default-fuzzy-finder)\n\n gh ${target_ref}\n}\n\nfunction gt-hist-pick-commit()\n{\n target_commit=$(gh | default-fuzzy-finder | cut -c3- | awk '{print $1}')\n echo ${target_commit} | pbcopy\n echo ${target_commit}\n}\n\n# gtool gt-hist-tag: search for a tag and shows its git logs\nfunction gt-hist-tag()\n{\n target_tag=$(git tag -l | default-fuzzy-finder)\n gh ${target_tag}\n}\nalias gh-tags=\"gt-hist-tag\"\n\n# gtool gt-hist-cp-hash\nfunction gt-hist-cp-hash()\n{\n echo \"Search for Hash\"\n\n target_commit=$(gha | default-fuzzy-finder | cut -c3- | awk '{print $1}')\n\n # Copy hash\n echo \"Found hash: \"$target_commit\n echo \"Commit:\"\n gh -1 $target_commit\n}\n\n# gtool gt-hist-find-string: Find string in all commit history\nfunction gt-hist-find-string()\n{\n if [ -z \"$1\" ]\n then\n read -p \"Target string: \" word\n else\n target_string=$1\n fi\n\n git log -S ${target_string} --source --all\n}\n\nfunction ghs()\n{\n gh $1 $2 | less\n}\n\n# See https://www.commandlinefu.com/commands/view/15063/list-offsets-from-head-with-git-log.\nfunction gh-count-from-head()\n{\n o=0\n git log --oneline | while read l; do printf \"%+9s %s\\n\" \"HEAD~${o}\" \"$l\"; o=$(($o+1)); done | less\n}\n\n# https://stackoverflow.com/questions/47142799/git-list-all-branches-tags-and-remotes-with-commit-hash-and-date\nfunction gh-branches()\n{\n git --no-pager log \\\n --simplify-by-decoration \\\n --tags --branches --remotes \\\n --date-order \\\n --reverse \\\n --decorate \\\n --pretty=tformat:\"%Cblue %h %Creset %<(25)%ci %C(auto)%d%Creset %s [%C(blue)%an%Creset]\"\n}\n\nalias gh-update=\"python3 $GIT_TOOLS_DIR/python/git_update_track.py\"\n\nfunction gfunction()\n{\n function_name=$1\n file_name=$2\n\n git log -L :$function_name:$file_name\n}\n\n# gtool gw-new-files: Log of commits in which files were added\nfunction gw-new-files()\n{\n if [ -z \"$1\" ]\n then\n echo \"Add params to log: (ex: *.js) \"\n read log_params\n else\n log_params=$1\n fi\n\n git whatchanged --diff-filter=A ${log_params}\n}\n\n# gtool gw-file: Log of commits in which a given file is included\nfunction gw-file()\n{\n target_file=$1\n\n git whatchanged -- ${target_file}\n}\n\nalias gh-diff=\"python3 -m gittools.history.commitsdiff\"\n"
},
{
"alpha_fraction": 0.5912636518478394,
"alphanum_fraction": 0.6006240248680115,
"avg_line_length": 22.740739822387695,
"blob_id": "2a4a2817050d561e29cde88462e311cd336247df",
"content_id": "7b9e863929a4a4ccb2add55ad57c89b1304d40ee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 641,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 27,
"path": "/git_repos.sh",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "# gtool gt-repos: Find repos\nfunction gfrepos()\n{\n f '.git' | xa echo {} | rev | cut -c6- | rev\n}\nalias gt-repos=\"gfrepos\"\n\n\nfunction gt-repos-urls-current-folder()\n{\n gfrepos | xa cat {}/.git/config | grep \"url = \" | cut -c8-\n}\n\n# gtool gt-repos-urls: List repo urls\nfunction gt-repos-urls()\n{\n cat $(git rev-parse --show-toplevel)/.git/config | grep \"url = \" | cut -c8-\n}\nalias gurls=\"gt-repos-urls\"\n\n# gtool gt-repos-open-site: Open repo site \nfunction gt-repos-open-site()\n{\n url_path=$(gt-repos-urls | head -1)\n repo_url=\"http://www.\"$(echo ${url_path}| cut -c5- | rev | cut -c5- | rev | tr \":\" \"/\")\n o ${repo_url}\n}\n"
},
{
"alpha_fraction": 0.6784232258796692,
"alphanum_fraction": 0.6908713579177856,
"avg_line_length": 31.133333206176758,
"blob_id": "d30c563b88607a748e1777e873754fd8bcd79c43",
"content_id": "1e28f635853a6587b899f17468f2a759221e6c66",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 15,
"path": "/python/gittools/commits/__init__.py",
"repo_name": "adrianogil/git-tools",
"src_encoding": "UTF-8",
"text": "import gittools.clitools as clitools\n\n\ndef get_diverge_commits(ref1, ref2):\n get_diverge_commits_command = 'git log %s..%s' % (ref1, ref2) + ' --pretty=oneline | wc -l'\n diverge_commits = clitools.run_cmd(get_diverge_commits_command)\n\n return diverge_commits\n\n\ndef get_total_commits(ref1):\n get_diverge_commits_command = 'git log %s' % (ref1,) + ' --pretty=oneline | wc -l'\n diverge_commits = clitools.run_cmd(get_diverge_commits_command)\n\n return diverge_commits\n"
}
] | 39 |
inenovsk1/Sudoku-Solver | https://github.com/inenovsk1/Sudoku-Solver | f4cc7d9faeb14d5358d7d6a682747cac4afc6500 | d8dade43f6287db61dd9928a9a5c1f54aed47b86 | 11434af74000aacb0e955a2b8ab2ce31b64a81ba | refs/heads/master | 2022-12-15T10:17:33.406912 | 2020-09-17T22:14:58 | 2020-09-17T22:14:58 | 287,962,532 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6902231574058533,
"alphanum_fraction": 0.7136025428771973,
"avg_line_length": 42.79069900512695,
"blob_id": "cb3f3578f876717faa5e1fc24a74a41235c0f935",
"content_id": "31355fc5d6a7adcd206e56502f4151653df89016",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1884,
"license_type": "permissive",
"max_line_length": 470,
"num_lines": 43,
"path": "/README.md",
"repo_name": "inenovsk1/Sudoku-Solver",
"src_encoding": "UTF-8",
"text": "# Sudoku-Solver\nSudoku Solver using the Backtracking algorithm and pygame for visualizations.\n\n# About Pygame\nPygame is a python library used for creating 2D and 3D games and applications. This program uses pygame as well, so before running it one must install pygame first. For more information on how to do that based on your platform please visit the [Getting Started](https://www.pygame.org/wiki/GettingStarted) pygame page. I have developed this application with pygame2 in mind and using version 2's documentation so it is preferable for one to use version 2 of the librray.\n\n# Usage\nPython version 3 is the recommended version to use when running this. File main.py provides a shebang, which defaults to the latest version of python 3 so one can simply run main.py in terminal like an executable:\n```\n./main.py [options]\n```\nFor more detailed information regarding the options, type:\n```\n./main.py --help\n```\n\n# Examples\n### Run a normal speed solver with the easy board\n```\n./main.py -p ./boards/game1.json\n```\n\n### Run a fast solver with the expert board\n```\n./main.py -p ./boards/expert_game.json --fast\n```\n\nTo restart the board press TAB.\nTo exit press either ESCAPE or the corresponding key combination to close apps on your OS. For example on macOS - ⌘+Q\n\n# Supplying one's own boards\nOne can also supply their own board and run the solver on it. To do it one needs to supply a json file in the following format:\n```json\n{\n \"board\": {\n \"row1\": [1, 2, 3, 4, 0, 0, 7, 8, 9],\n \"row2\": [1, 2, 3, 4, 5, 6, 7, 0, 0],\n ...\n \"row9\": [1, 2, 0, 4, 5, 6, 7, 8, 9]\n }\n}\n```\nThere's a global \"board\" key, which contains keys named \"row[1-9]\" each representing a list of all 9 numbers in the given row. When the number is 0, that means the position is empty. When a number is between 1 and 9 that means a position already given on the Sudoku board."
},
{
"alpha_fraction": 0.5645251274108887,
"alphanum_fraction": 0.5765362977981567,
"avg_line_length": 31.25225257873535,
"blob_id": "813597910fe2eb87198de009ec5a314f970b0766",
"content_id": "0a71056494d0cdc87bc46c82db45884ee8be5b27",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10740,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 333,
"path": "/main.py",
"repo_name": "inenovsk1/Sudoku-Solver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"Sudoku Solver using Pygame and the backtracking algorithm\n\"\"\"\n\nimport os\nimport sys\nimport enum\nimport json\nimport argparse\nimport pygame\nimport pygame.freetype\n\n\nclass Color(enum.Enum):\n Background = (38, 49, 50)\n Text = (255, 255, 255)\n CurrentlySolving = (255, 81, 81)\n Solved = (105, 240, 173)\n Grid = (96, 124, 139)\n\n\nclass Cell:\n \"\"\"Implementation of a single Sudoku cell. Use this class to keep track\n of a cell's color, coordinate positions, and current solution\n \"\"\"\n def __init__(self, row, col, width, height, number):\n self._row = row\n self._col = col\n self._xpos = col * width\n self._ypos = row * height\n self._color = Color.Background\n\n if number:\n self._solution = number\n else:\n self._solution = 0\n\n @property\n def xpos(self):\n return self._xpos\n\n @xpos.setter\n def xpos(self, new_xpos):\n self._xpos = new_xpos\n\n @property\n def ypos(self):\n return self._ypos\n\n @ypos.setter\n def ypos(self, new_ypos):\n self._ypos = new_ypos\n\n @property\n def solution(self):\n return self._solution\n\n @solution.setter\n def solution(self, num):\n self._solution = num\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, new_color):\n self._color = new_color\n\n def __str__(self):\n return str(self._solution) if self._solution else \"\"\n\n\nclass SudokuGame:\n \"\"\"Class that implements the logic of solving Sudoku by using\n the Backtracking algorithm\n \"\"\"\n def __init__(self, screen, game_font, path, fast, cell_width, cell_height):\n self._screen = screen\n self._game_font = game_font\n self._fast = fast\n self._cell_width = cell_width\n self._cell_height = cell_height\n self._grid = self.init_grid(path)\n\n @property\n def grid(self):\n return self._grid\n\n @grid.setter\n def grid(self, new_grid):\n self._grid = new_grid\n\n def init_grid(self, path):\n \"\"\"Initialize a grid from a JSON file\n\n Args:\n path (str): Path to a JSON file containing the grid\n\n Returns:\n list: List of lists representing the current Sudoku game\n \"\"\"\n grid = list()\n\n with open(path) as f:\n board_data = json.load(f)\n\n for row in range(len(board_data[\"board\"])):\n grid.append(list())\n col_checker = 0\n\n for col, number in enumerate(board_data[\"board\"][\"row\" + str(row + 1)]):\n grid[row].append(Cell(row, col, self._cell_width, self._cell_height, number))\n col_checker += 1\n\n if col_checker != len(board_data[\"board\"]):\n print(\"Number of rows does not match number of columns. 
Please verify your Sudoku board at {path}\".format(path=path))\n sys.exit(1)\n\n return grid\n\n def find_empty_position(self):\n \"\"\"Find if a position in a given row/col is empty and return it\n\n Returns:\n tuple: row and col of an empty position or -1, -1 if no such position exists\n as well as True or False whether a number has been assigned to that position or not\n \"\"\"\n empty_row = -1\n empty_col = -1\n\n for row in range(9):\n for col in range(9):\n if self._grid[row][col].solution == 0:\n empty_row = row\n empty_col = col\n return empty_row, empty_col, False\n\n return empty_row, empty_col, True\n\n def move_is_safe(self, suggestion, row, col):\n \"\"\"Check whether the suggestion move for the given row and col is safe to play\n by checking for the existence in the current row, current column, and current sub matrix\n\n Args:\n suggestion (int): The suggestion move to play\n row (int): Current row at which to place the suggestion\n col (int): Current col at which to place the suggestion\n\n Returns:\n bool: True whether a move with the current suggestion would be valid or not based on\n the current state of the sudoky board\n \"\"\"\n # Check current row\n for index in range(9):\n if self._grid[row][index].solution == suggestion:\n self._grid[row][col].color = Color.Background\n return False\n\n # Check current col\n for index in range(9):\n if self._grid[index][col].solution == suggestion:\n self._grid[row][col].color = Color.Background\n return False\n\n # Check current submatrix\n submat_row = (row // 3) * 3\n submat_col = (col // 3) * 3\n for sub_row in range(submat_row, submat_row + 3):\n for sub_col in range(submat_col, submat_col + 3):\n if self._grid[sub_row][sub_col].solution == suggestion:\n self._grid[row][col].color = Color.Background\n return False\n\n return True\n\n def solve_game(self):\n \"\"\"Function that solved the sudoku board by using Backtracking\n\n Returns:\n Bool: True if the board can be solved, False otherwise\n \"\"\"\n # Handle exiting while algo is running\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n \n row, col, assigned = self.find_empty_position()\n\n if assigned:\n return True\n\n # Currently checking this row and col\n self._grid[row][col].color = Color.CurrentlySolving\n refresh_screen(self._screen, self, self._game_font)\n if not self._fast:\n pygame.time.wait(50)\n\n # Check for a valid solution between 1 and 9\n for suggestion in range(1, 10):\n if self.move_is_safe(suggestion, row, col):\n # Possible Solution\n self._grid[row][col].solution = suggestion\n self._grid[row][col].color = Color.Solved\n refresh_screen(self._screen, self, self._game_font)\n if not self._fast:\n pygame.time.wait(50)\n\n # Continue with solution\n if self.solve_game():\n return True\n\n # Backtrack\n self._grid[row][col].solution = 0\n self._grid[row][col].color = Color.Background\n refresh_screen(self._screen, self, self._game_font)\n if not self._fast:\n pygame.time.wait(50)\n\n return False\n\n\ndef draw_grid_borders(screen, rows, cols, width, height):\n \"\"\"Draws the borders of the Sudoku game\n\n Args:\n screen (pygame.display): The surface to draw the grid on\n rows (int): Sudoku rows\n cols (int): Sudoku cols\n width (int): Width of a single Sudoku cell\n height (int): Height of a single Sudoku cell\n\n Returns:\n list: All points that need to be redrawn by pygame due to a change.\n This way one saves resources and does not redraw the entire screen.\n \"\"\"\n updated_points = list()\n\n for row 
in range(rows):\n line_width = 5 if row % 3 == 0 else 1\n\n dims1 = pygame.draw.line(screen, Color.Grid.value, (0, row * height), (width * rows, height * row), width=line_width)\n updated_points.append(dims1)\n\n for col in range(cols):\n line_width = 5 if col % 3 == 0 else 1\n\n dims2 = pygame.draw.line(screen, Color.Grid.value, (col * width, row * height), (col * width, row * height + height), width=line_width)\n updated_points.append(dims2)\n\n return updated_points\n\n\ndef refresh_screen(screen, game, game_font):\n \"\"\"Function that redraws the screen at each iteration of the main event loop\n\n Args:\n screen (pygame.display): Surface to redraw the game on\n game (SudokuGame): Class representing a single instance of a Sudoku game\n game_font (pygame.freetype.Font): The font to use when rendering numbers in the Sudoku game\n \"\"\"\n screen.fill(Color.Background.value)\n\n mid_point = 30\n updated_points = list()\n \n for row in range(9):\n for col in range(9):\n if game.grid[row][col].color != Color.Background:\n color = game.grid[row][col].color\n x = game.grid[row][col].xpos\n y = game.grid[row][col].ypos\n rect_dims = pygame.draw.rect(screen, color.value, (x, y, 90, 90))\n updated_points.append(rect_dims)\n \n text_surface, rect = game_font.render(str(game.grid[row][col]), Color.Text.value)\n rect = screen.blit(text_surface, pygame.Rect(col*90 + mid_point, row*90 + mid_point, 90, 90))\n updated_points.append(rect)\n\n updated_points += draw_grid_borders(screen, 9, 9, 90, 90)\n\n pygame.display.update(updated_points)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"\"\"Sudoku Solver using the Backtracking algorithm and pygame for visualizations.\n Usage instructions:\n 1. Press space to initialize the algorithm.\n 2. Press TAB to reset the board from the beginning.\"\"\", formatter_class=argparse.RawTextHelpFormatter)\n \n parser.add_argument('-p', '--path', dest='path', required=True, help='Path to a JSON file with a board to be solved')\n parser.add_argument('-f', '--fast', action='store_true', dest='fast', help='Whether to speed up animations or run slower')\n \n args = parser.parse_args()\n \n path = os.path.abspath(os.path.expanduser(args.path))\n fast = args.fast\n\n pygame.init()\n screen = pygame.display.set_mode((810, 810))\n pygame.display.set_caption(\"Sudoku Backtracking Solver\")\n\n font_size = 48\n game_font = pygame.freetype.Font(\"./fonts/Fira Code Bold Nerd Font Complete.ttf\", font_size)\n\n sudoku = SudokuGame(screen, game_font, path, fast, 90, 90)\n\n while True:\n refresh_screen(screen, sudoku, game_font)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n sudoku.solve_game()\n\n if event.key == pygame.K_TAB:\n sudoku = SudokuGame(screen, game_font, path, fast, 90, 90)\n\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n # Sleep for x milliseconds to release the CPU to other processes\n pygame.time.wait(10)\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
big-edd/System-Admin-Scripts | https://github.com/big-edd/System-Admin-Scripts | 3d5123dac64a884e45602dfed3cd5738be88d265 | 43335511ed1b215af45db08226ba3a5625d364bd | c1de4f2e866228bce436ca6c791c3bc2dd3167bb | refs/heads/main | 2023-09-01T14:42:18.717435 | 2021-11-04T07:45:00 | 2021-11-04T07:45:00 | 321,215,855 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.768932044506073,
"alphanum_fraction": 0.7721682786941528,
"avg_line_length": 41.88888931274414,
"blob_id": "421094ed9b15638d7ba73520375162b14d04e58e",
"content_id": "1d6b0afca8ec91f1371e52c908281e3bdffca270",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1545,
"license_type": "no_license",
"max_line_length": 360,
"num_lines": 36,
"path": "/README.md",
"repo_name": "big-edd/System-Admin-Scripts",
"src_encoding": "UTF-8",
"text": "# System-Admin-Scripts\n\nGoing to place some scripts here that I think might be handy for others. \n\n## Windows\n\n### check_app_disconnect.ps1\n\nOriginally intended to clean up MYOB processes running in disconnected RDP sessions, and remove the lockfile. The lock file (with .flk file extension) needed to be removed before a disconnected user could log on again. \n\nTested extensively with disconnected sessions running cmd.exe, with a text file to delete, and found to be working well. \nDid not quite make it to production before the decision was made to move away from the MYOB via RDP, so this script became something that was no longer required. \n\nThis could potentially be helpful for any program used via RDP, which needs disconnected sessions to be managed. \n\n## PowerCLI\n\n### vCenter_Alarm_emails_audit.ps1\n\nCheck email address currently being used for alarms. \n\n### vCenter_Email_alerts_refresh.ps1\n\nSet email address to use for alarms. \n\n### vCenter_Alarm_history.ps1\n\nCheck alarm emails sent. Number of days of history can be set. \n\n## Linux/Unix\n\n### dat2csv.py\n\nAt a previous role (way back) there were some Informix C-ISAM databases (and later D-ISAM I believe). \n\nAs the development team were always too busy for ad hoc checks on the data, I often found myself writing script to search for information in the database files. So I wrote this script to read data out of a database table, and output this to a CSV file. People interested in this data could then use Excel to manipulate, filter and sort to their hearts content. \n"
},
{
"alpha_fraction": 0.5533754825592041,
"alphanum_fraction": 0.5626870393753052,
"avg_line_length": 23.922412872314453,
"blob_id": "8e6987ec19250682c6a60304c76a8b806dd24fa8",
"content_id": "96c4ec77a3498587b8347eb6f8c7cfbbd3bb0db2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3007,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 116,
"path": "/dat2csv.py",
"repo_name": "big-edd/System-Admin-Scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\r\n\r\n##\r\n## Written 2010-10-07 by Eddie F.\r\n##\r\n## For outputting a dat file in CSV format\r\n## (according to fmt file info).\r\n##\r\n## Use the dat file as the argument.\r\n##\r\n## This CSV data could then be pasted into excel for any required\r\n## text manipulation, filtering and sorting.\r\n##\r\n\r\n\r\n#\r\n## Delimiter\r\nvalue_delim = \",\"\r\n#value_delim = \";\"\r\n#value_delim = \"\"\r\n\r\n#\r\n## Delimiter\r\n#entry_delim = \",\"\r\n#entry_delim = \";\"\r\nentry_delim = \"\"\r\n\r\n\r\nimport os\r\nimport sys\r\n\r\n\r\n\r\nif len(sys.argv) < 2:\r\n print(\"\")\r\n print(\" Need the dat file as the argument.\")\r\n print(\"\")\r\n \r\nelif len(sys.argv) > 2:\r\n print(\"\")\r\n print(\" Too many arguments. Just need to specify the dat file to process.\")\r\n print(\"\")\r\n \r\nelse:\r\n #dat_file_name = \"c_pos.dat\"\r\n dat_file_name = sys.argv[1]\r\n #print(\"dat_file = \" + dat_file_name)\r\n fmt_file_name = dat_file_name[:len(dat_file_name)-3] + \"fmt\"\r\n #print(\"fmt_file = \" + fmt_file_name)\r\n #print(\"\")\r\n\r\n try:\r\n fmt_file = open(fmt_file_name, \"r\")\r\n except:\r\n print(\"\")\r\n print(\" Can't open the dat file.\")\r\n print(\" Need to be in the apropriate opdirs directory.\")\r\n print(\"\")\r\n print(\"\")\r\n\r\n dictionary_section = False\r\n dictionary = list()\r\n for fmt_line in fmt_file:\r\n fmt_line = fmt_line.replace('\\n', \"\")\r\n if fmt_line != \"\":\r\n if dictionary_section:\r\n words = fmt_line.split()\r\n entry_desc = words[0]\r\n entry_start = int(words[3])\r\n entry_end = int(words[3]) + int(words[4])\r\n dictionary.append([entry_desc, entry_start, entry_end])\r\n if fmt_line.find(\"# Dictionary\") == 0:\r\n dictionary_section = True\r\n else:\r\n dictionary_section = False\r\n\r\n #print(\" Dictionary...\")\r\n #for item in dictionary:\r\n # print(item)\r\n #print(\"\")\r\n\r\n fmt_file.close()\r\n\r\n try:\r\n dat_file = open(dat_file_name, \"r\")\r\n except:\r\n print(\"\")\r\n print(\" Can't open the fmt file.\")\r\n print(\" Need to be in the apropriate opdirs directory.\")\r\n print(\"\")\r\n print(\"\")\r\n\r\n output_line = \"\"\r\n for i in range(0, len(dictionary)):\r\n output_line = output_line + (dictionary[i])[0]\r\n if i != (len(dictionary)-1):\r\n output_line = output_line + value_delim\r\n output_line = output_line + entry_delim\r\n print(output_line)\r\n \r\n for dat_line in dat_file:\r\n dat_line = dat_line.replace('\\n', \"\")\r\n output_line = \"\"\r\n if dat_line != \"\":\r\n for i in range(0, len(dictionary)):\r\n #output_line = output_line + (dat_line[int((dictionary[i])[1]):int((dictionary[i])[2])])\r\n #output_line = output_line + (dat_line[int((dictionary[i])[1]):int((dictionary[i])[2])]).strip()\r\n output_line = output_line + \"\\\"\" + (dat_line[int((dictionary[i])[1]):int((dictionary[i])[2])]).strip() + \"\\\"\" \r\n if i != (len(dictionary)-1):\r\n output_line = output_line + value_delim\r\n output_line = output_line + entry_delim\r\n print(output_line)\r\n else:\r\n print(\" ... Blank line...\")\r\n\r\n dat_file.close()\r\n"
}
] | 2 |
SergeyLankevich/funp_intro | https://github.com/SergeyLankevich/funp_intro | 59a60081b657a1b6a36dbafa2584a093467ea230 | 5e1093f0fd61e67c56d751aba16d4e07f4f95798 | 43b5eb42305a813ac755245413fcad074ce908d3 | refs/heads/master | 2022-12-14T13:24:21.211884 | 2020-09-09T02:35:42 | 2020-09-09T02:35:42 | 293,982,385 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6243902444839478,
"alphanum_fraction": 0.6243902444839478,
"avg_line_length": 17.636363983154297,
"blob_id": "e4d31b6440955882ee5ff5270460b85628eb088f",
"content_id": "50128fc12b2887e92bc355f971d5ea577b7713cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/to_yaml.py",
"repo_name": "SergeyLankevich/funp_intro",
"src_encoding": "UTF-8",
"text": "import yaml\nfrom functools import wraps\n\n\ndef to_yaml(func):\n @wraps(func)\n def wrapper(*args):\n result = yaml.dump(func(*args))\n print(result)\n return result\n return wrapper\n"
}
] | 1 |
Xpheriono/SimsThings | https://github.com/Xpheriono/SimsThings | c31444565b71335cdc1b90d9d3304f02d6474a18 | b4ef1ef6eeaac88ab3b9a7818de67f728f9a4f4b | 1d219d7495e876d4116564025f71e25da15de827 | refs/heads/master | 2021-07-14T23:00:37.694361 | 2020-10-21T15:50:06 | 2020-10-21T15:50:06 | 218,315,600 | 4 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7651006579399109,
"alphanum_fraction": 0.7776845693588257,
"avg_line_length": 50.826087951660156,
"blob_id": "b4dfdbaaa884eff72fa62e35c4b680b2e4dab055",
"content_id": "3a487d615b751e84b44cb5e2cef237c3cf378029",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1192,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 23,
"path": "/README.md",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "# SimsThings\n\nJust messing around with modding The Sims 4.\n\n# Decompiler\nI decided to enhance the decompiler script going around with multiprocessing, this should (depending on the system) make decompiling the files MUCH faster, around 45 seconds on my Ryzen 3800x\n\ndecompile_all_multi.py can be used with Python 3.7.5 to decompile game files locally.\n\nsettings.py must always be within the same directory as Python script that is calling it.\n\n# NOTE\nYou will need Python (3.7.5 to be safe) installed to run these files.\n\nIf you want to decompile Sims 4's Python scripts just follow these steps:\n * Place settings.py, decompile_all_multi.py, and Utilities in the same directory\n * Open a terminal in the working directory and type \"py decompile_all_multi.py\" without the quotations to run the script\n * Don't be worried if you see some files failing to decompile, haven't found a way around that\n * Once done you'll have a folder called EA in your working directory with all the decompiled python files\n \n compile_mod.py is run the same way but Utilities is not needed, be sure to run The Sims 4 BEFORE trying to compile a mod.\n \n settings.py contains fields that need to be filled out.\n"
},
{
"alpha_fraction": 0.5430183410644531,
"alphanum_fraction": 0.5458391904830933,
"avg_line_length": 37.69091033935547,
"blob_id": "bddcab4978e1d925a1ab2d1ae01170b2dea10f32",
"content_id": "f8b02d789fde09f493981ecaca82aa568c8ded3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2127,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 55,
"path": "/decompile_all_multi.py",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "import os, multiprocessing, time, re, sys, threading\nimport fnmatch, shutil, io\nfrom zipfile import PyZipFile, ZIP_STORED\nfrom Utilities.unpyc3 import decompile\n\nclass SimsDecompiler():\n def __init__(self):\n self.delay_time = 2.0\n self.processes = []\n self.ea_folder = \"EA\"\n self.gameplay_folder_data = \"\"\n self.gameplay_folder_game = \"\"\n self.script_package_types = [\"*.zip\", \"*.ts4script\"]\n self.q = multiprocessing.Manager().Queue()\n\n if not os.path.exists(self.ea_folder):\n os.mkdir(self.ea_folder)\n\n def decompile_dir(self, p):\n try:\n py = decompile(p)\n with io.open(p.replace(\".pyc\", \".py\"), \"w\", encoding=\"utf-8\") as output_py:\n for statement in py.statements:\n output_py.write(str(statement) + \"\\r\")\n except Exception as ex:\n print(\"Failed to decompile %s\" % p)\n\n def fill_queue(self, curr_folder):\n for root, dirs, files in os.walk(curr_folder):\n for ext_filter in self.script_package_types:\n for filename in fnmatch.filter(files, ext_filter):\n src = os.path.join(root, filename)\n dst = os.path.join(self.ea_folder, filename)\n if src != dst:\n shutil.copyfile(src, dst)\n zip = PyZipFile(dst)\n out_folder = os.path.join(self.ea_folder, os.path.splitext(filename)[0])\n zip.extractall(out_folder)\n self.q.put(out_folder)\n\n def worker(self):\n filename = None\n if not filename:\n filename = self.q.get()\n\n grand_children = []\n pattern = '*.pyc'\n for root, dirs, files in os.walk(filename):\n for filename in fnmatch.filter(files, pattern):\n p = str(os.path.join(root, filename))\n thrd = threading.Thread(target=self.decompile_dir, args=(p,))\n grand_children.append(thrd)\n thrd.start()\n for thrd in grand_children:\n thrd.join(self.delay_time)"
},
{
"alpha_fraction": 0.6353833675384521,
"alphanum_fraction": 0.6417731642723083,
"avg_line_length": 33.79166793823242,
"blob_id": "53315907560785f33bea69a0c99ce71c1fbc16bb",
"content_id": "655f62d456fa96a5d0963a643c0fcb56b8f997ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2504,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 72,
"path": "/decompiler.py",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "import sys, subprocess, os, time, multiprocessing\nimport PyQt5.QtWidgets as QtWidgets\nimport PyQt5.QtGui\nimport settings\nimport decompile_all_multi\n\nclass App(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.title = \"Sims 4 Decompiler\"\n self.left = 200\n self.top = 200\n self.width = 640\n self.height = 480\n\n self.curr_settings = settings.Settings()\n self.curr_decompiler = decompile_all_multi.SimsDecompiler()\n\n self.init_ui()\n \n def init_ui(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n self.get_creator_name()\n self.get_game_folder()\n self.run_decompile_all_multi()\n\n #self.run_decompiler()\n\n self.show()\n\n def get_creator_name(self):\n creator_name, ok_pressed = QtWidgets.QInputDialog.getText(self, \"Creator's Name\",\"Your Creator Name:\", QtWidgets.QLineEdit.Normal, \"\")\n if ok_pressed and creator_name != \"\":\n self.curr_settings.set_creator_name(creator_name)\n\n def get_game_folder(self):\n self.curr_settings.set_game_folder(str(os.path.abspath(QtWidgets.QFileDialog.getExistingDirectory(self, \"Select directory with sims.exe\"))))\n\n def run_decompiler(self):\n button = QtWidgets.QPushButton(\"Run\")\n\n button.clicked.connect(self.run_decompile_all_multi())\n button.show()\n\n def run_decompile_all_multi(self):\n self.curr_decompiler.gameplay_folder_data = os.path.join(self.curr_settings.game_folder, \"Data\", \"Simulation\", \"Gameplay\")\n self.curr_decompiler.gameplay_folder_game = os.path.join(self.curr_settings.game_folder, \"Game\", \"Bin\", \"Python\")\n\n #alert = QtWidgets.QMessageBox()\n #alert.setText(\"Now running the decompiler...\")\n #alert.exec_()\n\n start_time = time.time()\n\n self.curr_decompiler.fill_queue(self.curr_decompiler.gameplay_folder_data)\n self.curr_decompiler.fill_queue(self.curr_decompiler.gameplay_folder_game)\n \n for i in range(4):\n proc = multiprocessing.Process(target=self.curr_decompiler.worker())\n self.curr_decompiler.processes.append(proc)\n proc.start()\n for proc in self.curr_decompiler.processes:\n proc.join()\n\n print(\"This run took %f seconds\" % (time.time()-start_time))\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())"
},
{
"alpha_fraction": 0.7052896618843079,
"alphanum_fraction": 0.7279596924781799,
"avg_line_length": 32.16666793823242,
"blob_id": "6df2d663f53bc31f6ae0c06ac87bd7f027675ab5",
"content_id": "80c431b66d6d259cd98b435a6b26df7c4bcd07eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 12,
"path": "/genders/Scripts/genders.py",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "import sims4.commands\n\[email protected]('genders', command_type=sims4.commands.CommandType.Live)\ndef get_gender(_connection=None):\n #sim_info = sim_obj.SimInfoNameData.DESCRIPTOR.gender\n #genders = EA.simulation.sims.sim_info_types.Gender()\n \n #setattr(genders, 'OTHER', 16384)\n\n output = sims4.commands.CheatOutput(_connection)\n output(\"Hello World\")\n #output(genders)"
},
{
"alpha_fraction": 0.6166180968284607,
"alphanum_fraction": 0.6180757880210876,
"avg_line_length": 26.479999542236328,
"blob_id": "55ea00ff027ecda93da2ac4b451eac3d7f94d8b6",
"content_id": "3c72e6d98daa920bbe133bfdcaf5491df3d79270",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 25,
"path": "/settings.py",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "import os\n\nclass Settings():\n def __init__(self):\n self.creator_name = \"\"\n self.mods_folder = os.path.expanduser(os.path.join('~', 'Documents', 'Electronic Arts', 'The Sims 4', 'Mods'))\n self.game_folder = \"\"\n\n def set_creator_name(self, creator_name):\n self.creator_name = creator_name\n\n def set_mods_folder(self, mods_folder):\n self.mods_folder = mods_folder\n\n def set_game_folder(self, game_folder):\n self.game_folder = game_folder\n\n def get_creator_name(self):\n return self.creator_name\n \n def get_mods_folder(self):\n return self.mods_folder\n\n def get_game_folder(self):\n return self.game_folder"
},
{
"alpha_fraction": 0.6325581669807434,
"alphanum_fraction": 0.6418604850769043,
"avg_line_length": 25.875,
"blob_id": "d7ce640b4f1114a478a560412cb77e13d2144930",
"content_id": "cac6430c2deafc3b5bc0e65c7516e6f59753456c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 8,
"path": "/genders/settings.py",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "import os\n\ncreator_name = 'Xpherion'\nmods_folder = os.path.expanduser(\n os.path.join('~', 'Documents', 'Electronic Arts', 'The Sims 4', 'Mods')\n)\n\ngame_folder = os.path.join('D:', os.sep, 'Origin', 'The Sims 4')\n"
},
{
"alpha_fraction": 0.6497109532356262,
"alphanum_fraction": 0.6612716913223267,
"avg_line_length": 36.60869598388672,
"blob_id": "1103b7204438a3790efb3327a7e5c2d5793c053d",
"content_id": "f0cc4a3d88280aadcd27443a6eeb9b8851539512",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 865,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 23,
"path": "/genders/compile_mod.py",
"repo_name": "Xpheriono/SimsThings",
"src_encoding": "UTF-8",
"text": "import os, shutil\nfrom zipfile import PyZipFile, ZIP_STORED\nfrom settings import *\n\nroot = os.path.dirname(os.path.realpath('__file__'))\nmod_name = None\n\nif __name__ == \"__main__\":\n mod_name = input(\"Type the name of your mod and hit enter or just hit enter to skip naming: \")\n src = os.path.join(root, 'Scripts')\n if not mod_name:\n mod_name=os.path.basename(os.path.normpath(os.path.dirname(os.path.realpath('__file__'))))\n \n mod_name = creator_name + '_' + mod_name\n ts4script = os.path.join(root, mod_name + '.ts4script')\n\n ts4script_mods = os.path.join(os.path.join(mods_folder), mod_name + '.ts4script')\n\n zf = PyZipFile(ts4script, mode='w', compression=ZIP_STORED, allowZip64=True, optimize=2)\n for folder, subs, files in os.walk(src):\n zf.writepy(folder)\n zf.close()\n shutil.copyfile(ts4script, ts4script_mods)\n"
}
] | 7 |
putaodoudou/robotframework-datatype | https://github.com/putaodoudou/robotframework-datatype | 47773984432dab6f86f6907c89b40d179b8c9257 | 68f5b4c96e8f6746aefde1ee90b7083063e74877 | a0d2844e728ffa38a07d23e7defc188fdd705cda | refs/heads/master | 2021-05-27T03:38:43.651859 | 2014-03-10T20:29:12 | 2014-03-10T20:29:12 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5738045573234558,
"alphanum_fraction": 0.5738045573234558,
"avg_line_length": 31.772727966308594,
"blob_id": "92fded3962590ac48d189f9e9168a77c537e27c6",
"content_id": "721048e50fdc478ec0042254d25057358f17d45b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1443,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 44,
"path": "/DataTypeLibrary.py",
"repo_name": "putaodoudou/robotframework-datatype",
"src_encoding": "UTF-8",
"text": "from robot.api import logger\n\n\nclass DataTypeLibrary(object):\n\n def should_be_integer(self, obj):\n if not isinstance(obj, int):\n message = \"%s his is not integer\" % obj\n raise AssertionError(message)\n\n def should_be_string(self, obj):\n if not isinstance(obj, basestring):\n message = \"%s his is not string\" % obj\n raise AssertionError(message)\n \n def should_be_digit(self, obj):\n if not obj.isdigit():\n message = \"%s his is not digit\" % obj\n raise AssertionError(message)\n \n def should_be_alphanumeric(self, obj):\n if not obj.isalnum():\n message = \"%s his is not alphanumeric\" % obj\n raise AssertionError(message)\n\n def should_be_alphabetic(self, obj):\n if not obj.isalpha():\n message = \"%s is not alphabetic\" % obj\n raise AssertionError(message)\n \n def should_be_numeric(self, obj):\n if not obj.isnumeric():\n message = \"%s is not numeric\" % obj\n raise AssertionError(message)\n\n def convert_to_lowercase(self, string):\n lowstr = string.lower()\n logger.info(\"%s has been converted to %s\" % (string, lowstr))\n return lowstr \n \n def convert_to_uppercase(self, string):\n upstr = string.upper()\n logger.info(\"%s has been converted to %s\" % (string, upstr))\n return upstr \n"
},
{
"alpha_fraction": 0.6864407062530518,
"alphanum_fraction": 0.6864407062530518,
"avg_line_length": 28.5,
"blob_id": "c8f2ec5269ac87cae38b71a69e3a544bd96235d4",
"content_id": "eb3a82ff499d036b5c9e2017759aa884cddf5534",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 118,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 4,
"path": "/README.md",
"repo_name": "putaodoudou/robotframework-datatype",
"src_encoding": "UTF-8",
"text": "robotframework-datatype\n=======================\n\nRobot Framework library for data type manipulation and verification.\n"
}
] | 2 |
TylerCampos144/Dodger | https://github.com/TylerCampos144/Dodger | 1e54c230f9536f9a4e13e5adc834d88076c71f6a | b1bd211e127c851399477816ea0fe1513621f384 | dcbd491572e5e601a9106333735b3cc833064493 | refs/heads/master | 2023-03-27T08:43:30.843796 | 2021-03-19T10:28:46 | 2021-03-19T10:28:46 | 349,384,033 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5576992034912109,
"alphanum_fraction": 0.572704553604126,
"avg_line_length": 28.93048095703125,
"blob_id": "082bf495cc15786d5466e125f103b40425933cd8",
"content_id": "0d2a1b918b19a294db27860c66af682b610a83c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5598,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 187,
"path": "/Dodger.py",
"repo_name": "TylerCampos144/Dodger",
"src_encoding": "UTF-8",
"text": "import pygame\nimport random\n\nWindowWidth = 600\nWindowHeight = 600\nTextColor = (0, 0, 0)\nBackgroundColor = (255, 255, 255)\nFPS = 60\nEnemySizeMin = 10\nEnemySizeMax = 40\nEnemySpeedMin = 1\nEnemySpeedMax = 8\nEnemySpawnRate = 6\nPlayerMoveRate = 5\n\n\ndef terminate():\n pygame.quit()\n\n\n\ndef WaitForPlayerToPressKey():\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n if event.key == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n return\n\n\ndef PlayerHitsEnemy(PlayerRect, Enemy):\n for e in Enemy:\n if PlayerRect.colliderect(b['rect']):\n return True\n return False\n\n\ndef DrawText(text, font, surface, x, y):\n textobj = font.render(text, 1, TextColor)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n\n\n# Set up\n\npygame.init()\nMainClock = pygame.time.Clock()\nWindowSurface = pygame.display.set_mode((WindowWidth, WindowHeight))\npygame.display.set_caption('Dodger')\npygame.mouse.set_visible(False)\n\n# Fonts\nfont = pygame.font.SysFont(None, 48)\n\n# show start\nWindowSurface.fill(BackgroundColor)\nDrawText('Dodger', font, WindowSurface, (WindowWidth / 2,), (WindowHeight / 2))\nDrawText('press any key to start', font, WindowSurface, (WindowWidth / 3) - 30, (WindowHeight / 3) + 50)\npygame.display.update()\nWaitForPlayerToPressKey()\n\nTopScore = 0\nwhile True:\n # start of game\n Enemys = []\n score = 0\n PlayerRect.topleft = (WindowWidth / 2, WindowHeight - 50)\n MoveLeft = MoveRight = MoveUp = MoveDown = False\n ReverseCheat = SlowCheat = False\n EnemyAddCounter = 0\n pygame.mixer.music.play(-1, 0.0)\n\n while True:\n # the game loop runs while the game is playing\n score += 1 # increases score\n\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n\n if event.type == KEYDOWN:\n if event.type == K_z:\n ReverseCheat = True\n if event.type == K_x:\n SlowCheat = True\n if event.type == K_LEFT or event.key == K_a:\n MoveRight = False\n MoveLeft = True\n if event.type == K_RIGHT or event.key == K_d:\n MoveLeft = False\n MoveRight = True\n if event.type == K_UP or event.key == K_w:\n MoveDown = False\n MoveUp = True\n if event.type == K_DOWN or event.key == K_s:\n MoveUp = False\n MoveDown = True\n\n if event.type == KEYUP:\n if event.key == K_z:\n ReverseCheat = False\n score = 0\n if event.key == K_ESCAPE:\n terminate()\n\n if event.type == K_LEFT or event.key == K_a:\n MoveLeft = False\n if event.type == K_RIGHT or event.key == K_d:\n MoveRight = False\n if event.type == K_UP or event.key == K_w:\n MoveUp = False\n if event.type == K_DOWN or event.key == K_s:\n MoveDown = False\n\n if event.type == MOUSEMOTION:\n # if mouse moves, it moves the player\n PlayerRect.centerx = event.pos[0]\n PlayerRect.centery = event.pos[1]\n\n if not ReverseCheat and not SlowCheat:\n EnemyAddCounter += 1\n if EnemyAddCounter == EnemySpawnRate:\n EnemyAddCounter = 0\n EnemySize = random.randint(EnemySizeMin, EnemySizeMax)\n NewEnemy = {\n 'rect': pygame.Rect(random.randint(0, WindowWidth - EnemySize), 0 - EnemySize, EnemySize, EnemySize),\n 'speed': random.randint(EnemySpeedMin, EnemySpeedMax),\n 'surface': pygame.transform.scale(EnemySize, EnemySize)\n }\n\n Enemy.apend(NewEnemy)\n\n # Moves Player\n if MoveLeft and PlayerRect.left > 0:\n PlayerRect.move_ip(-1 * PlayerMoveRate, 0)\n if MoveRight and PlayerRect.right < WindowWidth:\n PlayerRect.move_ip(PlayerMoveRate, 0)\n if MoveUp and PlayerRect.top > 0:\n PlayerRect.move_ip(0, -1 * PlayerMoveRate)\n if MoveDown and 
PlayerRect.bottom < WindowHeight:\n PlayerRect.move_ip(0, PlayerMoveRate)\n\n # moves enemy\n for e in enemys:\n if not ReverseCheat and not SlowCheat:\n b['rect'].move_ip(0, b['speed'])\n elif ReverseCheat:\n b['rect'].move_ip(0, -5)\n elif SlowCheat:\n b['rect'].move_ip(0, 1)\n\n for e in enemys:\n if b['rect'].top > WindowHeight:\n Enemys.remove(b)\n\n # draw world Surface\n WindowSurface.fill(BackgroundColor)\n\n # Draw score\n DrawText('Score: %s' % (score), font, WindowSurface, 10, 0)\n DrawText('Top Score: %s' % (TopScore), font, WindowSurface, 10, 40)\n\n # Draw Player\n WindowSurface.blit(PlayerRect)\n\n # draw enemys\n for e in enemys:\n WindowSurface.blit(b['surface'], b['rect'])\n\n pygame.display.update()\n\n # check if enemy hit Player\n if PlayerHitsEnemy(PlayerRect, enemys):\n if Score > TopScore:\n TopScore = Score # sets new score\n break\n\n MainClock.tick(FPS)\n\n# stops game and shows game over\nDrawText('Game Over', font, WindowSurface, (WindowWidth / 3), (WindowHeight / 3))\nDrawText('Press a key to play again', font, WindowSurface, (WindowWidth / 3) - 80, (WindowHeight / 3) + 50)\npygame.display.update()\nwaitForPlayerToPressKey()\n\n"
}
] | 1 |
ravomavain/uselessness | https://github.com/ravomavain/uselessness | f61e9d359e24960394b920e277b3432433460f85 | b0df31c9ef09746502685704848fb25d5cf3085c | fc3e7d175aaa436dd008ca63486161c9cd684472 | refs/heads/master | 2020-04-06T06:40:35.426833 | 2015-07-14T18:12:22 | 2015-07-14T18:12:22 | 2,886,757 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6941789984703064,
"alphanum_fraction": 0.6985230445861816,
"avg_line_length": 24.021739959716797,
"blob_id": "16b8e0ffc645dac2d6430aa78c3ec3f6a5955c65",
"content_id": "c2db2326457c683c99df113a67a4a21383097f73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1151,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 46,
"path": "/znc-whorefilter.cpp",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "/**\n* ZNC Whore Filter\n*\n* Allows the user to redirect what boring ppl say to another (fake) chan.\n*\n* Copyright (c) 2012 Romain Labolle\n*\n* This program is free software; you can redistribute it and/or modify it\n* under the terms of the GNU General Public License version 2 as published\n* by the Free Software Foundation.\n*/\n\n#include \"main.h\"\n#include \"User.h\"\n#include \"Nick.h\"\n#include \"Modules.h\"\n#include \"Chan.h\"\n\n\nclass CWhoreMod : public CModule {\npublic:\n\tMODCONSTRUCTOR(CWhoreMod) {}\n\n\tvirtual bool OnLoad(const CString& sArgs, CString& sErrorMsg) {\n\t\tm_sHostmask = \"attentionwhor*!*srs@*\";\n\t\tm_sChannel = \"#srsbsns\";\n\t\tm_sNewChan = \"~#whorefilter\";\n\t\treturn true;\n\t}\n\n\tvirtual ~CWhoreMod() {}\n\n\tvirtual EModRet OnChanMsg(CNick& Nick, CChan& Channel, CString& sMessage) {\n\t\tif (Nick.GetHostMask().WildCmp(m_sHostmask) && Channel.GetName().AsLower().WildCmp(m_sChannel)) {\n\t\t\tPutUser(\":\" + Nick.GetHostMask() + \" PRIVMSG \" + m_sNewChan + \" :\" + sMessage);\n\t\t\treturn HALT;\n\t\t}\n\t\treturn CONTINUE;\n\t}\nprivate:\n\tCString m_sHostmask;\n\tCString m_sChannel;\n\tCString m_sNewChan;\n};\n\nMODULEDEFS(CWhoreMod, \"Filter redirect whore msg to another (fake) chan\")\n"
},
{
"alpha_fraction": 0.3267463147640228,
"alphanum_fraction": 0.41130515933036804,
"avg_line_length": 30.08571434020996,
"blob_id": "4992023e5a956d82273a692ddf3296cc3f58aeb1",
"content_id": "2699b5b7e7fe9f195ccf4f3415397f06bb170b63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2176,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 70,
"path": "/sha1.py",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8\nimport sys\n\nK = [0x5A827999L, 0x6ED9EBA1L, 0x8F1BBCDCL, 0xCA62C1D6L]\n\ndef S(x,k):\n return ((x<<k)|(x>>(32-k)))&0xffffffffL;\n\ndef int2str(num, l):\n return ''.join([chr((num>>((l-i-1)*8))&0xff) for i in range(l)])\n\ndef sha1(string):\n H = [0x67452301L, 0xEFCDAB89L, 0x98BADCFEL, 0x10325476L, 0xC3D2E1F0L]\n l = len(string)*8 # Longueur du texte\n z = (448-(l+1))%512 # nombre de zero a ajouter pour obtenir une langueur de 512n-64\n info = (1<<(z+64))|l # 1 suivi de z fois 0 et de l sur 64bits\n string += int2str(info, (1+z+64)/8)\n \n for n in range(len(string)/64):\n M = string[n*64:(n+1)*64]\n W = [ (ord(M[t*4+0])<<24) | (ord(M[t*4+1])<<16) | (ord(M[t*4+2])<<8) | ord(M[t*4+3]) for t in range(16) ]\n for i in range(16, 80):\n W.append(S(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16], 1))\n [a, b, c, d, e] = H\n \n for t in range(0, 20):\n tmp = S(a, 5) + ((b & c) | ((~ b) & d)) + e + W[t] + K[0]\n e = d\n d = c\n c = S(b, 30)\n b = a\n a = tmp & 0xffffffffL\n \n for t in range(20, 40):\n tmp = S(a, 5) + (b ^ c ^ d) + e + W[t] + K[1]\n e = d\n d = c\n c = S(b, 30)\n b = a\n a = tmp & 0xffffffffL\n \n for t in range(40, 60):\n tmp = S(a, 5) + ((b & c) | (b & d) | (c & d)) + e + W[t] + K[2]\n e = d\n d = c\n c = S(b, 30)\n b = a\n a = tmp & 0xffffffffL\n \n for t in range(60, 80):\n tmp = S(a, 5) + (b ^ c ^ d) + e + W[t] + K[3]\n e = d\n d = c\n c = S(b, 30)\n b = a\n a = tmp & 0xffffffffL\n \n H = [(H[0]+a) & 0xffffffffL,\n (H[1]+b) & 0xffffffffL,\n (H[2]+c) & 0xffffffffL,\n (H[3]+d) & 0xffffffffL,\n (H[4]+e) & 0xffffffffL]\n \n return hex((H[0]<<128 | H[1]<<96 | H[2]<<64 | H[3]<<32 | H[4]))[2:-1].zfill(40)\n\nif len(sys.argv) > 1:\n print(sha1(sys.argv[1]))\nelse:\n print(\"Usage: %s \\\"string to hash\\\"\"%sys.argv[0])\n"
},
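The repository's README (further down) notes that Python's built-in `hashlib` is the faster reference implementation; the same module also makes a handy correctness check for the code above. A small sketch (assuming it runs under Python 2, like `sha1.py` itself, and that `sha1.py` is importable as `sha1`; importing it will also print its usage line, because the command-line check runs at import time):

```python
import hashlib
from sha1 import sha1

# Compare the hand-rolled digest against hashlib on a few inputs.
for msg in ["", "abc", "The quick brown fox jumps over the lazy dog"]:
    assert sha1(msg) == hashlib.sha1(msg).hexdigest(), msg
print("all digests match")
```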
{
"alpha_fraction": 0.6045602560043335,
"alphanum_fraction": 0.6185667514801025,
"avg_line_length": 25.465517044067383,
"blob_id": "cc9d41563479e5be2ad7aab1982ce9b26a2697c0",
"content_id": "20a6ce242023e14c3774d29e1a42fcebde857731",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3070,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 116,
"path": "/znc-dice.cpp",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "/**\n* ZNC dice bot\n*\n* Copyright (c) 2012 Romain Labolle\n*\n* This program is free software; you can redistribute it and/or modify it\n* under the terms of the GNU General Public License version 2 as published\n* by the Free Software Foundation.\n*/\n\n#include <znc/znc.h>\n#include <znc/Chan.h>\n#include <znc/User.h>\n#include <znc/Modules.h>\n\nclass CDiceMod : public CModule {\npublic:\n\tMODCONSTRUCTOR(CDiceMod) {}\n\n\tvirtual bool OnLoad(const CString& sArgs, CString& sErrorMsg) {\n\t\tuser = GetUser();\n\t\tHighScore = sArgs.Token(0).ToInt();\n\t\tPutModule(\"HighScore: \"+CString(HighScore));\n\t\tlastturn = false;\n\t\treturn true;\n\t}\n\n\tvirtual ~CDiceMod() {}\n\n\tvirtual void OnModCommand(const CString& sCommand) {\n\t\tif (sCommand.Token(0).Equals(\"high\")) {\n\t\t\tHighScore = sCommand.Token(1).ToInt();\n\t\t\tPutModule(\"HighScore: \"+CString(HighScore));\n\t\t}\n\t}\n\n\tvirtual EModRet OnChanMsg(CNick& Nick, CChan& Channel, CString& sMessage) {\n\t\tif (sMessage.Equals(\"!dice start\")) {\n\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice join\");\n\t\t\treturn CONTINUE;\n\t\t}\n\t\tCNick nick = user->GetNick();\n\t\tif (Nick.GetNick().Equals(\"Nimda3\"))\n\t\t{\n\t\t\tif (sMessage.Token(0).Equals(nick.GetNick()+\"\\'s\") && sMessage.Token(1).Equals(\"turn.\"))\n\t\t\t{\n\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice roll\");\n\t\t\t\tlastturn = false;\n\t\t\t\treturn CONTINUE;\n\t\t\t}\n\t\t\tif (sMessage.Token(0).Equals(nick.GetNick()) && sMessage.Token(1).Equals(\"rolls\"))\n\t\t\t{\n//\t\t\t\travomavain rolls a 2. Points: 49 + 2 => 51 - roll again or stand?\n\t\t\t\tint value = sMessage.Token(3).ToInt();\n\t\t\t\tif (value == 6) {\n\t\t\t\t\treturn CONTINUE;\n\t\t\t\t}\n\t\t\t\tint saved = sMessage.Token(5).ToInt();\n\t\t\t\tint temp = sMessage.Token(7).ToInt();\n\t\t\t\tint total = saved + temp;\n\t\t\t\tif (lastturn) {\n\t\t\t\t\tif (total < score) {\n\t\t\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice roll\");\n\t\t\t\t\t\treturn CONTINUE;\n\t\t\t\t\t}\n\t\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice stand\");\n\t\t\t\t\treturn CONTINUE;\n\t\t\t\t}\n\t\t\t\tif (total == 49 || total >= HighScore) {\n\t\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice stand\");\n\t\t\t\t\treturn CONTINUE;\n\t\t\t\t}\n\t\t\t\tif (total < 49)\n\t\t\t\t{\n\t\t\t\t\tif (temp >= 20)\n\t\t\t\t\t{\n\t\t\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice stand\");\n\t\t\t\t\t\treturn CONTINUE;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice roll\");\n\t\t\t\treturn CONTINUE;\n\t\t\t}\n\t\t\tif (sMessage.Left(37).Equals(\"You broke the highest sum record with\"))\n\t\t\t{\n\t\t\t\tHighScore = sMessage.Token(7).ToInt()+1;\n\t\t\t\tPutModule(\"HighScore: \"+CString(HighScore));\n\t\t\t\treturn CONTINUE;\n\t\t\t}\n\t\t\tif (sMessage.Left(26).Equals(\"Dice game has been started\"))\n\t\t\t{\n\t\t\t\tlastturn = false;\n\t\t\t\treturn CONTINUE;\n\t\t\t}\n\t\t\tif (sMessage.Left(39).Equals(\"It is a really bad idea to \\x02stand\\x02 now.\"))\n\t\t\t{\n\t\t\t\tPutIRC(\"PRIVMSG \" + Channel.GetName() + \" :!dice roll\");\n\t\t\t\treturn CONTINUE;\n\t\t\t}\n\t\t\tif (sMessage.WildCmp(\"*get one more chance to beat your score*\"))\n\t\t\t{\n\t\t\t\tlastturn = true;\n\t\t\t\tscore = sMessage.Token(11).ToInt();\n\t\t\t\treturn CONTINUE;\n\t\t\t}\n\t\t}\n\t\treturn CONTINUE;\n\t}\nprivate:\n\tCUser *user;\n\tint HighScore;\n\tbool lastturn;\n\tint score;\n};\n\nMODULEDEFS(CDiceMod, \"Dice 
bot\")\n"
},
{
"alpha_fraction": 0.6659103631973267,
"alphanum_fraction": 0.6732841730117798,
"avg_line_length": 22.824323654174805,
"blob_id": "0a43d65176b801b4fc8d1a098886e74b1d870bfc",
"content_id": "85cb42400a9690ce7d1cf0fdc012c3221bc6aba9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1763,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 74,
"path": "/znc-getop.cpp",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "/**\n* ZNC Get Op\n*\n* Allows the user to redirect what boring ppl say to another (fake) chan.\n*\n* Copyright (c) 2012 Romain Labolle\n*\n* This program is free software; you can redistribute it and/or modify it\n* under the terms of the GNU General Public License version 2 as published\n* by the Free Software Foundation.\n*/\n\n#include <znc/znc.h>\n#include <znc/Chan.h>\n#include <znc/User.h>\n#include <znc/Modules.h>\n#include <znc/IRCNetwork.h>\n\nusing std::map;\nusing std::vector;\n\nclass CGetOpMod : public CModule {\npublic:\n\tMODCONSTRUCTOR(CGetOpMod) {}\n\n\tvirtual bool OnLoad(const CString& sArgs, CString& sErrorMsg) {\n\t\treturn true;\n\t}\n\n\tvirtual ~CGetOpMod() {}\n\n\tvirtual void update(CChan& Channel) {\n\t\tconst map<CString,CNick>& Nicks = Channel.GetNicks();\n\n\t\tfor (map<CString,CNick>::const_iterator it = Nicks.begin(); it != Nicks.end(); ++it) {\n\t\t\tif (it->second.HasPerm('@'))\n\t\t\t\treturn;\n\t\t}\n\t\tPutIRC(\"PRIVMSG R :REQUESTOP \" + Channel.GetName() + \" \" + GetNetwork()->GetIRCNick().GetNick());\n\t\treturn;\n\t}\n\n\tvirtual void OnDeop(const CNick& OpNick, const CNick& Nick, CChan& Channel, bool bNoChange) {\n\t\tupdate(Channel);\n\t\treturn;\n\t}\n\n\tvirtual void OnPart(const CNick& Nick, CChan& Channel, const CString& sMessage) {\n\t\tupdate(Channel);\n\t\treturn;\n\t}\n\n\tvirtual void OnQuit(const CNick& Nick, const CString& sMessage, const vector<CChan*>& vChans) {\n\t\tfor(vector<CChan*>::const_iterator it = vChans.begin(); it != vChans.end(); ++it)\n\t\t{\n\t\t\tupdate(**it);\n\t\t}\n\t\treturn;\n\t}\n\t\n\tvirtual EModRet OnRaw(CString& sLine) {\n\t\t// :irc.server.com 366 nick #chan :End of /NAMES list.\n\t\tif (sLine.Token(1) == \"366\")\n\t\t{\n\t\t\tCChan* chan = GetNetwork()->FindChan(sLine.Token(3));\n\t\t\tif(chan)\n\t\t\t\tupdate(*chan);\n\t\t}\n\t\treturn CONTINUE;\n\t}\nprivate:\n};\n\nNETWORKMODULEDEFS(CGetOpMod, \"Get op\")\n"
},
{
"alpha_fraction": 0.7102696299552917,
"alphanum_fraction": 0.7223178148269653,
"avg_line_length": 30.672727584838867,
"blob_id": "39d714971a9b91e397e0b7effca4ce8adaab0726",
"content_id": "db6a0fb7edd4363c6d9514ae88294ff6fd277a27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1743,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 55,
"path": "/README.md",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "# Uselessness\n\nThat's where I'll upload few code samples I made, most of them are useless though.\n\n## Fibonacci irc bot (fibirc.vala)\n\nThis simple irc bot writen in vala connect to an irc channel and reply to '!fib n' commands with the nth Fibonacci number.\n\nCompile with :\n\n valac fibirc.vala --pkg gee-1.0 --pkg gio-2.0\n\n*It require valac compiler.*\n\nUsage :\n\n ./ircbot --help\n\n## Python sha1 implementation (sha1.py)\n\nThis is a python implementation of the sha1 algorithm, was originaly done for my brother who needed to do it as a programation homework (but since I finished it a bit too late -aka. after midnight- it was never used).\nIf you are looking for a fast and efficient implementation of sha1, you'd better to use the buildin hashlib's implementation, which is much faster.\n\nUsage:\n\n ./sha1.py \"string to hash\"\n\n## Huffman (huffman/*)\n\nThis is the ANSI C version of the Huffan decompression algorithm from [teeworlds' C++ version](https://github.com/oy/teeworlds/blob/master/src/engine/shared/huffman.cpp).\nIt's not a full implementation of the Huffman compression algorithm and was only created for [fisted's wireshark tw dissector](https://github.com/fisted/wireshark/tree/tw-dissect)\n\nCan be compiled with:\n\n gcc -ansi -pedantic -Wall -fPIC *.c -o huffman\n\nAlso work with less strict rules:\n\n gcc *.c -o huffman\n\n*main.c is only a test program*\n\n## bin2blob & blob2bin\n\nPHP scripts to convert sql blob data (0xAF49...) to binary data and vice versa.\n\nBoth scripts can be replaced by inline php commands:\n\n* bin2blob:\n\n php -r 'echo \"0x\".bin2hex(file_get_contents(\"php://stdin\"));'\n\n* blob2bin:\n\n php -r 'echo pack(\"H*\" , preg_replace(array(\"/^0x/i\",\"/[^0-9A-F]/i\"),\"\",file_get_contents(\"php://stdin\")));'\n\n"
},
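The hex/binary round-trip performed by the two PHP one-liners in the README is easy to mirror in Python, which may help clarify exactly what each direction does (an illustrative equivalent, not a script from the repository; unlike the PHP `preg_replace`, this sketch only strips the `0x` prefix, not other stray characters):

```python
import binascii

def bin2blob(data):
    # bytes -> "0x..." hex literal, like PHP's bin2hex with a 0x prefix
    return "0x" + binascii.hexlify(data).decode("ascii")

def blob2bin(blob):
    # "0x..." hex literal -> bytes, like PHP's pack("H*", ...)
    if blob.lower().startswith("0x"):
        blob = blob[2:]
    return binascii.unhexlify(blob)

assert blob2bin(bin2blob(b"\x00\xaf\x49")) == b"\x00\xaf\x49"
```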
{
"alpha_fraction": 0.6220855712890625,
"alphanum_fraction": 0.6291305422782898,
"avg_line_length": 21.328338623046875,
"blob_id": "d30c9323edae56d3e41ee2d0fd7b31dddf022f93",
"content_id": "f9842c2e45edc5a72aeba27387ed45360845e73a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 17885,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 801,
"path": "/urifs.c",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "/* urifs - add remote file from different locations in one filesystem.\n *\n * Copyright (C) 2011 ravomavain\n *\n * This library is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n * \n * This library is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n * Lesser General Public License for more details.\n * \n * You should have received a copy of the GNU Lesser General Public\n * License along with this library; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n *\n *\n * Requirement: libfuse, libcurl, libxml2 and libcrypto (openssl)\n *\n * Compile with: gcc -g -o urifs urifs.c -Wall -ansi -W -std=c99 -D_GNU_SOURCE `pkg-config --cflags --libs libxml-2.0 fuse libcurl libcrypto`\n *\n */\n\n#define FUSE_USE_VERSION 26\n\n#include <fuse.h>\n#include <stdio.h>\n#include <string.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <errno.h>\n#include <curl/curl.h>\n#include <pthread.h>\n#include <openssl/crypto.h>\n\n#include <libxml/tree.h>\n#include <libxml/parser.h>\n#include <libxml/xpath.h>\n#include <libxml/xpathInternals.h>\n\n#define MAX_ENTRIES\t512\n#define LOG_FILE\t\"/var/log/urifs.log\"\n\ntypedef struct {\n\tchar *uri;\n\tsize_t size;\n\tstruct curl_slist *header;\n} uri_fd;\n\nuri_fd ** opened_files = NULL;\n\nchar *source_xml;\n\nFILE *debug_f = NULL;\nFILE *error_f = NULL;\n\n#define DEBUG(...) { if(debug_f) { fprintf(debug_f,\"%s::%s:%d \",__FILE__,__func__,__LINE__);fprintf(debug_f,__VA_ARGS__); fprintf(debug_f,\"\\n\"); fflush(debug_f); } }\n\nstatic inline void xfree(void *p)\n{\n\tif(p)\n\t\tfree(p);\n\tp=NULL;\n}\n\nstatic pthread_mutex_t *lockarray;\n\nstatic void lock_callback(int mode, int type, char *file, int line)\n{\n\t(void)file;\n\t(void)line;\n\tif (mode & CRYPTO_LOCK) {\n\t\tpthread_mutex_lock(&(lockarray[type]));\n\t}\n\telse {\n\t\tpthread_mutex_unlock(&(lockarray[type]));\n\t}\n}\n \nstatic unsigned long thread_id(void)\n{\n\tunsigned long ret;\n \n\tret=(unsigned long)pthread_self();\n\treturn(ret);\n}\n \nstatic void init_locks(void)\n{\n\tint i;\n \n\tlockarray=(pthread_mutex_t *)OPENSSL_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t));\n\tfor (i=0; i<CRYPTO_num_locks(); i++) {\n\t\tpthread_mutex_init(&(lockarray[i]),NULL);\n\t}\n \n\tCRYPTO_set_id_callback((unsigned long (*)())thread_id);\n\tCRYPTO_set_locking_callback((void (*)())lock_callback);\n}\n \nstatic void kill_locks(void)\n{\n\tint i;\n \n\tCRYPTO_set_locking_callback(NULL);\n\tfor (i=0; i<CRYPTO_num_locks(); i++)\n\t\tpthread_mutex_destroy(&(lockarray[i]));\n \n\tOPENSSL_free(lockarray);\n}\n\n\nstatic char *xpath_from_path(const char *path)\n{\n\tchar *p;\n\tchar *xpath = NULL;\n\tsize_t len = strlen(path);\n\tchar *tmp = (char*)malloc(len+1);\n\tif (!tmp)\n\t\treturn NULL;\n\tstrncpy(tmp, path, len+1);\n\tif (asprintf(&xpath, \"/root\") == -1)\n\t{\n\t\txfree(tmp);\n\t\treturn NULL;\n\t}\n\tp = strtok(tmp, \"/\");\n\twhile( p != NULL )\n\t{\n\t\tchar *new;\n\t\tif (asprintf(&new, \"%s/\"\"*[@name=\\\"%s\\\"]\", xpath, p) == -1)\n\t\t{\n\t\t\txfree(xpath);\n\t\t\txfree(tmp);\n\t\t\treturn NULL;\n\t\t}\n\t\txfree(xpath);\n\t\txpath = new;\n\t\tp = strtok(NULL, 
\"/\");\n\t}\n\txfree(tmp);\n\treturn xpath;\n}\n\nstruct curl_buffer {\n\tchar *data;\n\tsize_t size;\n\tsize_t read;\n};\n\nstatic size_t curl_get_callback(void *contents, size_t size, size_t nmemb, void *userp)\n{\n\tDEBUG(\"args: void *contents = %p, size_t size = %lu, size_t nmemb = %lu, void *userp = %p\", contents, size, nmemb, userp)\n\tsize_t realsize = size * nmemb;\n\tstruct curl_buffer *buffer = (struct curl_buffer *)userp;\n\tif(buffer->read + realsize > buffer->size)\n\t\trealsize = buffer->size - buffer->read;\n\n\tmemcpy(buffer->data+buffer->read, contents, realsize);\n\tbuffer->read += realsize;\n\n\tDEBUG(\"return: %lu\", realsize)\n\treturn realsize;\n}\n\nstatic void header_cmd(uri_fd *fd, const char *cmd)\n{\n\tDEBUG(\"args: uri_fd *fd = %p, const char *cmd = \\\"%s\\\"\", fd, cmd)\n\tFILE *fp = popen(cmd, \"r\");\n\tchar *line = NULL;\n\tsize_t len = 0;\n\tssize_t read;\n\n\tif (fp != NULL)\n\t{\n\t\twhile ((read = getline(&line, &len, fp)) > 0 && line)\n\t\t{\n\t\t\tline[read-1] = '\\0';\n\t\t\tfd->header = curl_slist_append(fd->header,line);\n\t\t\tDEBUG(\"Added header \\\"%s\\\"\", line)\n\t\t}\n\t\txfree(line);\n\t\tpclose(fp);\n\t}\n}\n\nstatic int urifs_getattr(const char *path, struct stat *stbuf)\n{\n\tDEBUG(\"args: const char *path = \\\"%s\\\", struct stat *stbuf = %p\", path, stbuf)\n\txmlChar *value;\n\txmlNodeSetPtr nodes;\n\txmlNodePtr node;\n\txmlXPathObjectPtr xpathObj;\n\n\tchar *xpath = xpath_from_path(path);\n\tif (!xpath)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tDEBUG(\"xpath: %s\", xpath);\n\n\txpathObj = xmlXPathEvalExpression((xmlChar*)xpath, fuse_get_context()->private_data);\n\txfree(xpath);\n\tif(xpathObj == NULL)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tnodes = xpathObj->nodesetval;\n\n\tif (nodes && nodes->nodeNr > 0)\n\t{\n\t\tDEBUG(\"%d nodes found\", nodes->nodeNr)\n\t\tnode = nodes->nodeTab[0];\n\n\t\tif(strcmp((char*)node->name, \"dir\")==0 || strcmp((char*)node->name, \"root\")==0)\n\t\t{\n\t\t\tDEBUG(\"dir\")\n\t\t\tstbuf->st_mode = S_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tDEBUG(\"file\")\n\t\t\tstbuf->st_mode = S_IFREG;\n\t\t}\n\n\t\tstbuf->st_ino = 0;\n\t\tstbuf->st_mode |= S_IRUSR | S_IRGRP | S_IROTH /*| S_IWUSR | S_IWGRP | S_IWOTH*/;\n\t\tstbuf->st_nlink = 0;\n\t\tstbuf->st_uid = 0;\n\t\tstbuf->st_gid = 0;\n\t\tstbuf->st_rdev = 0;\n\t\tstbuf->st_size = 1;\n\t\tstbuf->st_blksize = 1;\n\t\tstbuf->st_blocks = 1;\n\t\tstbuf->st_atime = 0;\n\t\tstbuf->st_mtime = 0;\n\t\tstbuf->st_ctime = 0;\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"size\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_blocks = stbuf->st_size = atoll((char*)value);\n\t\t\txmlFree(value);\n\t\t}\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"uid\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_blocks = stbuf->st_size = atoll((char*)value);\n\t\t\txmlFree(value);\n\t\t}\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"gid\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_uid = atoi((char*)value);\n\t\t\txmlFree(value);\n\t\t}\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"mode\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_mode &= S_IFMT;\n\t\t\tstbuf->st_mode |= strtol ((char*)value, NULL, 8);\n\t\t\txmlFree(value);\n\t\t}\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"ctime\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_ctime = atoi((char*)value);\n\t\t\txmlFree(value);\n\t\t}\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"atime\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_atime = 
atoi((char*)value);\n\t\t\txmlFree(value);\n\t\t}\n\n\t\tvalue = xmlGetProp(node, (xmlChar*)\"mtime\");\n\t\tif(value)\n\t\t{\n\t\t\tstbuf->st_mtime = atoi((char*)value);\n\t\t\txmlFree(value);\n\t\t}\n\t\tDEBUG(\"mode: %d\", stbuf->st_mode)\n\n\t\txmlXPathFreeObject(xpathObj);\n\t\tDEBUG(\"return: 0\")\n\t\treturn 0;\n\t}\n\telse\n\t\tDEBUG(\"Invalid Xpath\")\n\n\txmlXPathFreeObject(xpathObj);\n\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nstatic int urifs_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *fi)\n{\n\t(void)offset;\n\t(void)fi;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", void *buf = %p, fuse_fill_dir_t filler = %p, off_t offset = %lu, int fi->fh = %lu\", path, buf, filler, offset, fi->fh)\n\tint i;\n\tint nb;\n\tchar *xpath;\n\txmlChar *value;\n\txmlNodeSetPtr nodes;\n\txmlXPathObjectPtr xpathObj;\n\n\tchar *tmp = xpath_from_path(path);\n\tif (!tmp)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tif (asprintf(&xpath, \"%s/*[@name]\", tmp) == -1)\n\t{\n\t\txfree(tmp);\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\txfree(tmp);\n\n\tDEBUG(\"xpath: %s\", xpath);\n\n\txpathObj = xmlXPathEvalExpression((xmlChar*)xpath, fuse_get_context()->private_data);\n\txfree(xpath);\n\tif(xpathObj == NULL)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tnodes = xpathObj->nodesetval;\n\n\tif (nodes)\n\t{\n\t\tnb = nodes->nodeNr;\n\t\tDEBUG(\"%d nodes found\", nb)\n\n\t\tfiller(buf, \".\", NULL, 0);\n\t\tfiller(buf, \"..\", NULL, 0);\n\t\tfor(i=0;i<nb;i++)\n\t\t{\n\t\t\tvalue = xmlGetProp(nodes->nodeTab[i], (xmlChar*)\"name\");\n\t\t\tif(value)\n\t\t\t{\n\t\t\t\tfiller(buf, (char*)value, NULL, 0);\n\t\t\t\txmlFree(value);\n\t\t\t}\n\t\t}\n\t\txmlXPathFreeObject(xpathObj);\n\t\tDEBUG(\"return: 0\")\n\t\treturn 0;\n\t}\n\telse\n\t{\n\t\tDEBUG(\"Invalid Xpath\")\n\t}\n\n\txmlXPathFreeObject(xpathObj);\n\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nstatic int urifs_create(const char *path, mode_t mode, struct fuse_file_info *fi)\n{\n\t(void)mode;\n\t(void)fi;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", mode_t mode = %d, int fi->fh = %lu\", path, mode, fi->fh)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nstatic int urifs_open(const char *path, struct fuse_file_info *fi)\n{\n\tDEBUG(\"args: const char *path = \\\"%s\\\", int fi->fh = %lu\", path, fi->fh)\n\tDEBUG(\"here\")\n\tint i = 0;\n\turi_fd *fd = NULL;\n\txmlChar *value;\n\txmlNodeSetPtr nodes;\n\txmlNodePtr node;\n\txmlXPathObjectPtr xpathObj;\n\n\tchar *xpath = xpath_from_path(path);\n\tif (!xpath)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tDEBUG(\"xpath: %s\", xpath);\n\n\txpathObj = xmlXPathEvalExpression((xmlChar*)xpath, fuse_get_context()->private_data);\n\txfree(xpath);\n\tif(xpathObj == NULL)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tnodes = xpathObj->nodesetval;\n\n\tif (nodes && nodes->nodeNr > 0)\n\t{\n\t\tDEBUG(\"%d nodes found\", nodes->nodeNr)\n\t\tnode = nodes->nodeTab[0];\n\n\t\tfd = (uri_fd*)malloc(sizeof(uri_fd));\n\t\tif (fd)\n\t\t{\n\t\t\tfd->uri = NULL;\n\t\t\tfd->header = NULL;\n\t\t\tvalue = xmlGetProp(node, (xmlChar*)\"size\");\n\t\t\tif(value)\n\t\t\t{\n\t\t\t\tfd->size = atoll((char*)value);\n\t\t\t\txmlFree(value);\n\t\t\t\tvalue = xmlGetProp(node, (xmlChar*)\"uri\");\n\t\t\t\tif(value)\n\t\t\t\t{\n\t\t\t\t\tfd->uri = 
strdup((char*)value);\n\t\t\t\t\txmlFree(value);\n\t\t\t\t\tif(fd->uri)\n\t\t\t\t\t{\n\t\t\t\t\t\tvalue = xmlGetProp(node, (xmlChar*)\"header\");\n\t\t\t\t\t\tif(value)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfd->header = curl_slist_append(fd->header,(char*)value);\n\t\t\t\t\t\t\tDEBUG(\"Added header \\\"%s\\\"\", (char*)value)\n\t\t\t\t\t\t\txmlFree(value);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvalue = xmlGetProp(node, (xmlChar*)\"header-cmd\");\n\t\t\t\t\t\tif(value)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\theader_cmd(fd, (char*)value);\n\t\t\t\t\t\t\txmlFree(value);\n\t\t\t\t\t\t}\n\t\t\t\t\t\twhile((i < MAX_ENTRIES)&&(opened_files[i]))\n\t\t\t\t\t\t\ti++;\n\t\t\t\t\t\tif(i < MAX_ENTRIES)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfi->fh = i;\n\t\t\t\t\t\t\topened_files[i] = fd;\n\t\t\t\t\t\t\txmlXPathFreeObject(xpathObj);\n\t\t\t\t\t\t\tDEBUG(\"return: 0\")\n\t\t\t\t\t\t\treturn 0;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcurl_slist_free_all(fd->header);\n\t\t\t\t\t\txfree(fd->uri);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\txfree(fd);\n\t\t}\n\t}\n\telse\n\t\tDEBUG(\"0 node found\")\n\n\txmlXPathFreeObject(xpathObj);\n\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nstatic int urifs_read(const char *path, char *buf, size_t size, off_t offset, struct fuse_file_info *fi)\n{\n\t(void)path;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", char *buf = %p, size_t size = %lu, off_t offset = %lu, int fi->fh = %lu\", path, buf, size, offset, fi->fh)\n\tsize_t bytes;\n\turi_fd *fd;\n\tchar *range;\n\tCURL *curl_handle;\n\tCURLcode res;\n\tstruct curl_buffer buffer;\n\tlong http_code = 0;\n\n\tif(size == 0)\n\t{\n\t\tDEBUG(\"return: 0\")\n\t\treturn 0;\n\t}\n\n\tfd = opened_files[fi->fh];\n\n\tif (!fd)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tif((size + offset) >= fd->size)\t{\n\t\tbytes = fd->size - offset;\n\t} else {\n\t\tbytes = size;\n\t}\n\n\tif(asprintf(&range, \"%llu-%llu\", (unsigned long long)offset, (unsigned long long)offset+(unsigned long long)bytes-1) == -1)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\tDEBUG(\"Range: %s (bytes: %llu)\", range, (unsigned long long)bytes);\n\n\tcurl_handle = curl_easy_init();\n\tcurl_easy_setopt(curl_handle, CURLOPT_URL, fd->uri);\n\n\tcurl_easy_setopt(curl_handle, CURLOPT_NOSIGNAL, 1);\n\tcurl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, curl_get_callback);\n\tcurl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&buffer);\n\tcurl_easy_setopt(curl_handle, CURLOPT_RANGE, range);\n\tcurl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, fd->header);\n\tcurl_easy_setopt(curl_handle, CURLOPT_FAILONERROR, 1);\n\n\tbuffer.data = buf;\n\tbuffer.size = bytes;\n\tbuffer.read = 0;\n\tres = curl_easy_perform(curl_handle);\n\tcurl_easy_getinfo (curl_handle, CURLINFO_RESPONSE_CODE, &http_code);\n\tDEBUG(\"curl_easy_perform() HTTP code %li\", http_code);\n\t\n\tif(res != CURLE_OK)\n\t{\n\t\tDEBUG(\"curl_easy_perform() failed: %s (%s)\", curl_easy_strerror(res),fd->uri);\n\t\tcurl_easy_cleanup(curl_handle);\n\t\txfree(range);\n\t\treturn -ENOENT;\n\t}\n\n\tcurl_easy_cleanup(curl_handle);\n\txfree(range);\n\n\tDEBUG(\"return: %lu\", buffer.read)\n\treturn buffer.read;\n}\n\nstatic int urifs_write(const char *path, const char *buf, size_t size, off_t offset, struct fuse_file_info *fi)\n{\n\t(void)path;\n\t(void)buf;\n\t(void)size;\n\t(void)offset;\n\t(void)fi;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", char *buf = %p, size_t size = %lu, off_t offset = %lu, int fi->fh = %lu\", path, buf, size, offset, fi->fh)\n\tDEBUG(\"return: 
-ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nstatic int urifs_fsync(const char *path, int datasync, struct fuse_file_info *fi)\n{\n\t(void)path;\n\t(void)datasync;\n\t(void)fi;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", int datasync = %d, int fi->fh = %lu\", path, datasync, fi->fh)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nstatic int urifs_release(const char *path, struct fuse_file_info *fi)\n{\n\t(void)path;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", int fi->fh = %lu\", path, fi->fh)\n\tif(fi->fh > MAX_ENTRIES)\n\t{\n\t\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\t\treturn -ENOENT;\n\t}\n\n\tDEBUG(\"closing file %lu\",fi->fh)\n\tcurl_slist_free_all(opened_files[fi->fh]->header);\n\txfree(opened_files[fi->fh]->uri);\n\txfree(opened_files[fi->fh]);\n\topened_files[fi->fh] = NULL;\n\n\tDEBUG(\"return: 0\")\n\treturn 0;\n}\n\nvoid urifs_cleanup(void *data)\n{\n\t(void)data;\n\tDEBUG(\"args: void *data = %p\", data)\n\tDEBUG(\"here\")\n\txmlXPathContextPtr xpathCtx = fuse_get_context()->private_data;\n\txmlDocPtr doc = xpathCtx->doc;\n\tint i;\n\tfor(i=0; i<MAX_ENTRIES; i++)\n\t{\n\t\tif(opened_files[i]!=NULL)\n\t\t{\n\t\t\tcurl_slist_free_all(opened_files[i]->header);\n\t\t\txfree(opened_files[i]->uri);\n\t\t\txfree(opened_files[i]);\n\t\t}\n\t}\n\txfree(opened_files);\n\tcurl_global_cleanup();\n\n\txmlXPathFreeContext(xpathCtx);\n\txmlFreeDoc(doc);\n\txmlCleanupParser();\n}\n\nint urifs_flush(const char *path, struct fuse_file_info *fi)\n{\n\t(void)path;\n\t(void)fi;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", int fi->fh = %lu\", path, fi->fh)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nint urifs_statfs(const char *path, struct statvfs *stats)\n{\n\t(void)path;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", struct statvfs *stats = %p\", path, stats)\n\tstats->f_bsize = 0;\n\tstats->f_frsize = 0;\n\tstats->f_blocks = 0;\n\tstats->f_bfree = 0;\n\tstats->f_bavail = 0;\n\tstats->f_namemax = 512;\n\tstats->f_files = 1000000000;\n\tstats->f_ffree = 1000000000;\n\tDEBUG(\"return: 0\")\n\treturn 0;\n}\n\nint urifs_truncate(const char *path, off_t size)\n{\n\t(void)path;\n\t(void)size;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", off_t size = %lu\", path, size)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nint urifs_ftruncate(const char *path, off_t size, struct fuse_file_info *fi)\n{\n\t(void)path;\n\t(void)size;\n\t(void)fi;\n\tDEBUG(\"args: const char *path = \\\"%s\\\", off_t size = %lu, int fi->fh = %lu\", path, size, fi->fh)\n\tDEBUG(\"here\")\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nint urifs_unlink(const char *path)\n{\n\t(void)path;\n\tDEBUG(\"args: const char *path = \\\"%s\\\"\", path)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nint urifs_rename(const char *from, const char *to)\n{\n\t(void)from;\n\t(void)to;\n\tDEBUG(\"args: const char *from = \\\"%s\\\", const char *to = \\\"%s\\\"\", from, to)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nint urifs_mkdir(const char *dir, mode_t ignored)\n{\n\t(void)dir;\n\t(void)ignored;\n\tDEBUG(\"args: const char *dir = \\\"%s\\\", mode_t ignored = %u\", dir, ignored)\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn -ENOENT;\n}\n\nvoid *urifs_init(struct fuse_conn_info *conn)\n{\n\t(void)conn;\n\tDEBUG(\"args: struct fuse_conn_info *conn = %p\", conn)\n\tint i;\n\txmlDocPtr doc;\n\txmlXPathContextPtr xpathCtx;\n\n\tDEBUG(\"mounting 
%s\",source_xml)\n\n\txmlInitParser();\n\tLIBXML_TEST_VERSION\n\n\tdoc = xmlParseFile(source_xml);\n\n\tif (doc == NULL)\n\t{\n\t\tDEBUG(\"Can't parse %s\", source_xml)\n\t\texit(1);\n\t}\n\n\txpathCtx = xmlXPathNewContext(doc);\n\tif (xpathCtx == NULL)\n\t{\n\t\txmlFreeDoc(doc);\n\t\tDEBUG(\"Can't get xmlXPathNewContext\")\n\t\texit(1);\n\t}\n\n\topened_files = (uri_fd**)malloc(sizeof(uri_fd*)*MAX_ENTRIES);\n\tfor(i=0;i<MAX_ENTRIES;i++)\n\t\topened_files[i] = NULL;\n\n\tcurl_global_init(CURL_GLOBAL_ALL);\n\n\tDEBUG(\"return: -ENOENT(%d)\", -ENOENT)\n\treturn xpathCtx;\n}\n\nstatic struct fuse_operations urifs_oper = {\n\t.getattr = urifs_getattr,\n\t.statfs = urifs_statfs,\n\t.readdir = urifs_readdir,\n\t.mkdir = urifs_mkdir,\n\t.rmdir = urifs_unlink,\n\t.create = urifs_create,\n\t.open = urifs_open,\n\t.read = urifs_read,\n\t.write = urifs_write,\n\t.truncate = urifs_truncate,\n\t.ftruncate = urifs_ftruncate,\n\t.unlink = urifs_unlink,\n\t.rename = urifs_rename,\n\t.fsync = urifs_fsync,\n\t.release = urifs_release,\n\t.init = urifs_init,\n\t.destroy = urifs_cleanup\n};\n\nstatic int urifs_opt_proc(void *data, const char *arg, int key, struct fuse_args *outargs)\n{\n\t(void)data;\n\t(void)outargs;\n\tDEBUG(\"args: void *data = %p, const char *arg = \\\"%s\\\", int key = %d, struct fuse_args *outargs = %p\", data, arg, key, outargs)\n\tstatic int num = 0;\n\n\tswitch(key) {\n\t\tcase FUSE_OPT_KEY_OPT:\n\t\t\tif(strcmp(arg, \"--debug\") == 0) {\n\t\t\t\tdebug_f = fopen(LOG_FILE,\"w\");\n\t\t\t\tDEBUG(\"debug mode started\")\n\t\t\t\treturn 0;\n\t\t\t} else if(strcmp(arg, \"-oallow-other\") == 0) {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t\tbreak;\n\t\tcase FUSE_OPT_KEY_NONOPT:\n\t\t\tnum++;\n\t\t\tif(num == 1)\t{\n\t\t\t\tsource_xml = strdup(arg);\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t\tbreak;\n\t}\n\treturn 1;\n}\n\nint main(int argc, char *argv[])\n{\n\tstruct fuse_args args = FUSE_ARGS_INIT(argc, argv);\n\n\tif (argc < 2) {\n\t\treturn -1;\n\t}\n\n\tif(fuse_opt_parse(&args, NULL, NULL, urifs_opt_proc) == -1) {\n\t\treturn -1;\n\t}\n\tfuse_opt_add_arg(&args, \"-oallow_other\");\n\n\tif(source_xml == NULL)\n\t\treturn -1;\n\n\tinit_locks();\n\n\tint ret = fuse_main(args.argc, args.argv, &urifs_oper, NULL);\n\tDEBUG(\"return: %d\", ret)\n\n\tkill_locks();\n\n\treturn ret;\n}\n"
},
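urifs.c never spells out its XML input format in prose, but it can be read off the code: `xpath_from_path` resolves paths as `/root/*[@name="..."]`, `urifs_getattr` treats `dir` and `root` elements as directories and reads the `size`, `uid`, `gid`, `mode`, `atime`, `mtime` and `ctime` attributes, and `urifs_open` reads `uri`, `header` and `header-cmd`. A hypothetical source file (the element name `file`, the URLs and the header value are made up for illustration; anything that is not `dir` or `root` is treated as a file) could look like:

```xml
<root>
  <dir name="isos">
    <file name="debian.iso" uri="http://example.com/debian.iso" size="389283840" mode="644"/>
  </dir>
  <file name="notes.txt" uri="http://example.com/notes.txt" size="1024"
        uid="1000" gid="1000" mtime="1300000000"
        header="Authorization: Basic dXNlcjpwYXNz"/>
</root>
```

Reads are then served by issuing HTTP range requests (`CURLOPT_RANGE`) against the `uri` of the opened node.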
{
"alpha_fraction": 0.524193525314331,
"alphanum_fraction": 0.5483871102333069,
"avg_line_length": 61,
"blob_id": "696f81f61b7f8f8c437742ee2910b0427c9545ec",
"content_id": "ff11deb0f1f8879fd97a8764503c1d10ee00570d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 2,
"path": "/blob2bin",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/php\n<?php echo pack(\"H*\" , preg_replace(array(\"/^0x/i\",\"/[^0-9A-F]/i\"),\"\",file_get_contents(\"php://stdin\"))); ?>\n"
},
{
"alpha_fraction": 0.5030044317245483,
"alphanum_fraction": 0.5361058115959167,
"avg_line_length": 46.18159103393555,
"blob_id": "4fefd8884b3bdd492e3c4335be730baca1ea22f6",
"content_id": "0b79ed565ab3cc604638dbb48d6a8529cbc3d608",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18972,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 402,
"path": "/md4.py",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport psutil\nimport os\nfrom sympy import symbols, true, false, And, Or, Xor, Not, simplify, satisfiable\nfrom optparse import OptionParser\n\ndef memory_usage():\n process = psutil.Process(os.getpid())\n mem = process.memory_info()[0] / float(2 ** 20)\n return mem\n\nclass byte:\n SIZE = 32\n def __init__(self, bits):\n if type(bits) == int:\n bits = [true if i == \"1\" else false for i in '{:032b}'.format(bits)]\n while len(bits) < self.SIZE:\n bits.insert(0,false)\n assert(len(bits) == self.SIZE)\n self.bits = bits\n def op(self,o,other):\n return byte([o(self.bits[i], other.bits[i]) for i in range(self.SIZE)])\n def iop(self,o,other):\n for i in range(self.SIZE):\n self.bits[i] = o(self.bits[i], other.bits[i])\n return self\n def __or__(self,other):\n return self.op(Or,other)\n def __ior__(self,other):\n return self.iop(Or,other)\n def __and__(self,other):\n return self.op(And,other)\n def __iand__(self,other):\n return self.iop(And,other)\n def __xor__(self,other):\n return self.op(Xor,other)\n def __ixor__(self,other):\n return self.iop(Xor,other)\n def __invert__(self):\n return byte([Not(self.bits[i]) for i in range(self.SIZE)])\n def __lshift__(self,other):\n return byte([self.bits[(i+other)%self.SIZE] for i in range(self.SIZE)])\n def __ilshift__(self,other):\n tmp = [self.bits[(i+other)%self.SIZE] for i in range(self.SIZE)]\n for i in range(self.SIZE):\n self.bits[i] = tmp[i]\n return self\n def __rshift__(self,other):\n return byte([self.bits[(i+(self.SIZE-other))%self.SIZE] for i in range(self.SIZE)])\n def __irshift__(self,other):\n tmp = [self.bits[(i+(self.SIZE-other))%self.SIZE] for i in range(self.SIZE)]\n for i in range(self.SIZE):\n self.bits[i] = tmp[i]\n return self\n def __add__(self,other):\n l = []\n s = self.bits[self.SIZE-1] ^ other.bits[self.SIZE-1]\n c = self.bits[self.SIZE-1] & other.bits[self.SIZE-1]\n l.insert(0, s)\n for i in range(self.SIZE-2,-1,-1):\n s = self.bits[i] ^ other.bits[i] ^ c\n c = ( self.bits[i] & other.bits[i] ) | ( c & (self.bits[i] ^ other.bits[i]) )\n l.insert(0,s)\n return byte(l)\n def __iadd__(self,other):\n s = self.bits[self.SIZE-1] ^ other.bits[self.SIZE-1]\n c = self.bits[self.SIZE-1] & other.bits[self.SIZE-1]\n self.bits[self.SIZE-1] = s\n for i in range(self.SIZE-2,-1,-1):\n s = self.bits[i] ^ other.bits[i] ^ c\n c = ( self.bits[i] & other.bits[i] ) | ( c & (self.bits[i] ^ other.bits[i]) )\n self.bits[i] = s\n return self\n def __sub__(self, other):\n return self + ~(other) + byte(0x1)\n def __isub__(self, other):\n self += ~(other) + byte(0x1)\n return self\n def __getitem__(self,index):\n return self.bits[index]\n def __setitem__(self,index,value):\n self.bits[index] = value\n def __str__(self):\n return \"\\n\".join([str(b) for b in self.bits])\n def __repr__(self):\n return self.bits.__repr__()\n def copy(self):\n return byte([self.bits[i] for i in range(self.SIZE)])\n def subs(self, *args, **kwargs):\n for i in range(self.SIZE):\n self.bits[i] = self.bits[i].subs(*args, **kwargs)\n def simplify(self):\n for i in range(self.SIZE):\n self.bits[i] = simplify(self.bits[i])\n def subs(self, variables):\n for i in range(self.SIZE):\n self.bits[i] = self.bits[i].subs(variables)\n def toHex(self):\n return \"{:08X}\".format(int(\"\".join([\"1\" if self.bits[(i-8)+j] == True else \"0\" for i in range(self.SIZE,0,-8) for j in range(8)]),2))\n def toInt(self):\n return int(\"\".join([\"1\" if self.bits[i] == True else \"0\" for i in range(self.SIZE)]),2)\n 
\n\nclass md4:\n SQRT2 = byte(0x5a827999)\n SQRT3 = byte(0x6ed9eba1)\n INITA = byte(0x67452301)\n INITB = byte(0xefcdab89)\n INITC = byte(0x98badcfe)\n INITD = byte(0x10325476)\n def __init__(self, message, verbose=False):\n self.m = message\n self.verbose = verbose\n \n def step(self, f, a, b, c, d, x, s):\n print(\"Step {} ({}MB)\".format(self.cnt, memory_usage()))\n self.cnt += 1\n a += f(b, c, d) + x\n a <<= s\n if self.verbose:\n print(self)\n return a\n #a.simplify()\n def rstep(self, f, a, b, c, d, x, s):\n print(\"Step {} ({}MB)\".format(self.cnt, memory_usage()))\n self.cnt -= 1\n a >>= s\n a -= f(b, c, d) + x\n if self.verbose:\n print(self)\n return a\n #a.simplify()\n def F(self, x,y,z):\n return (z ^ (x & (y ^ z)))\n def G(self, x,y,z):\n return ((x & (y | z)) | (y & z))\n def H(self, x,y,z):\n return (x ^ y ^ z)\n def compute(self):\n self.cnt = 1\n self.a = self.INITA.copy()\n self.b = self.INITB.copy()\n self.c = self.INITC.copy()\n self.d = self.INITD.copy()\n if self.verbose:\n print(self)\n \n # Round 1\n self.step(self.F, self.a, self.b, self.c, self.d, self.m[0], 3)\n self.step(self.F, self.d, self.a, self.b, self.c, self.m[1], 7)\n self.step(self.F, self.c, self.d, self.a, self.b, self.m[2], 11)\n self.step(self.F, self.b, self.c, self.d, self.a, self.m[3], 19)\n self.step(self.F, self.a, self.b, self.c, self.d, self.m[4], 3)\n self.step(self.F, self.d, self.a, self.b, self.c, self.m[5], 7)\n self.step(self.F, self.c, self.d, self.a, self.b, self.m[6], 11)\n self.step(self.F, self.b, self.c, self.d, self.a, self.m[7], 19)\n self.step(self.F, self.a, self.b, self.c, self.d, self.m[8], 3)\n self.step(self.F, self.d, self.a, self.b, self.c, self.m[9], 7)\n self.step(self.F, self.c, self.d, self.a, self.b, self.m[10], 11)\n self.step(self.F, self.b, self.c, self.d, self.a, self.m[11], 19)\n self.step(self.F, self.a, self.b, self.c, self.d, self.m[12], 3)\n self.step(self.F, self.d, self.a, self.b, self.c, self.m[13], 7)\n self.step(self.F, self.c, self.d, self.a, self.b, self.m[14], 11)\n self.step(self.F, self.b, self.c, self.d, self.a, self.m[15], 19)\n \n # Round 2\n self.step(self.G, self.a, self.b, self.c, self.d, self.m[0] + self.SQRT2, 3)\n self.step(self.G, self.d, self.a, self.b, self.c, self.m[4] + self.SQRT2, 5)\n self.step(self.G, self.c, self.d, self.a, self.b, self.m[8] + self.SQRT2, 9)\n self.step(self.G, self.b, self.c, self.d, self.a, self.m[12] + self.SQRT2, 13)\n self.step(self.G, self.a, self.b, self.c, self.d, self.m[1] + self.SQRT2, 3)\n self.step(self.G, self.d, self.a, self.b, self.c, self.m[5] + self.SQRT2, 5)\n self.step(self.G, self.c, self.d, self.a, self.b, self.m[9] + self.SQRT2, 9)\n self.step(self.G, self.b, self.c, self.d, self.a, self.m[13] + self.SQRT2, 13)\n self.step(self.G, self.a, self.b, self.c, self.d, self.m[2] + self.SQRT2, 3)\n self.step(self.G, self.d, self.a, self.b, self.c, self.m[6] + self.SQRT2, 5)\n self.step(self.G, self.c, self.d, self.a, self.b, self.m[10] + self.SQRT2, 9)\n self.step(self.G, self.b, self.c, self.d, self.a, self.m[14] + self.SQRT2, 13)\n self.step(self.G, self.a, self.b, self.c, self.d, self.m[3] + self.SQRT2, 3)\n self.step(self.G, self.d, self.a, self.b, self.c, self.m[7] + self.SQRT2, 5)\n self.step(self.G, self.c, self.d, self.a, self.b, self.m[11] + self.SQRT2, 9)\n self.step(self.G, self.b, self.c, self.d, self.a, self.m[15] + self.SQRT2, 13)\n \n # Round 3\n self.step(self.H, self.a, self.b, self.c, self.d, self.m[0] + self.SQRT3, 3)\n self.step(self.H, self.d, self.a, self.b, self.c, self.m[8] + 
self.SQRT3, 9)\n self.step(self.H, self.c, self.d, self.a, self.b, self.m[4] + self.SQRT3, 11)\n self.step(self.H, self.b, self.c, self.d, self.a, self.m[12] + self.SQRT3, 15)\n self.step(self.H, self.a, self.b, self.c, self.d, self.m[2] + self.SQRT3, 3)\n self.step(self.H, self.d, self.a, self.b, self.c, self.m[10] + self.SQRT3, 9)\n self.step(self.H, self.c, self.d, self.a, self.b, self.m[6] + self.SQRT3, 11)\n self.step(self.H, self.b, self.c, self.d, self.a, self.m[14] + self.SQRT3, 15)\n self.step(self.H, self.a, self.b, self.c, self.d, self.m[1] + self.SQRT3, 3)\n self.step(self.H, self.d, self.a, self.b, self.c, self.m[9] + self.SQRT3, 9)\n self.step(self.H, self.c, self.d, self.a, self.b, self.m[5] + self.SQRT3, 11)\n self.step(self.H, self.b, self.c, self.d, self.a, self.m[13] + self.SQRT3, 15)\n self.step(self.H, self.a, self.b, self.c, self.d, self.m[3] + self.SQRT3, 3)\n self.step(self.H, self.d, self.a, self.b, self.c, self.m[11] + self.SQRT3, 9)\n self.step(self.H, self.c, self.d, self.a, self.b, self.m[7] + self.SQRT3, 11)\n self.step(self.H, self.b, self.c, self.d, self.a, self.m[15] + self.SQRT3, 15)\n \n self.a += self.INITA\n self.b += self.INITB\n self.c += self.INITC\n self.d += self.INITD\n if self.verbose:\n print(self)\n \n def rcompute(self, H):\n self.cnt = 48\n self.a, self.b, self.c, self.d = self.HtoABCD(H)\n \n if self.verbose:\n print(self)\n \n self.d -= self.INITD\n self.c -= self.INITC\n self.b -= self.INITB\n self.a -= self.INITA\n \n if self.verbose:\n print(self)\n \n # Reverse Round 3\n self.rstep(self.H, self.b, self.c, self.d, self.a, self.m[15] + self.SQRT3, 15)\n self.rstep(self.H, self.c, self.d, self.a, self.b, self.m[7] + self.SQRT3, 11)\n self.rstep(self.H, self.d, self.a, self.b, self.c, self.m[11] + self.SQRT3, 9)\n self.rstep(self.H, self.a, self.b, self.c, self.d, self.m[3] + self.SQRT3, 3)\n self.rstep(self.H, self.b, self.c, self.d, self.a, self.m[13] + self.SQRT3, 15)\n self.rstep(self.H, self.c, self.d, self.a, self.b, self.m[5] + self.SQRT3, 11)\n self.rstep(self.H, self.d, self.a, self.b, self.c, self.m[9] + self.SQRT3, 9)\n self.rstep(self.H, self.a, self.b, self.c, self.d, self.m[1] + self.SQRT3, 3)\n self.rstep(self.H, self.b, self.c, self.d, self.a, self.m[14] + self.SQRT3, 15)\n self.rstep(self.H, self.c, self.d, self.a, self.b, self.m[6] + self.SQRT3, 11)\n self.rstep(self.H, self.d, self.a, self.b, self.c, self.m[10] + self.SQRT3, 9)\n self.rstep(self.H, self.a, self.b, self.c, self.d, self.m[2] + self.SQRT3, 3)\n self.rstep(self.H, self.b, self.c, self.d, self.a, self.m[12] + self.SQRT3, 15)\n self.rstep(self.H, self.c, self.d, self.a, self.b, self.m[4] + self.SQRT3, 11)\n self.rstep(self.H, self.d, self.a, self.b, self.c, self.m[8] + self.SQRT3, 9)\n self.rstep(self.H, self.a, self.b, self.c, self.d, self.m[0] + self.SQRT3, 3)\n \n # Reverse Round 2\n self.rstep(self.G, self.b, self.c, self.d, self.a, self.m[15] + self.SQRT2, 13)\n self.rstep(self.G, self.c, self.d, self.a, self.b, self.m[11] + self.SQRT2, 9)\n self.rstep(self.G, self.d, self.a, self.b, self.c, self.m[7] + self.SQRT2, 5)\n self.rstep(self.G, self.a, self.b, self.c, self.d, self.m[3] + self.SQRT2, 3)\n self.rstep(self.G, self.b, self.c, self.d, self.a, self.m[14] + self.SQRT2, 13)\n self.rstep(self.G, self.c, self.d, self.a, self.b, self.m[10] + self.SQRT2, 9)\n self.rstep(self.G, self.d, self.a, self.b, self.c, self.m[6] + self.SQRT2, 5)\n self.rstep(self.G, self.a, self.b, self.c, self.d, self.m[2] + self.SQRT2, 3)\n self.rstep(self.G, self.b, self.c, 
self.d, self.a, self.m[13] + self.SQRT2, 13)\n self.rstep(self.G, self.c, self.d, self.a, self.b, self.m[9] + self.SQRT2, 9)\n self.rstep(self.G, self.d, self.a, self.b, self.c, self.m[5] + self.SQRT2, 5)\n self.rstep(self.G, self.a, self.b, self.c, self.d, self.m[1] + self.SQRT2, 3)\n self.rstep(self.G, self.b, self.c, self.d, self.a, self.m[12] + self.SQRT2, 13)\n self.rstep(self.G, self.c, self.d, self.a, self.b, self.m[8] + self.SQRT2, 9)\n self.rstep(self.G, self.d, self.a, self.b, self.c, self.m[4] + self.SQRT2, 5)\n self.rstep(self.G, self.a, self.b, self.c, self.d, self.m[0] + self.SQRT2, 3)\n \n # Reverse Round 1\n self.rstep(self.F, self.b, self.c, self.d, self.a, self.m[15], 19)\n self.rstep(self.F, self.c, self.d, self.a, self.b, self.m[14], 11)\n self.rstep(self.F, self.d, self.a, self.b, self.c, self.m[13], 7)\n self.rstep(self.F, self.a, self.b, self.c, self.d, self.m[12], 3)\n self.rstep(self.F, self.b, self.c, self.d, self.a, self.m[11], 19)\n self.rstep(self.F, self.c, self.d, self.a, self.b, self.m[10], 11)\n self.rstep(self.F, self.d, self.a, self.b, self.c, self.m[9], 7)\n self.rstep(self.F, self.a, self.b, self.c, self.d, self.m[8], 3)\n self.rstep(self.F, self.b, self.c, self.d, self.a, self.m[7], 19)\n self.rstep(self.F, self.c, self.d, self.a, self.b, self.m[6], 11)\n self.rstep(self.F, self.d, self.a, self.b, self.c, self.m[5], 7)\n self.rstep(self.F, self.a, self.b, self.c, self.d, self.m[4], 3)\n self.rstep(self.F, self.b, self.c, self.d, self.a, self.m[3], 19)\n self.rstep(self.F, self.c, self.d, self.a, self.b, self.m[2], 11)\n self.rstep(self.F, self.d, self.a, self.b, self.c, self.m[1], 7)\n self.rstep(self.F, self.a, self.b, self.c, self.d, self.m[0], 3)\n \n def __str__(self):\n A = self.a.toHex()\n B = self.b.toHex()\n C = self.c.toHex()\n D = self.d.toHex()\n return A+B+C+D\n \n def HtoABCD(self, H=None):\n if H is None:\n A = byte([symbols(var) for i in range(32,0,-8) for j in range(8) for var in [\"a_{}\".format((i-8)+j)]])\n B = byte([symbols(var) for i in range(32,0,-8) for j in range(8) for var in [\"b_{}\".format((i-8)+j)]])\n C = byte([symbols(var) for i in range(32,0,-8) for j in range(8) for var in [\"c_{}\".format((i-8)+j)]])\n D = byte([symbols(var) for i in range(32,0,-8) for j in range(8) for var in [\"d_{}\".format((i-8)+j)]])\n else:\n ha = '{:032b}'.format(int(H[0:8],16))\n A = byte([true if ha[(i-8)+j] == '1' else false for i in range(32,0,-8) for j in range(8) ])\n hb = '{:032b}'.format(int(H[8:16],16))\n B = byte([true if hb[(i-8)+j] == '1' else false for i in range(32,0,-8) for j in range(8) ])\n hc = '{:032b}'.format(int(H[16:24],16))\n C = byte([true if hc[(i-8)+j] == '1' else false for i in range(32,0,-8) for j in range(8) ])\n hd = '{:032b}'.format(int(H[24:32],16))\n D = byte([true if hd[(i-8)+j] == '1' else false for i in range(32,0,-8) for j in range(8) ])\n return A,B,C,D\n \n def solve(self, H):\n A,B,C,D = self.HtoABCD(H)\n eq = And(*[~(A[i] ^ self.a[i]) & ~(B[i] ^ self.b[i]) & ~(C[i] ^ self.c[i]) & ~(D[i] ^ self.d[i]) for i in range(32)])\n print(\"Equation:\",eq)\n return satisfiable(eq)\n \n def rsolve(self):\n A = self.INITA.copy()\n B = self.INITB.copy()\n C = self.INITC.copy()\n D = self.INITD.copy()\n eq = And(*[~(A[i] ^ self.a[i]) & ~(B[i] ^ self.b[i]) & ~(C[i] ^ self.c[i]) & ~(D[i] ^ self.d[i]) for i in range(32)])\n print(\"Equation:\",eq)\n return satisfiable(eq)\n \n def subs(self, variables):\n self.a.subs(variables)\n self.b.subs(variables)\n self.c.subs(variables)\n self.d.subs(variables)\n for i in 
range(16):\n self.m[i].subs(variables)\n \n def getpass(self):\n length = self.m[14].toInt()\n length += self.m[15].toInt() << 32\n length //= 16\n res = \"\"\n for i in range(length):\n b = self.m[i//2]\n c = 0\n e = 1\n for j in range(8):\n if b[31-(j+16*(i%2))] == True:\n c += e\n e *= 2\n res += \"{:c}\".format(c)\n return res\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-l\", \"--length\", dest=\"length\", default=-1, type=\"int\",\n help=\"Password length\", metavar=\"LEN\")\n parser.add_option(\"-H\", \"--hash\", dest=\"H\", default=None,\n help=\"Hash\", metavar=\"HASH\")\n parser.add_option(\"-p\", \"--password\", dest=\"password\", default=None,\n help=\"Password\", metavar=\"PASS\")\n parser.add_option(\"-r\", \"--reverse\",\n action=\"store_true\", dest=\"reverse\", default=False,\n help=\"Start from the hash and compute backward\")\n parser.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\", default=False,\n help=\"Verbose mode\")\n (options, args) = parser.parse_args()\n s = {}\n # Assuming UTF16 with only 8bits used\n for i in range(1,56,2):\n for j in range(8):\n s['m_{}'.format(i*8+j)] = false\n # msg size < 448, which is only 9bits out of 64. And it's a multiple of 16.\n for i in range(464,512):\n s['m_{}'.format(i)] = false\n for i in range(452,463):\n s['m_{}'.format(i)] = false\n if options.password is not None:\n options.length = len(options.password)\n if options.length >= 0:\n for i in range(0,options.length*2,2):\n s['m_{}'.format(i*8)] = false # only 7bits\n s['m_{}'.format(options.length*2*8)] = true\n for i in range(options.length*2*8+1,448,1):\n s['m_{}'.format(i)] = false\n for i in range(4):\n s['m_{}'.format(451-i)] = true if (options.length>>i)&1 else false\n s['m_463'] = true if (options.length > 15) else false\n else:\n #TODO: Find a boolean formula to link the size and the other bits\n pass\n if options.password is not None:\n i=0\n for c in options.password:\n bits = '{:08b}'.format(ord(c))\n for j in range(8):\n s['m_{}'.format(i*2*8+j)] = true if bits[j] == '1' else false\n i+=1 \n print(\"Guessed {} bits out of 512\".format(len(s)))\n M = [byte([symbols(var) if var not in s else s[var] for j1 in range(32,0,-8) for j2 in range(8) for j in [(j1-8)+j2] for var in [\"m_{}\".format(i*32+j)]]) for i in range(16)]\n MD4 = md4(M, verbose=options.verbose)\n if options.reverse:\n MD4.rcompute(options.H)\n sol = MD4.rsolve()\n else:\n MD4.compute()\n sol = MD4.solve(options.H)\n MD4.subs(sol)\n print(\"Hash:\",MD4)\n print(\"Pass:\",MD4.getpass())\n print(\"Message:\",*[MD4.m[i].toHex() for i in range(16)])\n \n"
},
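The md4 solver record above encodes passwords as "UTF16 with only 8bits used", which matches the NTLM password-hash construction (MD4 over UTF-16LE). As a hedged cross-check, a minimal Python sketch using only hashlib; it assumes the local OpenSSL build still exposes the legacy "md4" algorithm, and note that the word/byte ordering expected by the solver's -H option may differ from hexdigest().

# Hedged cross-check for the solver above: MD4 over a UTF-16LE password
# (the NTLM construction). Assumes hashlib's backend still provides "md4".
import hashlib

def md4_utf16le(password: str) -> str:
    return hashlib.new("md4", password.encode("utf-16-le")).hexdigest()

print(md4_utf16le("abc"))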
{
"alpha_fraction": 0.39386188983917236,
"alphanum_fraction": 0.7145780324935913,
"avg_line_length": 33.28070068359375,
"blob_id": "6ed9cf8b6b912978d98a5a795e256606afebab97",
"content_id": "fbd0dbcafd3968368848f99f188ca0989c60cb77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1955,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 57,
"path": "/huffman/huffman.h",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "enum\n{\n\tHUFFMAN_EOF_SYMBOL = 256,\n\n\tHUFFMAN_MAX_SYMBOLS=HUFFMAN_EOF_SYMBOL+1,\n\tHUFFMAN_MAX_NODES=HUFFMAN_MAX_SYMBOLS*2-1,\n\n\tHUFFMAN_LUTBITS = 10,\n\tHUFFMAN_LUTSIZE = (1<<HUFFMAN_LUTBITS),\n\tHUFFMAN_LUTMASK = (HUFFMAN_LUTSIZE-1)\n};\n\ntypedef struct\n{\n\t/* symbol */\n\tunsigned m_Bits;\n\tunsigned m_NumBits;\n\n\t/* don't use pointers for this. shorts are smaller so we can fit more data into the cache */\n\tunsigned short m_aLeafs[2];\n\n\t/* what the symbol represents */\n\tunsigned char m_Symbol;\n} HuffmanNode;\n\ntypedef struct {\n\tHuffmanNode m_aNodes[HUFFMAN_MAX_NODES];\n\tHuffmanNode *m_apDecodeLut[HUFFMAN_LUTSIZE];\n\tHuffmanNode *m_pStartNode;\n\tint m_NumNodes;\n} Huffman;\n\ntypedef struct {\n\tunsigned short m_NodeId;\n \tint m_Frequency;\n} HuffmanConstructNode;\n\nvoid HuffmanSetbits_r(Huffman *hf, HuffmanNode *pNode, int Bits, unsigned Depth);\nvoid HuffmanConstructTree(Huffman *hf);\nvoid HuffmanInit(Huffman *hf);\nint HuffmanDecompress(Huffman *hf, const void *pInput, int InputSize, void *pOutput, int OutputSize);\n\n\nstatic const unsigned HuffmanFreqTable[256+1] = {\n\t1<<30,4545,2657,431,1950,919,444,482,2244,617,838,542,715,1814,304,240,754,212,647,186,\n\t283,131,146,166,543,164,167,136,179,859,363,113,157,154,204,108,137,180,202,176,\n\t872,404,168,134,151,111,113,109,120,126,129,100,41,20,16,22,18,18,17,19,\n\t16,37,13,21,362,166,99,78,95,88,81,70,83,284,91,187,77,68,52,68,\n\t59,66,61,638,71,157,50,46,69,43,11,24,13,19,10,12,12,20,14,9,\n\t20,20,10,10,15,15,12,12,7,19,15,14,13,18,35,19,17,14,8,5,\n\t15,17,9,15,14,18,8,10,2173,134,157,68,188,60,170,60,194,62,175,71,\n\t148,67,167,78,211,67,156,69,1674,90,174,53,147,89,181,51,174,63,163,80,\n\t167,94,128,122,223,153,218,77,200,110,190,73,174,69,145,66,277,143,141,60,\n\t136,53,180,57,142,57,158,61,166,112,152,92,26,22,21,28,20,26,30,21,\n\t32,27,20,17,23,21,30,22,22,21,27,25,17,27,23,18,39,26,15,21,\n\t12,18,18,27,20,18,15,19,11,17,33,12,18,15,19,18,16,26,17,18,\n\t9,10,25,22,22,17,20,16,6,16,15,20,14,18,24,335,1517};\n\n"
},
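The huffman.h record above fixes a static frequency table and sizes the tree at HUFFMAN_MAX_NODES = 2*symbols-1, merging the two least-frequent nodes at each step. A minimal Python sketch of that same construction idea, for illustration only; it is not bit-compatible with the C implementation, which relies on its own deterministic bubble sort for tie-breaking.

# Illustration of the tree construction described in huffman.h: repeatedly
# merge the two least-frequent nodes. Ties are broken with an insertion
# counter here, which only approximates the C code's deterministic ordering.
import heapq

def build_huffman_codes(freqs):
    heap = [(f, i, {sym: ""}) for i, (sym, f) in enumerate(freqs.items())]
    heapq.heapify(heap)
    counter = len(heap)
    while len(heap) > 1:
        f0, _, codes0 = heapq.heappop(heap)
        f1, _, codes1 = heapq.heappop(heap)
        merged = {s: "0" + c for s, c in codes0.items()}
        merged.update({s: "1" + c for s, c in codes1.items()})
        heapq.heappush(heap, (f0 + f1, counter, merged))
        counter += 1
    return heap[0][2]

print(build_huffman_codes({"a": 5, "b": 2, "c": 1}))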
{
"alpha_fraction": 0.5380434989929199,
"alphanum_fraction": 0.592391312122345,
"avg_line_length": 19.44444465637207,
"blob_id": "ae24ec8205ee69602bcc3e6377a79f32c571d6f6",
"content_id": "76a0d2c9c8653bfb26e20143816502746271b1e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/bytestohr",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Convert input parameter (number of bytes) \n# to Human Readable form\n#\n\nSLIST=(bytes Ko Mo Go To Po Eo Zo Yo)\n\nPOWER=0\nVAL=$( echo \"scale=2; $1 / 1\" | bc)\nVINT=$( echo $VAL / 1024 | bc )\nwhile [[ $VINT != 0 && $POWER != 8 ]]\ndo\n\tlet POWER=POWER+1\n\tVAL=$( echo \"scale=2; $VAL / 1024\" | bc)\n\tVINT=$( echo $VAL / 1024 | bc )\ndone\n\necho $VAL ${SLIST[$POWER]}\n"
},
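A Python sketch of the same divide-by-1024 loop as the bytestohr bash script above, assuming its unit list (Ko/Mo/Go are octet-style suffixes).

# Python sketch of the conversion loop in the bash script above.
SLIST = ["bytes", "Ko", "Mo", "Go", "To", "Po", "Eo", "Zo", "Yo"]

def bytes_to_hr(n: float) -> str:
    power = 0
    val = float(n)
    while val >= 1024 and power < len(SLIST) - 1:
        val /= 1024
        power += 1
    return "{:.2f} {}".format(val, SLIST[power])

print(bytes_to_hr(123456789))  # -> "117.74 Mo"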
{
"alpha_fraction": 0.6266583800315857,
"alphanum_fraction": 0.6384742856025696,
"avg_line_length": 22.19230842590332,
"blob_id": "0577e387b35b10aac5680cb4b2fe916b95d629c4",
"content_id": "676465005528d68ad5037ffd398f2889c8c84d87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4824,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 208,
"path": "/huffman/huffman.c",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#include <string.h>\n#include \"huffman.h\"\n\nstatic void HuffmanBubbleSort(HuffmanConstructNode **ppList, int Size);\n\nvoid HuffmanSetbits_r(Huffman *hf, HuffmanNode *pNode, int Bits, unsigned Depth)\n{\n\tif(pNode->m_aLeafs[1] != 0xffff)\n\t\tHuffmanSetbits_r(hf, &hf->m_aNodes[pNode->m_aLeafs[1]], Bits|(1<<Depth), Depth+1);\n\tif(pNode->m_aLeafs[0] != 0xffff)\n\t\tHuffmanSetbits_r(hf, &hf->m_aNodes[pNode->m_aLeafs[0]], Bits, Depth+1);\n\n\tif(pNode->m_NumBits)\n\t{\n\t\tpNode->m_Bits = Bits;\n\t\tpNode->m_NumBits = Depth;\n\t}\n}\n\nstatic void HuffmanBubbleSort(HuffmanConstructNode **ppList, int Size)\n{\n\tint Changed = 1;\n\tint i;\n\tHuffmanConstructNode *pTemp;\n\n\twhile(Changed)\n\t{\n\t\tChanged = 0;\n\t\tfor(i = 0; i < Size-1; i++)\n\t\t{\n\t\t\tif(ppList[i]->m_Frequency < ppList[i+1]->m_Frequency)\n\t\t\t{\n\t\t\t\tpTemp = ppList[i];\n\t\t\t\tppList[i] = ppList[i+1];\n\t\t\t\tppList[i+1] = pTemp;\n\t\t\t\tChanged = 1;\n\t\t\t}\n\t\t}\n\t\tSize--;\n\t}\n}\n\nvoid HuffmanConstructTree(Huffman *hf)\n{\n\tHuffmanConstructNode aNodesLeftStorage[HUFFMAN_MAX_SYMBOLS];\n\tHuffmanConstructNode *apNodesLeft[HUFFMAN_MAX_SYMBOLS];\n\tint NumNodesLeft = HUFFMAN_MAX_SYMBOLS;\n\tint i;\n\n\t/* add the symbols */\n\tfor(i = 0; i < HUFFMAN_MAX_SYMBOLS; i++)\n\t{\n\t\thf->m_aNodes[i].m_NumBits = 0xFFFFFFFF;\n\t\thf->m_aNodes[i].m_Symbol = i;\n\t\thf->m_aNodes[i].m_aLeafs[0] = 0xffff;\n\t\thf->m_aNodes[i].m_aLeafs[1] = 0xffff;\n\n\t\tif(i == HUFFMAN_EOF_SYMBOL)\n\t\t\taNodesLeftStorage[i].m_Frequency = 1;\n\t\telse\n\t\t\taNodesLeftStorage[i].m_Frequency = HuffmanFreqTable[i];\n\t\taNodesLeftStorage[i].m_NodeId = i;\n\t\tapNodesLeft[i] = &aNodesLeftStorage[i];\n\n\t}\n\n\thf->m_NumNodes = HUFFMAN_MAX_SYMBOLS;\n\n\t/* construct the table */\n\twhile(NumNodesLeft > 1)\n\t{\n\t\t/* we can't rely on stdlib's qsort for this, it can generate different results on different implementations */\n\t\tHuffmanBubbleSort(apNodesLeft, NumNodesLeft);\n\n\t\thf->m_aNodes[hf->m_NumNodes].m_NumBits = 0;\n\t\thf->m_aNodes[hf->m_NumNodes].m_aLeafs[0] = apNodesLeft[NumNodesLeft-1]->m_NodeId;\n\t\thf->m_aNodes[hf->m_NumNodes].m_aLeafs[1] = apNodesLeft[NumNodesLeft-2]->m_NodeId;\n\t\tapNodesLeft[NumNodesLeft-2]->m_NodeId = hf->m_NumNodes;\n\t\tapNodesLeft[NumNodesLeft-2]->m_Frequency = apNodesLeft[NumNodesLeft-1]->m_Frequency + apNodesLeft[NumNodesLeft-2]->m_Frequency;\n\n\t\thf->m_NumNodes++;\n\t\tNumNodesLeft--;\n\t}\n\n\t/* set start node */\n\thf->m_pStartNode = &hf->m_aNodes[hf->m_NumNodes-1];\n\n\t/* build symbol bits */\n\tHuffmanSetbits_r(hf, hf->m_pStartNode, 0, 0);\n}\n\nvoid HuffmanInit(Huffman *hf)\n{\n\tint i;\n\n\t/* make sure to cleanout every thing */\n\tmemset(hf, 0, sizeof(*hf));\n\n\t/* construct the tree */\n\tHuffmanConstructTree(hf);\n\n\t/* build decode LUT */\n\tfor(i = 0; i < HUFFMAN_LUTSIZE; i++)\n\t{\n\t\tunsigned Bits = i;\n\t\tint k;\n\t\tHuffmanNode *pNode = hf->m_pStartNode;\n\t\tfor(k = 0; k < HUFFMAN_LUTBITS; k++)\n\t\t{\n\t\t\tpNode = &hf->m_aNodes[pNode->m_aLeafs[Bits&1]];\n\t\t\tBits >>= 1;\n\n\t\t\tif(!pNode)\n\t\t\t\tbreak;\n\n\t\t\tif(pNode->m_NumBits)\n\t\t\t{\n\t\t\t\thf->m_apDecodeLut[i] = pNode;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\tif(k == HUFFMAN_LUTBITS)\n\t\t\thf->m_apDecodeLut[i] = pNode;\n\t}\n}\n\nint HuffmanDecompress(Huffman *hf, const void *pInput, int InputSize, void *pOutput, int OutputSize)\n{\n\t/* setup buffer pointers */\n\tunsigned char *pDst = (unsigned char *)pOutput;\n\tunsigned char *pSrc = (unsigned char *)pInput;\n\tunsigned char *pDstEnd = 
pDst + OutputSize;\n\tunsigned char *pSrcEnd = pSrc + InputSize;\n\n\tunsigned Bits = 0;\n\tunsigned Bitcount = 0;\n\n\tHuffmanNode *pEof = &hf->m_aNodes[HUFFMAN_EOF_SYMBOL];\n\tHuffmanNode *pNode = 0;\n\n\twhile(1)\n\t{\n\t\t/* {A} try to load a node now, this will reduce dependency at location {D} */\n\t\tpNode = 0;\n\t\tif(Bitcount >= HUFFMAN_LUTBITS)\n\t\t\tpNode = hf->m_apDecodeLut[Bits&HUFFMAN_LUTMASK];\n\n\t\t/* {B} fill with new bits */\n\t\twhile(Bitcount < 24 && pSrc != pSrcEnd)\n\t\t{\n\t\t\tBits |= (*pSrc++) << Bitcount;\n\t\t\tBitcount += 8;\n\t\t}\n\n\t\t/* {C} load symbol now if we didn't that earlier at location {A} */\n\t\tif(!pNode)\n\t\t\tpNode = hf->m_apDecodeLut[Bits&HUFFMAN_LUTMASK];\n\n\t\tif(!pNode)\n\t\t\treturn -1;\n\n\t\t/* {D} check if we hit a symbol already */\n\t\tif(pNode->m_NumBits)\n\t\t{\n\t\t\t/* remove the bits for that symbol */\n\t\t\tBits >>= pNode->m_NumBits;\n\t\t\tBitcount -= pNode->m_NumBits;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t/* remove the bits that the lut checked up for us */\n\t\t\tBits >>= HUFFMAN_LUTBITS;\n\t\t\tBitcount -= HUFFMAN_LUTBITS;\n\n\t\t\t/* walk the tree bit by bit */\n\t\t\twhile(1)\n\t\t\t{\n\t\t\t\t/* traverse tree */\n\t\t\t\tpNode = &hf->m_aNodes[pNode->m_aLeafs[Bits&1]];\n\n\t\t\t\t/* remove bit */\n\t\t\t\tBitcount--;\n\t\t\t\tBits >>= 1;\n\n\t\t\t\t/* check if we hit a symbol */\n\t\t\t\tif(pNode->m_NumBits)\n\t\t\t\t\tbreak;\n\n\t\t\t\t/* no more bits, decoding error */\n\t\t\t\tif(Bitcount == 0)\n\t\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\n\t\t/* check for eof */\n\t\tif(pNode == pEof)\n\t\t\tbreak;\n\n\t\t/* output character */\n\t\tif(pDst == pDstEnd)\n\t\t\treturn -1;\n\t\t*pDst++ = pNode->m_Symbol;\n\t}\n\n\t/* return the size of the decompressed buffer */\n\treturn (int)(pDst - (const unsigned char *)pOutput);\n}\n"
},
{
"alpha_fraction": 0.5890411138534546,
"alphanum_fraction": 0.6164383292198181,
"avg_line_length": 35.5,
"blob_id": "8c1a8cac569c13b19ff9c363bcadc0bbe0bdf81e",
"content_id": "340a3b772797e75c340a3c204b4796b40e54e956",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 73,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 2,
"path": "/bin2blob",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#!/bin/php\n<?php echo \"0x\".bin2hex(file_get_contents(\"php://stdin\")); ?>\n"
},
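An equivalent Python filter for the bin2blob PHP script above, shown as a sketch because PHP's bin2hex maps directly onto bytes.hex().

# Python equivalent of the bin2blob PHP filter above: hex-encode stdin.
import sys

print("0x" + sys.stdin.buffer.read().hex())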
{
"alpha_fraction": 0.500910758972168,
"alphanum_fraction": 0.5027322173118591,
"avg_line_length": 25.095237731933594,
"blob_id": "485a585116c329c0798c028579b387593c1705c2",
"content_id": "733fdf75ddb3268b27f87de7daaaba950303c461",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 21,
"path": "/utils.py",
"repo_name": "ravomavain/uselessness",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom distutils.util import strtobool\n\ndef ask(question, default=True):\n prompt = question\n if default == None:\n prompt += ' [y/n] '\n elif default:\n prompt += ' [Y/n] '\n else:\n prompt += ' [y/N] '\n while True:\n try:\n ans = input(prompt).strip()\n if ans == '' and default != None:\n return default\n return bool(strtobool(ans))\n except ValueError:\n print('Please respond with \\'y\\' or \\'n\\'.\\n')\n\n"
}
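A hypothetical usage sketch for the ask() helper in utils.py above; it assumes an interactive terminal and that utils.py is on the import path.

# Hypothetical usage of ask() from utils.py (requires an interactive terminal).
from utils import ask

if ask("Delete all temporary files?", default=False):
    print("deleting...")
else:
    print("kept everything")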
] | 13 |
weldonyang/Multiple-Linear-Regression-Practice | https://github.com/weldonyang/Multiple-Linear-Regression-Practice | 62b4c72307df47cf0e559e7e918fa7a85a03c4e7 | 466e3a06db0fae4f0c93caaf3dfaf55c09f7409c | ed499f1a5b2d16e2f438571c5c2d657aa2d3a3ef | refs/heads/main | 2023-02-16T15:54:24.094528 | 2021-01-13T01:52:20 | 2021-01-13T01:52:20 | 329,129,995 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6410971879959106,
"alphanum_fraction": 0.6741050481796265,
"avg_line_length": 42.918365478515625,
"blob_id": "138b14a818534e68950573933d1427e90dac3f52",
"content_id": "afc6eeccffca4cfa9d0508cad79600f497ddd44e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 49,
"path": "/multiple_linear_regression.py",
"repo_name": "weldonyang/Multiple-Linear-Regression-Practice",
"src_encoding": "UTF-8",
"text": "# Multiple Linear Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Encoding categorical data\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [3])], remainder='passthrough')\nX = np.array(ct.fit_transform(X))\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n# Training the Multiple Linear Regression model on the Training set\n# sklearn will avoid dummy variable trap\n# sklearn will pick the model with the lowest p-value \nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# NOTE: Due to there being more than one independent variable, we cannot simply plot it on a 2D graph (in this case, we would need a 5D graph)\n# so we will display 2 vectors: 1. real profits of test set and \n# 2. predicted profits of test set \n\n# Predicting the Test set results\n# np.set_printoptions(precision=2) displays any numerical values with 2 decimals \n# np.concatenate() expects 2 args: 1. tuple of arrays \n# 2. axis=0 or 1 (0 means vertical concatenation, 1 means horizontal )\n# .reshape() is a function of regressor that takes 2 args: 1. # of rows\n# 2. # of columns and reshapes the vector\ny_pred = regressor.predict(X_test)\nnp.set_printoptions(precision=2) \nprint(np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1)) \n\n# Making a single prediction example\n# R&D = 160,000\n# Admin = 130,000\n# Marketing = 300,000\n# State = California [1, 0, 0]\nprint(regressor.predict([[1, 0, 0, 160000, 130000, 300000]]))"
},
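The script above prints predicted profits next to the real ones; a common follow-up is to summarize the fit with a single score. A hedged sketch that assumes regressor, X_test and y_test are in scope exactly as defined in the script.

# Hypothetical follow-up to multiple_linear_regression.py: summarize the fit.
# Assumes regressor, X_test and y_test exist as defined in the script above.
from sklearn.metrics import r2_score

y_pred = regressor.predict(X_test)
print("Test-set R^2: {:.3f}".format(r2_score(y_test, y_pred)))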
{
"alpha_fraction": 0.8372092843055725,
"alphanum_fraction": 0.8372092843055725,
"avg_line_length": 63.5,
"blob_id": "72adac1481d2aa4ee7bfcbbc16264273b109f572",
"content_id": "30dd8ea2233bf86a004ec314ba9ee8f69c15e441",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 2,
"path": "/README.md",
"repo_name": "weldonyang/Multiple-Linear-Regression-Practice",
"src_encoding": "UTF-8",
"text": "# Multiple Linear Regression Practice\n Creating MLR model comparing R&D spending against marketing spending by looking at profit\n"
}
] | 2 |
mertcangokgoz/TimeTable | https://github.com/mertcangokgoz/TimeTable | 4b5bf7d570aa553345047c69e063a5f3c93ab585 | dec52ef0fa365c55c215d88d4e9cbfa378d75c4b | ee7ad584d22fb55389ec1a57bea4cfef61f65fe9 | refs/heads/master | 2022-11-06T14:00:59.281723 | 2022-10-24T18:20:20 | 2022-10-24T18:20:20 | 44,543,941 | 1 | 2 | null | 2015-10-19T15:21:56 | 2022-06-29T05:31:19 | 2022-10-24T18:20:20 | HTML | [
{
"alpha_fraction": 0.6435414552688599,
"alphanum_fraction": 0.6508094072341919,
"avg_line_length": 25.321739196777344,
"blob_id": "c6a6718a9949476dd0c146edde8d488be1c28d37",
"content_id": "3dc56987c6e94dc89654e81c5b412405af4ed212",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3027,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 115,
"path": "/server.py",
"repo_name": "mertcangokgoz/TimeTable",
"src_encoding": "UTF-8",
"text": "# /usr/bin/python3.4\n__author__ = 'Mertcan Gokgoz'\n\nfrom flask import Flask, send_from_directory\nfrom flask import Response\nfrom flask import render_template\nfrom flask import request\nfrom functools import wraps\nimport pymysql\nimport json\nimport collections\n\ndatabase = pymysql.connect(\"localhost\", \"username\", \"password\", \"databasename\")\nconn = database.cursor()\n\n\ndef check_authenticate(username, password):\n return username == 'Mertcan' and password == 'admin123'\n\n\ndef authenticate():\n return Response(\n 'Authorized User Login Area!', 401,\n {'WWW-Authenticate': 'Basic realm=\"User Login\"'})\n\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if not auth or not check_authenticate(auth.username, auth.password):\n return authenticate()\n return f(*args, **kwargs)\n\n return decorated\n\n\n# define static files path\napp = Flask(__name__, static_url_path='/static')\n\n\[email protected]('/static/<path:path>')\ndef send_js(path):\n return send_from_directory('static', path)\n\n\[email protected]('/')\ndef root():\n return render_template('index.html')\n\n\[email protected]('/Program')\ndef show_timetable():\n conn.execute(\"SELECT * FROM TimeTable\")\n data = conn.fetchall()\n return render_template(\"timetable.html\", data=data[::-1])\n\n\n# MySQL datas convert to JSON format\[email protected]('/Api')\ndef ApiTech():\n conn.execute(\"SELECT * FROM TimeTable\")\n data = conn.fetchall()\n objects_list = []\n for row in data:\n selected = collections.OrderedDict()\n selected['id'] = row[0]\n selected['Type'] = row[1]\n selected['LectureTime'] = row[2]\n selected['LectureTeacher'] = row[3]\n selected['Lesson'] = row[4]\n selected['Place'] = row[5]\n selected['Days'] = row[6]\n objects_list.append(selected)\n\n return json.dumps(objects_list)\n\n\[email protected]('/Program/Add')\n@requires_auth\ndef AreaFill():\n conn.execute(\"SELECT * FROM TimeTable\")\n data = conn.fetchall()\n return render_template(\"Add.html\", data=data[::-1])\n\n\n# data post method\[email protected](\"/Program/Add/Send\", methods=['POST'])\n@requires_auth\ndef AddPostTable():\n Types = str(request.form['type'])\n LectureTime = str(request.form['lecturetime'])\n LectureTeacher = str(request.form['lectureteacher'])\n Lesson = str(request.form['lesson'])\n Place = str(request.form['place'])\n Days = str(request.form['days'])\n\n conn.execute('''INSERT INTO TimeTable VALUES (NULL,%s,%s,%s,%s,%s,%s)''',\n [Types, LectureTime, LectureTeacher, Lesson, Place, Days])\n database.commit()\n return \"<script>document.location ='/Program/Add'</script>\"\n\n\n# data delete method\[email protected](\"/Program/Delete/<id>/\", methods=['GET'])\n@requires_auth\ndef DeletePostItems(id):\n conn.execute(f\"DELETE FROM TimeTable WHERE id={id}\")\n database.commit()\n return \"Deleted Item\"\n\n\nif __name__ == '__main__':\n # define flask run adress and port\n app.run(host=\"0.0.0.0\", port=int(\"8080\"), debug=False)\n"
},
{
"alpha_fraction": 0.6224138140678406,
"alphanum_fraction": 0.6353448033332825,
"avg_line_length": 30.351350784301758,
"blob_id": "996b14209dbb89a2c10ceea09b137aa1514874a7",
"content_id": "c74d2cffafd1d4878c14002bfb26eb503aa7d695",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 37,
"path": "/initDB.py",
"repo_name": "mertcangokgoz/TimeTable",
"src_encoding": "UTF-8",
"text": "# /usr/bin/python3.4\n__author__ = 'Mertcan Gokgoz'\n\nimport pymysql\nimport sys\n\ntry:\n # database connection string\n database = pymysql.connect(\"localhost\", \"username\", \"password\", \"databasename\")\n # prepare cursor object this metod\n cursor = database.cursor()\n # create database\n create = '''CREATE DATABASE databasename'''\n\n cursor.execute(create)\n\n # drop table if it already exist\n cursor.execute(\"DROP TABLE IF EXISTS TimeTable\")\n\n # SQL Command line\n sql = \"\"\"CREATE TABLE TimeTable (\n id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,\n Types VARCHAR(45) NOT NULL COMMENT '',\n LectureTime VARCHAR(45) NOT NULL COMMENT '',\n LectureTeacher VARCHAR(45) NOT NULL COMMENT '',\n Lesson VARCHAR(45) NOT NULL COMMENT '',\n Place VARCHAR(45) NOT NULL COMMENT '',\n Days VARCHAR(45) NOT NULL COMMENT '')\"\"\"\n\n # execute current sql command from create table\n cursor.execute(sql)\n # disconnect from the server\n database.close()\n print(\"Database and Table Created\")\nexcept Exception as e:\n print(\"\\n[ Error ]\\n\\t Error Message:\\t \", e, \"\\n\")\n sys.exit(1)\n"
},
{
"alpha_fraction": 0.644490659236908,
"alphanum_fraction": 0.7193347215652466,
"avg_line_length": 24.3157901763916,
"blob_id": "f931e565160ac2eea2a836698c9204c86fa186fe",
"content_id": "c8e8ded6c0ba5ffe850d4dd8417c4eb7902bf6e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 19,
"path": "/README.md",
"repo_name": "mertcangokgoz/TimeTable",
"src_encoding": "UTF-8",
"text": "Requirements\n======\nThe easiest way to install the latest version\nis by using pip , must have root access to your machine.\n\n pip3 install pymysql\n pip3 install flask\n\nPython 3.3 & 3.4 are supported.\n\nUsage\n====\nStart initDB.py and wait for \"Database Created\" now start server.py\n\nDefault Authorized User Login İnformation User: Mertcan Pass: admin123\n\n - Main Page - 127.0.0.1:8080/\n - Timetable Page - 127.0.0.1:8080/Program\n - Add Lecture - 127.0.0.1:8080/Program/Add\n"
}
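A minimal client sketch for the /Api endpoint that server.py exposes; it uses only the standard library and assumes the server is running on 127.0.0.1:8080 as in the README (server.py does not require auth for /Api).

# Minimal client sketch for the TimeTable /Api endpoint described above.
# Assumes the Flask server is running on 127.0.0.1:8080.
import json
import urllib.request

with urllib.request.urlopen("http://127.0.0.1:8080/Api") as resp:
    rows = json.loads(resp.read().decode("utf-8"))

for row in rows:
    print(row["Days"], row["LectureTime"], row["Lesson"])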
] | 3 |
morio-kino/hello-world | https://github.com/morio-kino/hello-world | 47ebb930f94aaa71f380086f6ba8c79b0cb82623 | 5ad8783c7024915846f9c1321e2481dced3d4422 | 8f3ce5e6ec8ad127ecc81248faea9ef3d704bc2c | refs/heads/master | 2019-07-12T03:59:40.626825 | 2018-07-22T05:27:22 | 2018-07-22T05:27:22 | 65,377,365 | 2 | 0 | null | 2016-08-10T11:32:31 | 2016-08-10T11:32:31 | 2016-08-10T12:13:29 | null | [
{
"alpha_fraction": 0.500202476978302,
"alphanum_fraction": 0.5369320511817932,
"avg_line_length": 33.778873443603516,
"blob_id": "bf0aff7f6961d0219248d71169fe9028e2ef0bbe",
"content_id": "3b2d14151e9748805671d54915624268f5be982e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27926,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 710,
"path": "/V07/GenBoneData.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport math\nimport csv\nimport random\nimport bpy\nimport sys\nimport io\n\nimport ReadBoneData\n\n# アーマチュア情報\n#armature_name1 = 'Armature'\n#armature_name2 = 'Armature.001'\narmature_name1 = 'metarig'\narmature_name2 = 'Armature'\n\ndef alpha(max):\n return (random.randint(0,max*2) - max)\n\n# アーマチュアを移動するためのパラメタ\n# [\n# [<アーマチュア名>,\n# [[<開始フレーム番号の差分>, <最小位置>, <最大位置>, <移動の増分>], #X軸\n# [<開始フレーム番号の差分>, <最小位置>, <最大位置>, <移動の増分>], #Y軸\n# [<開始フレーム番号の差分>, <最小位置>, <最大位置>, <移動の増分>]]], #Z軸\n# :\n# ]\n#armature_move_params = [\n# ['metarig', [[alpha(30), -0.4, 0.4, 0.4/(30+alpha(3))],\n# [alpha(30), -0.4, 0.4, 0.4/(30+alpha(3))],\n# [alpha(30), -0.4, 0.4, 0.4/(30+alpha(3))]]]\n# ]\n\n# アーマチュアを回転するためのパラメタ\n# [\n# [<アーマチュア名>,\n# [[<開始フレーム番号の差分>, <最小角度>, <最大角度>, <角度の増分>], #X軸\n# [<開始フレーム番号の差分>, <最小角度>, <最大角度>, <角度の増分>], #Y軸\n# [<開始フレーム番号の差分>, <最小角度>, <最大角度>, <角度の増分>]]], #Z軸\n# :\n# ]\ndef get_armature_rotation_params():\n '''\n param = [\n ['metarig', [[alpha(30), 0, math.pi*2, math.pi/(50+alpha(5))],\n [alpha(30), 0, math.pi*2, math.pi/(30+alpha(5))],\n [alpha(30), 0, math.pi*2, math.pi/(40+alpha(5))]]]\n ]\n '''\n '''\n 回転の制限版\n param = [\n ['metarig', [[alpha(150), -math.pi/6, math.pi/8, math.pi/(50+alpha(5))], # X軸の回転を制限\n [alpha(150), -math.pi/8, math.pi/8, math.pi/(30+alpha(5))], # Y軸の回転を制限\n [alpha(150), -math.pi*1.5, math.pi*1.5, math.pi/(40+alpha(5))]]]\n ]\n '''\n '''\n 回転抑止版\n '''\n param = [\n ['metarig', [[0, 0, 0, 0], # X軸の回転を制限\n [0, 0, 0, 0], # Y軸の回転を制限\n [0, 0, 0, 0]]]\n ]\n return param\n\n# ボーンを動かすためのパラメタ\n# [\n# [<ボーン名>,\n# [[<開始フレーム番号の差分>, <最小角度>, <最大角度>, <角度の増分>], #X軸\n# [<開始フレーム番号の差分>, <最小角度>, <最大角度>, <角度の増分>], #Y軸\n# [<開始フレーム番号の差分>, <最小角度>, <最大角度>, <角度の増分>]]], #Z軸\n# :\n# ]\ndef get_bone_move_params():\n param = [\n ['spine', [[alpha(100), -0.2, 0.2, 0.4/(30+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.2, 0.2, 0.4/(30+alpha(3))]]] ,\n ['chest01', [[alpha(100), -0.2, 0.2, 0.4/(30+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.2, 0.2, 0.4/(30+alpha(3))]]] ,\n ['chest02', [[alpha(100), -0.2, 0.2, 0.4/(30+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.2, 0.2, 0.4/(30+alpha(3))]]] ,\n ['neck', [[alpha(100), -0.2, 0.2, 0.4/(30+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.2, 0.2, 0.4/(30+alpha(3))]]] ,\n ['head', [[alpha(100), -0.3, 0.3, 0.6/(30+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.3, 0.3, 0.6/(30+alpha(3))]]] ,\n ['upper_arm.L', [[alpha(100), -0.27, 1, 1/(25+alpha(3))], [0, 0, 0, 0],[alpha(30), -1, 0.75, 1/(25+alpha(3))]]] ,\n ['forearm.L', [[alpha(100), -0.173, 0.45, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.8, 0.23, 1/(15+alpha(3))]]] ,\n ['upper_arm.R', [[alpha(100), -0.27, 1, 1/(25+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.75, 1, 1/(25+alpha(3))]]] ,\n ['forearm.R', [[alpha(100), -0.173, 0.45, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), -0.23, 0.8, 1/(15+alpha(3))]]] ,\n\n # ['thigh.L', [[alpha(100), -0.3, 1, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), 0, 1, 1/(15+alpha(3))]]] ,\n ['thigh.L', [[alpha(100), -0.8, 0.8, 1/(15+alpha(3))], [alpha(100), -0.5, 0.5, 1/(14+alpha(3))], [alpha(30), -0.4, 1, 1/(13+alpha(3))]]] ,\n\n ['shin.L', [[alpha(100), -1, 0, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), 0, 0, 1/(15+alpha(3))]]] ,\n ['foot.L', [[alpha(100), -0.6, 0.4, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), 0, 0, 1/(15+alpha(3))]]] ,\n\n # ['thigh.R', [[alpha(100), -0.3, 1, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), -1, 0, 1/(15+alpha(3))]]] ,\n ['thigh.R', [[alpha(100), -0.8, 0.8, 1/(16+alpha(3))], [alpha(100), -0.5, 0.5, 1/(17+alpha(3))], [alpha(30), -1, 0.4, 1/(18+alpha(3))]]] 
,\n\n ['shin.R', [[alpha(100), -1, 0, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), 0, 0, 1/(15+alpha(3))]]] ,\n ['foot.R', [[alpha(100), -0.6, 0.4, 1/(15+alpha(3))], [0, 0, 0, 0],[alpha(30), 0, 0, 1/(15+alpha(3))]]]\n ]\n return param\n\n# ベジェカーブの情報\ncurve_dict = {}\ncurve_dict['hips'] = ('BezierCurve_Head', 1)\ncurve_dict['spine'] = ('BezierCurve_Head', 2)\ncurve_dict['chest01'] = ('BezierCurve_Head', 3)\ncurve_dict['chest02'] = ('BezierCurve_Head', 4)\ncurve_dict['neck'] = ('BezierCurve_Head', 5)\ncurve_dict['head'] = ('BezierCurve_Head', 6)\n\ncurve_dict['shoulder.L'] = ('BezierCurve_Arm.L', 1)\ncurve_dict['upper_arm.L'] = ('BezierCurve_Arm.L', 2)\ncurve_dict['forearm.L'] = ('BezierCurve_Arm.L', 3)\n\ncurve_dict['shoulder.R'] = ('BezierCurve_Arm.R', 1)\ncurve_dict['upper_arm.R'] = ('BezierCurve_Arm.R', 2)\ncurve_dict['forearm.R'] = ('BezierCurve_Arm.R', 3)\n\ncurve_dict['hips.L'] = ('BezierCurve_Foot.L', 1)\ncurve_dict['thigh.L'] = ('BezierCurve_Foot.L', 2)\ncurve_dict['shin.L'] = ('BezierCurve_Foot.L', 3)\ncurve_dict['foot.L'] = ('BezierCurve_Foot.L', 4)\n\ncurve_dict['hips.R'] = ('BezierCurve_Foot.R', 1)\ncurve_dict['thigh.R'] = ('BezierCurve_Foot.R', 2)\ncurve_dict['shin.R'] = ('BezierCurve_Foot.R', 3)\ncurve_dict['foot.R'] = ('BezierCurve_Foot.R', 4)\n\n# ----------------------------------------------------------------------------------------------------\n\nconst_bone_list = ['head', 'forearm.L', 'foot.L', 'forearm.R', 'foot.R']\nconstraints_dict = {}\ndef remove_bone_constraints(armature_name):\n '''\n ボーンのコンストレイントを削除\n '''\n for const_bone in const_bone_list:\n bone = bpy.data.objects[armature_name].pose.bones[const_bone]\n constraints_dict[const_bone] = (bone.constraints[\"Spline IK\"].target, bone.constraints[\"Spline IK\"].chain_count)\n #bone.constraints.remove(bone.constraints[0])\n bone.constraints[\"Spline IK\"].target = None\n\ndef store_bone_constraints(armature_name):\n for const_bone in const_bone_list:\n bone = bpy.data.objects[armature_name].pose.bones[const_bone]\n target, chain_count = constraints_dict[const_bone]\n bone.constraints[\"Spline IK\"].target = target\n bone.constraints[\"Spline IK\"].chain_count = chain_count\n\n\ndef move_armature(frame_num, armature_name, param):\n '''\n bone_paramに従ってーマチュアの位置の変更。\n frame_num : フレーム番号\n armature_name : アーマチュア名\n param : アーマチュアパラメータ\n '''\n x_param = param[0]\n y_param = param[1]\n z_param = param[2]\n update_armature_location(frame_num, armature_name, 0, x_param)\n update_armature_location(frame_num, armature_name, 1, y_param)\n update_armature_location(frame_num, armature_name, 2, z_param)\n\ndef update_armature_location(frame_num, armature_name, index, param):\n '''\n paramに従ってアーマチュアのlocationを変更。\n frame_num : フレーム番号\n armature_name : アーマチュア名\n index : locationの番号\n param : パラメータ\n '''\n pad = param[0]\n min = param[1]\n max = param[2]\n inc = param[3]\n #bone = bpy.data.objects[armature_name].pose.bones[bone_name]\n frame_num_wk = frame_num + pad\n width = max - min\n if width <= 0:\n return None\n move_total_len = inc * frame_num_wk\n move_len = move_total_len % width\n if (move_total_len % (width*2)) >= width:\n bpy.data.objects[armature_name].location[index] = max - move_len\n else:\n bpy.data.objects[armature_name].location[index] = min + move_len\n\ndef rotation_armature(frame_num, armature_name, param):\n '''\n bone_paramに従ってアーマチュアのローテーションの変更。\n frame_num : フレーム番号\n armature_name : アーマチュア名\n param : アーマチュアパラメータ\n '''\n x_param = param[0]\n y_param = param[1]\n z_param = param[2]\n 
update_armature_rotation(frame_num, armature_name, 0, x_param)\n update_armature_rotation(frame_num, armature_name, 1, y_param)\n update_armature_rotation(frame_num, armature_name, 2, z_param)\n\ndef update_armature_rotation(frame_num, armature_name, index, param):\n '''\n paramに従ってアーマチュアのローテーションを変更。\n frame_num : フレーム番号\n armature_name : アーマチュア名\n index : rotationの番号\n param : パラメータ\n '''\n pad = param[0]\n min = param[1]\n max = param[2]\n inc = param[3]\n #bone = bpy.data.objects[armature_name].pose.bones[bone_name]\n frame_num_wk = frame_num + pad\n width = max - min\n if width <= 0:\n return None\n move_total_len = inc * frame_num_wk\n move_len = move_total_len % width\n if (move_total_len % (width*2)) >= width:\n bpy.data.objects[armature_name].rotation_euler[index] = max - move_len\n else:\n bpy.data.objects[armature_name].rotation_euler[index] = min + move_len\n\ndef update_bone(frame_num, bone_name, bone_param):\n '''\n bone_paramに従ってボーンのローテーションの変更。\n frame_num : フレーム番号\n bone_name : ボーン名\n bone_param : ボーンパラメータ\n '''\n x_param = bone_param[0]\n y_param = bone_param[1]\n z_param = bone_param[2]\n update_bone_quaternion(frame_num, bone_name, 1, x_param)\n update_bone_quaternion(frame_num, bone_name, 2, y_param)\n update_bone_quaternion(frame_num, bone_name, 3, z_param)\n #bone = bpy.data.objects[armature_name1].pose.bones[bone_name]\n\ndef update_bone_quaternion(frame_num, bone_name, quaternion, bone_param):\n '''\n bone_paramに従ってボーンのquaternionのローテーションを変更。\n frame_num : フレーム番号\n bone_name : ボーン名\n quaternion : quaternionの番号\n bone_param : ボーンパラメータ\n '''\n pad = bone_param[0]\n min = bone_param[1]\n max = bone_param[2]\n inc = bone_param[3]\n bone = bpy.data.objects[armature_name1].pose.bones[bone_name]\n frame_num_wk = frame_num + pad\n width = max - min\n if width <= 0:\n return None\n move_total_len = inc * frame_num_wk\n move_len = move_total_len % width\n if (move_total_len % (width*2)) >= width:\n bone.rotation_quaternion[quaternion] = max - move_len\n else:\n bone.rotation_quaternion[quaternion] = min + move_len\n\n# ----------------------------------------------------------------------------------------\ndef get_bone_len(armature_name, bone_name):\n '''\n ボーンの長さを取得\n '''\n return bpy.data.objects[armature_name].pose.bones[bone_name].length\n\ndef get_bone_world_pos(armature_name, bone_name):\n '''\n ボーンのヘッドとテールのワールド座標を取得\n '''\n matrix_world = bpy.data.objects[armature_name].matrix_world\n head_local = bpy.data.objects[armature_name].pose.bones[bone_name].head\n tail_local = bpy.data.objects[armature_name].pose.bones[bone_name].tail\n head_world = matrix_world * head_local\n tail_world = matrix_world * tail_local\n return (head_world, tail_world)\n\ndef move_to(pos, obj):\n '''\n pos で指定した座標分、objの座標を移動する\n '''\n m = np.array([[1, 0, 0, pos[0]],\n [0, 1, 0, pos[1]],\n [0, 0, 1, pos[2]],\n [0, 0, 0, 1 ]])\n new_pos = np.dot(m, obj)\n return new_pos\n\ndef move_back_to(pos, obj):\n '''\n pos で指定した座標分、objの座標を戻す\n '''\n m = np.array([[1, 0, 0, -pos[0]],\n [0, 1, 0, -pos[1]],\n [0, 0, 1, -pos[2]],\n [0, 0, 0, 1 ]])\n new_pos = np.dot(m, obj)\n return new_pos\n\ndef rotation_x(theta, obj):\n '''\n X軸を中心にthetaで指定した角度だけobjの座標を回転する\n '''\n rx = np.array([[ 1, 0 , 0 , 0],\n [ 0, np.cos(theta), -np.sin(theta), 0],\n [ 0, np.sin(theta), np.cos(theta), 0],\n [ 0, 0 , 0 , 1]])\n return np.dot(rx, obj)\n \ndef rotation_y(theta, obj):\n '''\n Y軸を中心にthetaで指定した角度だけobjの座標を回転する\n '''\n ry = np.array([[ np.cos(-theta), 0, np.sin(-theta), 0],\n [ 0 , 1, 0 , 0],\n [-np.sin(-theta), 0, 
np.cos(-theta), 0],\n [ 0 , 0, 0 , 1]])\n return np.dot(ry, obj)\n\ndef rotation_z(theta, obj):\n '''\n Z軸を中心にthetaで指定した角度だけobjの座標を回転する\n '''\n rz = np.array([[ np.cos(theta), -np.sin(theta), 0, 0],\n [ np.sin(theta), np.cos(theta), 0, 0],\n [ 0 , 0 , 1, 0],\n [ 0 , 0 , 0, 1]])\n return np.dot(rz, obj)\n\ndef get_y_rotation_from_z(obj):\n '''\n Y軸に対する-z方向からの角度を取得\n '''\n obj_x = obj[0]\n obj_z = obj[2]\n if obj_x == 0 and obj_z <= 0:\n theta = 0\n elif obj_x == 0 and obj_z > 0:\n theta = math.pi\n elif obj_x > 0 and obj_z >= 0:\n theta = math.pi/2 + np.arctan(abs(obj_z/obj_x))\n elif obj_x > 0 and obj_z < 0:\n theta = math.pi/2 - np.arctan(abs(obj_z/obj_x))\n elif obj_x < 0 and obj_z >= 0:\n theta = -math.pi/2 - np.arctan(abs(obj_z/obj_x))\n elif obj_x < 0 and obj_z < 0:\n theta = -math.pi/2 + np.arctan(abs(obj_z/obj_x))\n return theta\n\ndef get_x_rotation_from_z(obj):\n '''\n X軸に対する-z方向からの角度を取得\n '''\n obj_y = obj[1]\n obj_z = obj[2]\n if obj_y == 0 and obj_z <= 0:\n theta = 0\n elif obj_y == 0 and obj_z > 0:\n theta = math.pi\n elif obj_y > 0 and obj_z <= 0:\n theta = math.pi/2 - np.arctan(abs(obj_z/obj_y))\n elif obj_y > 0 and obj_z > 0:\n theta = math.pi/2 + np.arctan(abs(obj_z/obj_y))\n elif obj_y < 0 and obj_z <= 0:\n theta = -math.pi/2 + np.arctan(abs(obj_z/obj_y))\n elif obj_y < 0 and obj_z > 0:\n theta = -math.pi/2 - np.arctan(abs(obj_z/obj_y))\n return theta\n\ndef get_x_rotation_from_y(obj):\n '''\n X軸に対する-y方向からの角度を取得\n '''\n obj_y = obj[1]\n obj_z = obj[2]\n if obj_z == 0 and obj_y <= 0:\n theta = 0\n elif obj_z == 0 and obj_y > 0:\n theta = math.pi\n elif obj_z > 0 and obj_y <= 0:\n theta = - np.arctan(abs(obj_z/obj_y))\n elif obj_z > 0 and obj_y > 0:\n theta = -math.pi + np.arctan(abs(obj_z/obj_y))\n elif obj_z < 0 and obj_y <= 0:\n theta = np.arctan(abs(obj_z/obj_y))\n elif obj_z < 0 and obj_y > 0:\n theta = math.pi - np.arctan(abs(obj_z/obj_y))\n return theta\n\n\ndef get_z_rotation_from_y(obj):\n '''\n Z軸に対する-y 方向からの角度を取得\n '''\n obj_x = obj[0]\n obj_y = obj[1]\n if obj_x == 0 and obj_y <= 0:\n theta = 0\n elif obj_x == 0 and obj_y > 0:\n theta = math.pi\n elif obj_x > 0 and obj_y <= 0:\n theta = math.pi * 1/2 - np.arctan(abs(obj_y/obj_x))\n elif obj_x > 0 and obj_y > 0:\n theta = math.pi * 1/2 + np.arctan(abs(obj_y/obj_x))\n elif obj_x < 0 and obj_y <= 0:\n theta = -math.pi * 1/2 + np.arctan(abs(obj_y/obj_x))\n elif obj_x < 0 and obj_y > 0:\n theta = -math.pi * 1/2 - np.arctan(abs(obj_y/obj_x))\n return theta\n\ndef get_theta1(base, obj, camera):\n '''\n カメラから見たbaseを基準とするobjの角度を取得する。次の情報を返す。\n ・objのY軸に対する-z方向からの角度\n ・objのX軸に対する-z方向からの角度, \n ・カメラのY軸に対する-z方向からの角度\n ・カメラのX軸に対する-z方向からの角度, \n '''\n o0 = move_back_to(base.to_4d(), obj.to_4d())\n c0 = move_back_to(base.to_4d(), camera.to_4d())\n c_rz = get_z_rotation_from_y(c0)\n o1 = rotation_z(-c_rz, o0)\n c1 = rotation_z(-c_rz, c0)\n c_rx = get_x_rotation_from_y(c1)\n o2 = rotation_x(-c_rx, o1)\n o_ry = get_y_rotation_from_z(o2)\n o3 = rotation_y(-o_ry, o2)\n o_rx = get_x_rotation_from_z(o3)\n return o_ry, o_rx, c_rz, c_rx\n\ndef get_vector1(base, obj, len, camera):\n '''\n カメラから見たbaseを基準とするobjの角度を取得する。次の情報を返す。\n ・objのY軸に対する-z方向からの角度\n ・objのX軸に対する-z方向からの角度, \n ・カメラのY軸に対する-z方向からの角度\n ・カメラのX軸に対する-z方向からの角度, \n '''\n o0 = move_back_to(base.to_4d(), obj.to_4d())\n c0 = move_back_to(base.to_4d(), camera.to_4d())\n c_rz = get_z_rotation_from_y(c0)\n o1 = rotation_z(-c_rz, o0)\n c1 = rotation_z(-c_rz, c0)\n c_rx = get_x_rotation_from_y(c1)\n o2 = rotation_x(-c_rx, o1)\n #o_ry = get_y_rotation_from_z(o2)\n #o3 = rotation_y(-o_ry, 
o2)\n #o_rx = get_x_rotation_from_z(o3)\n o3 = o2 / len\n return o3\n\ndef get_theta2(base_4d, obj_4d):\n '''\n base を基準としてobjの角度を取得する。次の情報を返す。\n ・objのZ軸に対する-y方向からの角度\n ・objのX軸に対する-y方向からの角度, \n '''\n o0 = move_back_to(base_4d, obj_4d)\n o_rz = get_z_rotation_from_y(o0)\n o1 = rotation_z(-o_rz, o0)\n o_rx = get_x_rotation_from_y(o1)\n return o_rz, o_rx\n\ndef get_bone_data(armature_name, bone_name):\n '''\n ボーンの次の情報を取得する。\n ・ボーンの長さ\n ・headのワールド座標\n ・tailのワールド座標\n ・カメラのワールド座標\n ・tailのY軸に対する-z方向からの角度\n ・tailのX軸に対する-z方向からの角度, \n ・カメラのY軸に対する-z方向からの角度\n ・カメラのX軸に対する-z方向からの角度, \n '''\n l = get_bone_len(armature_name, bone_name)\n h, t = get_bone_world_pos(armature_name, bone_name)\n c = bpy.context.scene.camera.location\n vec = get_vector1(h, t, l, c)\n return (l, h, t, c, vec)\n\ndef move_bezier_point(curve_name, point_index, position):\n '''\n ベジェカーブの指定したインデックスのポイントを、指定した座標に移動する。\n '''\n bpy.data.curves[curve_name].splines[0].bezier_points[point_index].co = position\n bpy.data.curves[curve_name].splines[0].bezier_points[point_index].handle_left = position\n bpy.data.curves[curve_name].splines[0].bezier_points[point_index].handle_right = position\n\n\ndef save_all_bone_data(csv_file_path, frame_num):\n '''\n 全てのボーンの現在の状態をCSVファイルに保存する。\n '''\n csv_data = []\n csv_data.append(frame_num)\n for bone_name, bone in bpy.data.armatures[armature_name2].bones.items():\n #if bone.parent:\n # parent_name = bone.parent.name\n #else:\n # parent_name = '<none>'\n\n # ボーンの情報を取得\n bone_data = get_bone_data(armature_name1, bone_name)\n len, head, tail, camera, vec = bone_data\n csv_data.append(bone_name)\n csv_data.append(vec[0])\n csv_data.append(vec[1])\n csv_data.append(vec[2])\n with open(csv_file_path, 'a', newline='') as f:\n # CSVファイルへ書き出す\n writer = csv.writer(f)\n writer.writerow(csv_data)\n\n return csv_data\n\n\ndef render(blender_file, out_dir_path, file_prefix, csv_file_path, frame_num_max):\n '''\n レンダリングし、データを作成する。\n blender_file : レンダリングに使用するBlenderファイルパス\n out_dir : 出力先ディレクトリパス\n file_prefix : 出力ファイルプレフィックス\n '''\n # 現在のシステム時刻で乱数のシード設定\n random.seed()\n\n # パラメタの生成\n armature_rotation_params = get_armature_rotation_params()\n bone_move_params = get_bone_move_params()\n\n # Blender ファイルのオープン\n bpy.ops.wm.open_mainfile(filepath=blender_file)\n \n # -------------------------------------------------------------------------------------------\n # ボーンのコンストレイントを削除\n # remove_bone_constraints(armature_name1)\n \n bone_num = 0\n bone_dict = {}\n bone_list = []\n\n # CSVのヘッダー情報出力\n with open(csv_file_path, 'w', newline='') as f:\n bone_header = []\n bone_header.append('[Frame No.]')\n for bone_name, bone in bpy.data.armatures[armature_name2].bones.items():\n if bone.parent == None:\n parent_name = '<none>'\n else:\n parent_name = bone.parent.name\n bone_len = str(bone.length)\n\n bone_list.append((bone.name, parent_name, bone_len))\n bone_dict[bone_name] = (bone_name, parent_name, bone_len)\n bone_num += 1\n\n bone_header.append('[Bone' + str(bone_num) + ' name]')\n bone_header.append('[Bone' + str(bone_num) + ' vecX]')\n bone_header.append('[Bone' + str(bone_num) + ' vecY]')\n bone_header.append('[Bone' + str(bone_num) + ' vecZ]')\n writer = csv.writer(f)\n writer.writerow(['[Bone num]', str(len(bone_list))])\n writer.writerow(['[Bone name]', '[Parent name]', '[Bone length]'])\n for bone_info in bone_list:\n writer.writerow(bone_info)\n writer.writerow(bone_header)\n \n image_num = 0\n for frame_num in range(int(frame_num_max / 4 + 1)):\n # ボーンのコンストレイントを削除\n remove_bone_constraints(armature_name1)\n\n # 
アーマチュアの移動\n #for armature_move_data in armature_move_params:\n # move_armature(frame_num, armature_move_data[0], armature_move_data[1])\n #\n # アーマチュアの回転\n for armature_rotation_data in armature_rotation_params:\n rotation_armature(frame_num, armature_rotation_data[0], armature_rotation_data[1])\n # ボーンの移動\n for bone_move_data in bone_move_params:\n update_bone(frame_num, bone_move_data[0], bone_move_data[1])\n\n # Lamp の移動(没)\n #bpy.data.objects['Lamp'].location.x = random.uniform(-5,5)\n #bpy.data.objects['Lamp'].location.y = random.uniform(-5,2)\n #bpy.data.objects['Lamp'].location.z = random.uniform(-5,5)\n # Lamp の移動 (没)\n #bpy.data.objects['Lamp'].location.x = random.uniform(-5,5) # 左右は自由に移動\n #bpy.data.objects['Lamp'].location.y = random.uniform(-5,-0.2) # 前後は体より手前のみ\n #bpy.data.objects['Lamp'].location.z = random.uniform( 2,5) # 上下は顔より上のみ\n # Lamp は移動しない\n\n # レンダリング\n bpy.ops.render.render()\n\n # 画像の保存\n out_file_path = out_dir_path + \"/\" + file_prefix + \"{0:05d}.png\".format(image_num)\n bpy.data.images['Render Result'].save_render(filepath=out_file_path)\n\n # ボーンの情報を保存\n frame_data = save_all_bone_data(csv_file_path, image_num)\n\n image_num = image_num + 1\n\n ###################################################\n # ボーンのY座標を変換したデータの生成処理\n ###################################################\n # 変更対象のボーン名\n bone_1_name = 'thigh.L'\n bone_2_name = 'thigh.R'\n\n # rootの位置を取得\n root_pos, tail_pos = ReadBoneData.get_bone_world_pos(\"metarig\", \"hips\")\n\n # bone_1 および bone_2 の情報を取得\n index = 1\n for bone_index in range(bone_num):\n bone_name = frame_data[index]\n x = frame_data[index+1]\n y = frame_data[index+2]\n z = frame_data[index+3]\n if bone_name == bone_1_name:\n bone_1_y = y\n bone_1_index = index\n if bone_name == bone_2_name:\n bone_2_y = y\n bone_2_index = index\n index = index + 4\n\n #print(\"bone_1_index : \" + str(bone_1_index))\n #print(\"bone_1_y : \" + str(bone_1_y))\n #print(\"bone_2_index : \" + str(bone_2_index))\n #print(\"bone_2_y : \" + str(bone_2_y))\n\n # ボーンのコンストレイントを復元\n store_bone_constraints(armature_name1)\n\n #####################################\n # bone_name_1 のYを反転したデータを作成\n frame_data[bone_1_index+2] = bone_1_y * (-1)\n frame_data[bone_2_index+2] = bone_2_y\n ReadBoneData.restore_frame_data(bone_num, bone_dict, frame_data, root_pos=root_pos)\n # レンダリング\n bpy.ops.render.render()\n # 画像の保存\n out_file_path = out_dir_path + \"/\" + file_prefix + \"{0:05d}.png\".format(image_num)\n bpy.data.images['Render Result'].save_render(filepath=out_file_path)\n # ボーンの情報を保存\n frame_data = save_all_bone_data(csv_file_path, image_num)\n\n image_num = image_num + 1\n\n #####################################\n # bone_name_2 のYを反転したデータを作成\n frame_data[bone_1_index+2] = bone_1_y\n frame_data[bone_2_index+2] = bone_2_y * (-1)\n ReadBoneData.restore_frame_data(bone_num, bone_dict, frame_data, root_pos=root_pos)\n # レンダリング\n bpy.ops.render.render()\n # 画像の保存\n out_file_path = out_dir_path + \"/\" + file_prefix + \"{0:05d}.png\".format(image_num)\n bpy.data.images['Render Result'].save_render(filepath=out_file_path)\n # ボーンの情報を保存\n frame_data = save_all_bone_data(csv_file_path, image_num)\n\n image_num = image_num + 1\n\n #####################################\n # bone_name_1 および bone_name_2 のYを反転したデータを作成\n frame_data[bone_1_index+2] = bone_1_y * (-1)\n frame_data[bone_2_index+2] = bone_2_y * (-1)\n ReadBoneData.restore_frame_data(bone_num, bone_dict, frame_data, root_pos=root_pos)\n # レンダリング\n bpy.ops.render.render()\n # 画像の保存\n out_file_path = out_dir_path + \"/\" + 
file_prefix + \"{0:05d}.png\".format(image_num)\n bpy.data.images['Render Result'].save_render(filepath=out_file_path)\n # ボーンの情報を保存\n frame_data = save_all_bone_data(csv_file_path, image_num)\n\n image_num = image_num + 1\n\n if image_num >= frame_num_max:\n break\n\nif __name__ == '__main__':\n #######################################################################\n # main proc\n #######################################################################\n blender_file_path = '/media/kino/HD-LBU2/temp/HumanMale.blend'\n \n # 出力先情報\n out_path = '/media/kino/HD-LBU2/temp/OUT_DIR/MyData/train/GenData'\n prefix = 'test_'\n csv_path = out_path + '/bone_info.csv'\n frame_num = 100\n \n #sys.stdout = io.StringIO()\n #sys.stderr = io.StringIO()\n render(blender_file_path, out_path, prefix, csv_path, frame_num)\n #sys.stdout = sys.__stdout__\n #sys.stderr = sys.__stderr__\n\n"
},
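GenBoneData.py above derives its camera-relative bone vectors from homogeneous rotation helpers (rotation_x/y/z). A standalone sanity check of that convention, restating the rotation_z helper outside Blender: rotating the +X unit vector by pi/2 around Z should yield +Y.

# Standalone check of the rotation convention used in GenBoneData.py.
import math
import numpy as np

def rotation_z(theta, obj):
    rz = np.array([[math.cos(theta), -math.sin(theta), 0, 0],
                   [math.sin(theta),  math.cos(theta), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])
    return np.dot(rz, obj)

p = np.array([1.0, 0.0, 0.0, 1.0])  # homogeneous +X unit vector
print(np.round(rotation_z(math.pi / 2, p), 6))  # -> [0. 1. 0. 1.]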
{
"alpha_fraction": 0.5594485998153687,
"alphanum_fraction": 0.6182270646095276,
"avg_line_length": 44.02586364746094,
"blob_id": "2020bf16eae96d1821025cc80632b5c620e3a353",
"content_id": "f89587b149e2922cf23816b214c5643510645bf9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5291,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 116,
"path": "/V09.2/model_1_bone.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom keras.models import Sequential, Model\nimport keras.layers\nfrom keras.layers import Input, Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D, Concatenate\nfrom keras import optimizers\nfrom keras import regularizers\n\nfrom image_data import img_width, img_height, img_channel\nfrom image_data import img_monochrome, img_normalize\n\nimport time\n\n# 入力データに関する情報\n\n\n\n# 正則化の値\nL1 = 0.001\nL2 = 0.001\n\n\ndef create_model(weight_file, target_bone_num):\n '''\n モデルの作成\n '''\n\n\n ##### input_main\n input_main = Input(shape=(img_width, img_height, img_channel), dtype='float', name='main_input')\n\n #conv_main_10 = Conv2D(1, (1, 1), padding='same', kernel_initializer='ones', name='conv_main_11')(input_main)\n conv_main_11 = Conv2D(8, (3, 3), padding='same', kernel_initializer='he_normal', name='conv_main_10')(input_main)\n #conv_main_1 = keras.layers.add([conv_main_10, conv_main_11])\n conv_main_1 = Dropout(0.25)(conv_main_11)\n\n #conv_main_20 = Conv2D(2, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_main_21')(conv_main_1)\n conv_main_21 = Conv2D(16, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_main_20')(conv_main_1)\n #conv_main_2 = keras.layers.add([conv_main_20, conv_main_21])\n conv_main_2 = Dropout(0.25)(conv_main_21)\n\n #conv_main_30 = Conv2D(3, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_main_31')(conv_main_2)\n conv_main_31 = Conv2D(32, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_main_30')(conv_main_2)\n #conv_main_3 = keras.layers.add([conv_main_30, conv_main_31])\n conv_main_3 = Dropout(0.25)(conv_main_31)\n\n #conv_main_40 = Conv2D(4, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_main_41')(conv_main_2)\n conv_main_41 = Conv2D(64, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_main_40')(conv_main_3)\n #conv_main_4 = keras.layers.add([conv_main_40, conv_main_41])\n conv_main_4 = Dropout(0.25)(conv_main_41)\n\n flat_main = Flatten()(conv_main_4)\n\n ##### input_data\n input_data = Input(shape=(img_width, img_height, img_channel), dtype='float', name='data_input')\n\n #conv_data_10 = Conv2D(16, (1, 1), padding='same', kernel_initializer='ones', name='conv_data_11')(input_data)\n conv_data_11 = Conv2D(16, (3, 3), padding='same', kernel_initializer='he_normal', name='conv_data_10')(input_data)\n #conv_data_1 = keras.layers.add([conv_data_10, conv_data_11])\n conv_data_1 = Dropout(0.25)(conv_data_11)\n\n #conv_data_20 = Conv2D(32, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_data_21')(conv_data_1)\n conv_data_21 = Conv2D(32, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_data_20')(conv_data_1)\n #conv_data_2 = keras.layers.add([conv_data_20, conv_data_21])\n conv_data_2 = Dropout(0.25)(conv_data_21)\n\n #conv_data_30 = Conv2D(64, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_data_31')(conv_data_2)\n conv_data_31 = Conv2D(64, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_data_30')(conv_data_2)\n #conv_data_3 = keras.layers.add([conv_data_30, conv_data_31])\n conv_data_3 = Dropout(0.25)(conv_data_31)\n\n #conv_data_40 = Conv2D(128, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', 
name='conv_data_41')(conv_data_2)\n conv_data_41 = Conv2D(128, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_data_40')(conv_data_3)\n #conv_data_4 = keras.layers.add([conv_data_40, conv_data_41])\n conv_data_4 = Dropout(0.25)(conv_data_41)\n\n flat_data = Flatten()(conv_data_4)\n\n #### Concatenate\n flat = keras.layers.concatenate([flat_main, flat_data], axis=-1)\n\n #### Dense\n outputs = []\n los_weights = []\n for i in range(target_bone_num):\n dens_1 = Dense(256, activation='relu', init='he_normal', name='bone'+str(i)+'_dens_1')(flat)\n dens_1 = Dropout(0.5)(dens_1)\n\n dens_2 = Dense(256, activation='relu', init='he_normal', name='bone'+str(i)+'_dens_2')(dens_1)\n dens_2 = Dropout(0.5)(dens_2)\n \n dens_out = Dense(3, activation='linear', init='he_normal', name='bone'+str(i)+'_dens_3')(dens_2)\n\n outputs.append(dens_out)\n los_weights.append(1.0)\n\n model = Model(inputs=[input_main, input_data], outputs=outputs)\n\n if weight_file != None:\n # 学習済みウエイトのロード\n print('=======================================')\n print('load weights from ' + weight_file)\n print('=======================================')\n model.load_weights(weight_file)\n\n print(\"###### Model Summary ######\")\n model.summary()\n #time.sleep(2)\n #system.exit(1)\n\n model.compile(loss='mean_squared_error',\n loss_weights=los_weights,\n optimizer=optimizers.Adam(),\n metrics=['accuracy'])\n\n return model\n"
},
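A hypothetical smoke test for the two-input, multi-output create_model() above; it assumes the repo's image_data module supplies img_width, img_height and img_channel, and a Keras version that still accepts this file's legacy init= argument.

# Hypothetical smoke test for create_model() from model_1_bone.py above.
# Assumes image_data provides the shape constants and Keras is compatible.
import numpy as np
from image_data import img_width, img_height, img_channel
from model_1_bone import create_model

model = create_model(None, target_bone_num=2)
main = np.zeros((1, img_width, img_height, img_channel), dtype="float32")
data = np.zeros((1, img_width, img_height, img_channel), dtype="float32")
preds = model.predict([main, data])
print(len(preds), preds[0].shape)  # 2 outputs, each of shape (1, 3)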
{
"alpha_fraction": 0.48954489827156067,
"alphanum_fraction": 0.5104551315307617,
"avg_line_length": 35.401493072509766,
"blob_id": "445c461ed95b417832ab3c3ed525c74a83e435b4",
"content_id": "674d906e6755412cc914609f7d14939c40c4581a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25786,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 670,
"path": "/20171203/l_and_p.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport bpy\nimport csv\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport sys\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D\nfrom keras.layers.recurrent import SimpleRNN\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras import regularizers\n\nimport myutil\nimport myfilter\nimport GenBoneData\n\nimport subprocess\n\n#OUT_DIR = '/root/OUT_DIR'\n#OUT_DIR = '/media/kino/HD-LBU2/MyPrograms/Docker/AutoTraining/V03/OUT_DIR'\nOUT_DIR = '/media/kino/HD-LBU2/temp/OUT_DIR'\n\nLAVEL_FILE_PATH='./bone_info.csv'\n\n# 入力データに関する情報\nimg_monochrome = True\nimg_normalize = True\nimg_width, img_height = 256, 256\ntrain_data_dir = os.path.join(OUT_DIR, 'MyData/train/GenData')\nif not os.path.isdir(train_data_dir):\n os.makedirs(train_data_dir)\n\ntrain_data_prefix = 'test_'\nvalidation_data_dir = os.path.join(OUT_DIR, 'MyData/validation')\nif not os.path.isdir(validation_data_dir):\n os.makedirs(validation_data_dir)\n\ntarget_bone_name = 'ALL'\n#target_bone_name = 'forearm.R'\n\nif target_bone_name == 'ALL':\n # bone_num = 64\n bone_num = 20\nelse:\n bone_num = 1\n\nif img_monochrome == True:\n img_channel = 1\nelse:\n img_channel = 3\n \n# イメージを変換するフィルタの設定\nimg_filters1 = [\n #myfilter.gen_img_resize_filter(256, 256),\n #myfilter.gen_img_zoom_filter(1),\n #myfilter.gen_img_move_filter(0, 0)\n myfilter.gen_img_region_filter(),\n ]\n\ndata_filters1 = [\n myfilter.gen_monochrome_filter(),\n myfilter.gen_normalize_filter()\n ]\n\nimg_filters2 = [\n #myfilter.gen_img_resize_filter(256, 256),\n #myfilter.gen_img_zoom_filter(1),\n #myfilter.gen_img_move_filter(0, 0)\n myfilter.gen_img_region_filter(),\n ]\ndata_filters2 = [\n myfilter.gen_monochrome_filter(),\n #myfilter.gen_nega_filter(), # ネガは没\n myfilter.gen_normalize_filter()\n ]\n\n\n# 学習データ生成に関する情報\ncsv_file_path = os.path.join(train_data_dir, 'bone_info.csv')\n\n# 出力に関するデータ\nresult_dir = os.path.join(OUT_DIR, 'results')\nif not os.path.isdir(result_dir):\n os.makedirs(result_dir)\n\nhistory_file_path = os.path.join(result_dir, 'history.csv')\n\n# モデルに関するデータ\ntimestep = 1 # ★汎化性能のために1でやってみる\n\n# 学習に関するデータ\nbatch_size = 100\n\n# 学習データの生成方法\ndata_gen_auto = True\n# 学習データに関するデータ\ndata_start_index = 0 # 学習に使用するデータの先頭インデックス 0\nif data_gen_auto == True:\n data_end_index = 100 # 学習に使用するデータの末尾インデックス ★動的学習データを使用する場合は100とする\nelse:\n data_end_index = 1000 # 学習に使用するデータの末尾インデックス ★静的学習データを使用する場合は1000とする\n\n# 検証データのインデックス\nsample_index = 0\n\n# 正則化の値\nL1 = 0.001\nL2 = 0.001\n\n\ndef create_cnn_model(weight_file=None):\n '''\n モデルの作成\n '''\n model_com = Sequential()\n model_com.add(Conv2D(32, (3, 3), input_shape=(img_width, img_height, img_channel)\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_com.add(Activation('relu'))\n model_com.add(MaxPooling2D(pool_size=(2, 2)))\n model_com.add(Dropout(0.25))\n\n model_com.add(Conv2D(32, (3, 3)\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_com.add(Activation('relu'))\n model_com.add(MaxPooling2D(pool_size=(2, 2)))\n model_com.add(Dropout(0.25))\n\n model_com.add(Conv2D(64, (3, 3)\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_com.add(Activation('relu'))\n model_com.add(MaxPooling2D(pool_size=(2, 2)))\n model_com.add(Dropout(0.25))\n\n 
model_com.add(Flatten())\n model_com.add(Dense(3042, activation='relu', init='he_normal'\n #model_com.add(Dense(2048, activation='relu', init='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n #model_com.add(Dropout(0.5))\n\n models = []\n los_weights = []\n for i in range(bone_num):\n model_bone = Sequential()\n model_bone.add(model_com)\n\n model_bone.add(Dense(512, activation='relu', init='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_bone.add(Dropout(0.5))\n\n model_bone.add(Dense(512, activation='relu', init='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_bone.add(Dropout(0.5))\n\n model_bone.add(Dense(3, activation='linear', init='he_normal')) # ??????? tanh or linear\n\n models.append(model_bone.output)\n los_weights.append(1.0)\n\n model = Model(input=model_com.input, output=models)\n\n\n if weight_file != None:\n # 学習済みウエイトのロード\n print('=======================================')\n print('load weights from ' + weight_file)\n print('=======================================')\n model.load_weights(weight_file)\n\n model.summary()\n\n model.compile(loss='mean_squared_error',\n loss_weights=los_weights,\n optimizer=optimizers.Adam(),\n metrics=['accuracy'])\n\n return model\n\ndef create_model(weight_file=None):\n '''\n モデルの作成\n '''\n\n # データのshape\n img_shape = (img_width, img_height, img_channel)\n timestep_shape = (timestep, img_width, img_height, img_channel)\n\n # 入力のCNN層\n model_conv = Sequential()\n #model_conv.add(Conv2D(32, (3, 3), input_shape=img_shape, batch_size=batch_size))\n\n #model_conv.add(Conv2D(8, (3, 3), input_shape=img_shape, batch_size=batch_size))\n model_conv.add(Conv2D(8, (3, 3), input_shape=img_shape))\n\n model_conv.add(Activation('relu'))\n #model_conv.add(MaxPooling2D(pool_size=(2, 2)))\n #model_conv.add(Dropout(0.25)) # 2017/9/10\n\n #model_conv.add(Conv2D(32, (3, 3)))\n model_conv.add(Conv2D(8, (3, 3)))\n model_conv.add(Activation('relu'))\n #model_conv.add(MaxPooling2D(pool_size=(2, 2)))\n #model_conv.add(Dropout(0.25)) # 2017/9/10\n\n #model_conv.add(Conv2D(64, (3, 3)))\n model_conv.add(Conv2D(16, (3, 3)))\n model_conv.add(Activation('relu'))\n #model_conv.add(MaxPooling2D(pool_size=(2, 2)))\n #model_conv.add(Dropout(0.25)) # 2017/9/10\n\n model_conv.add(Flatten())\n\n #print('model_conv:', model_conv)\n\n # 入力のCNN層をtimestep数分繋いでflatにする\n model_com = Sequential()\n\n #model_com.add(TimeDistributed(model_conv, input_shape=timestep_shape, batch_size=batch_size))\n model_com.add(TimeDistributed(model_conv, input_shape=timestep_shape))\n\n model_com.add(Flatten())\n\n model_com.add(Dense(512, activation='relu', kernel_initializer='he_normal'\n #,kernel_regularizer=regularizers.l2(L2) # 2017/9/10\n #,activity_regularizer=regularizers.l1(L1) # 2017/9/10\n ))\n #model_com.add(Dropout(0.5)) # 2017/9/10\n #model_com.add(Dense(512, activation='relu', kernel_initializer='he_normal'))\n #model_com.add(Dropout(0.5))\n\n #print('model_com:', model_com)\n\n models = []\n los_weights = []\n for i in range(bone_num*3):\n model_bone = Sequential()\n model_bone.add(model_com)\n #model_bone.add(Dense(512, activation='relu', kernel_initializer='he_normal'))\n #model_bone.add(Dropout(0.5)) # 2017/9/10\n model_bone.add(Dense(128, activation='relu', kernel_initializer='he_normal'\n #,kernel_regularizer=regularizers.l2(L2) # 2017/9/10\n #,activity_regularizer=regularizers.l1(L1) # 2017/9/10\n ))\n 
#model_bone.add(Dropout(0.5)) # 2017/9/10\n model_bone.add(Dense(1, activation='linear'\n #,kernel_regularizer=regularizers.l2(L2) # 2017/9/10\n #,activity_regularizer=regularizers.l1(L1) # 2017/9/10\n ))\n\n models.append(model_bone.output)\n los_weights.append(1.0)\n\n model = Model(inputs=model_com.input, outputs=models)\n #model.summary()\n\n if weight_file != None:\n # 学習済みウエイトのロード\n print('=======================================')\n print('load weights from ' + weight_file)\n print('=======================================')\n model.load_weights(weight_file)\n\n\n model.compile(loss='mean_squared_error',\n loss_weights=los_weights,\n optimizer=optimizers.Adam(),\n metrics=['accuracy'])\n\n return model\n\ntrain_dir_index = 0\ndef get_train_data_static():\n '''\n トレーニング用データの生成(既に生成済みの画像から生成する)\n '''\n\n # トレーニング用のデータのディレクトリのリスト\n train_data_dirs = [\n './MyData/train/Other',\n # './MyData/train/Male',\n './MyData/train/Female',\n './MyData/train/MaleNoClothes',\n './MyData/train/FemaleNoClothes'\n ]\n\n # 次に使用するBlenderファイル名を決定\n global train_dir_index\n train_data_dir = train_data_dirs[train_dir_index]\n train_dir_index += 1\n if train_dir_index >= len(train_data_dirs):\n train_dir_index = 0\n\n # データの生成\n train_datagen = myutil.MyDataGenerator(train_data_dir, width=img_width, height=img_height)\n train_list = []\n for i in range(data_start_index, data_end_index, batch_size):\n x_train, y_train = train_datagen.gen_data(i, i+batch_size,\n target_bone=target_bone_name,\n divided=True,\n img_filters=img_filters1,\n data_filters=data_filters1)\n\n train_list.append((x_train, y_train))\n\n return (train_list, train_data_dir)\n\ntrain_blender_index = 0\ndef get_train_data_auto():\n '''\n トレーニング用データの生成(動的に画像を生成する)\n '''\n\n # トレーニング用のBlenderファイルのリスト\n train_blender_files = [\n #('./HumanMale.blend', img_filters1, data_filters1), 検証用\n #(None, img_filters2, data_filters2),\n ('./Robot_1.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./HumanOther.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./BraidHair.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./Robot_2.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./CasualSuit.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./MaleSuit.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./Robot_3.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./HumanFemale.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./HumanMaleNoClothes.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./Robot_2.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./HumanFemaleNoClothes.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ]\n\n # 次に使用するBlenderファイル名を決定\n global train_blender_index\n (train_blender_file, img_filters, data_filters, img_mirror_x) = train_blender_files[train_blender_index]\n train_blender_index += 1\n if train_blender_index >= len(train_blender_files):\n train_blender_index = 0\n\n if train_blender_file != None:\n # データの生成\n GenBoneData.render(train_blender_file, train_data_dir, train_data_prefix, csv_file_path, data_end_index - data_start_index)\n else:\n 
train_blender_file = \"\"\n\n # トレーニング用のデータ\n train_datagen = myutil.MyDataGenerator(train_data_dir, width=img_width, height=img_height)\n train_list = []\n for i in range(data_start_index, data_end_index, batch_size):\n x_train, y_train = train_datagen.gen_data(i, i+batch_size,\n target_bone=target_bone_name,\n divided=True,\n img_mirror_x=img_mirror_x,\n img_filters=img_filters,\n data_filters=data_filters)\n train_list.append((x_train, y_train))\n\n return (train_list, train_blender_file)\n\n\ndef learning(weight_file):\n '''\n 学習を実行\n weight_file\n None : 重みを初期値から学習する\n その他 : 指定された重みファイルからロードしてから学習を続行する\n '''\n # モデル作成\n model = create_cnn_model(weight_file) \n\n epoch_counter = 0\n while True:\n # トレーニングデータの取得\n if data_gen_auto == True:\n train_list, train_data_name = get_train_data_auto()\n else:\n train_list, train_data_name = get_train_data_static()\n \n # 検証用のデータ\n vali_datagen = myutil.MyDataGenerator(validation_data_dir, width=img_width, height=img_height)\n x_vali, y_vali = vali_datagen.gen_data(sample_index, sample_index+batch_size,\n target_bone=target_bone_name,\n divided=True,\n img_filters=img_filters1,\n data_filters=data_filters1)\n \n # 具体的な誤差を表示するためのデータ\n w_vali = np.array(x_vali).reshape(batch_size, img_width, img_height, img_channel)\n\n # テストデータのシーケンスが変わったのでRNNレイヤーのステータスをクリア\n # model.reset_states()\n \n start_no = 1\n for x_train, y_train in train_list:\n #w_train = np.array(x_train[sample_index:sample_index+batch_size]).reshape(batch_size, timestep, img_width, img_height, img_channel)\n w_train = np.array(x_train[sample_index]).reshape(1, img_width, img_height, img_channel)\n epoch_counter+=1\n print(\"==========================================================\")\n print(\"# Epoch : \" + str(epoch_counter) + \n \" [\" + train_data_name + \"](\" + str(start_no) + \" - \" + str(start_no+batch_size-1) + \")\")\n start_no += batch_size\n # Fine-tuning\n history = model.fit(x_train, y_train, epochs=1, batch_size=batch_size, validation_data=(x_vali, y_vali), verbose=0)\n \n # モデルの重み情報のセーブ\n '''\n dt = datetime.now()\n dt_str = dt.strftime('%Y%m%d_%H%M%S')\n if epoch_counter % 100 == 0:\n weight_file_name = 'weight_data_' + dt_str + '.h5'\n else:\n weight_file_name = 'weight_data_temp.h5'\n\n model.save_weights(os.path.join(result_dir, weight_file_name))\n '''\n if epoch_counter % (2 * (100/batch_size)) == 0:\n weight_file_name = 'weight_data_tempB.h5'\n model.save_weights(os.path.join(result_dir, weight_file_name))\n elif epoch_counter % (1 * (100/batch_size)) == 0:\n weight_file_name = 'weight_data_tempA.h5'\n model.save_weights(os.path.join(result_dir, weight_file_name))\n \n # 具体的な誤差を表示\n gosa_list = []\n train_preds = model.predict(w_train)\n train_loss_str = ''\n if bone_num == 1:\n for i in range(0, 3):\n train_gosa = train_preds[0][0][i]-y_train[0][sample_index][i]\n train_loss_str += \"[{0:.5f}]\".format(train_gosa)\n gosa_list.append(train_gosa)\n else:\n for i in range(0, 3):\n train_gosa = train_preds[7][0][i]-y_train[7][sample_index][i]\n train_loss_str += \"[{0:.5f}]\".format(train_gosa)\n gosa_list.append(train_gosa)\n print('predict(train) : ' + train_loss_str)\n \n valid_preds = model.predict(w_vali)\n valid_loss_str = ''\n if bone_num == 1:\n for i in range(0, 3):\n valid_gosa = valid_preds[0][0][i]-y_vali[0][0][i]\n valid_loss_str += \"[{0:.5f}]\".format(valid_gosa)\n gosa_list.append(valid_gosa)\n else:\n for i in range(0, 3):\n valid_gosa = valid_preds[7][0][i]-y_vali[7][0][i]\n valid_loss_str += \"[{0:.5f}]\".format(valid_gosa)\n 
gosa_list.append(valid_gosa)\n print('predict(valid) : ' + valid_loss_str)\n \n # history情報のセーブ\n hist_dic = history.history\n\n # hist_dic のキー\n # val_dense_nnn_loss\n # val_dense_nnn_acc\n # dense_nnn_loss\n # dense_nnn_acc\n keys = hist_dic.keys()\n val_loss = 0.0\n val_acc = 0.0\n loss = 0.0\n acc = 0.0\n val_loss_n = 0.0\n val_acc_n = 0.0\n loss_n = 0.0\n acc_n = 0.0\n count = 0\n for key in keys:\n '''\n print(\"key=\"+str(key))\n print(\"len(hist_dic[key])=\" + str(len(hist_dic[key])))\n '''\n if key.startswith(\"val_\") and key.endswith(\"_loss\"):\n val_loss += float(hist_dic[key][0])\n val_loss_n += 1\n elif key.startswith(\"val_\") and key.endswith(\"_acc\"):\n val_acc += float(hist_dic[key][0])\n val_acc_n += 1\n elif key.endswith(\"loss\"):\n loss += float(hist_dic[key][0])\n loss_n += 1\n elif key.endswith(\"acc\"):\n acc += float(hist_dic[key][0])\n acc_n += 1\n #count += 1\n #count /= 4\n if val_loss_n != 0:\n val_loss /= val_loss_n\n if val_acc_n != 0:\n val_acc /= val_acc_n\n if loss_n != 0:\n loss /= loss_n\n if acc_n != 0:\n acc /= acc_n\n\n '''\n print(\"val_loss_n = \" + str(val_loss_n))\n print(\"val_acc_n = \" + str(val_acc_n))\n print(\"loss_n = \" + str(loss_n))\n print(\"acc_n = \" + str(acc_n))\n '''\n\n print(\"loss=\" + str(loss))\n print(\"acc =\" + str(acc))\n print(\"val_loss=\" + str(val_loss))\n print(\"val_acc =\" + str(val_acc))\n\n dt = datetime.now()\n dt_str = dt.strftime('%Y%m%d_%H%M%S')\n csv_info = [dt_str, epoch_counter,\n loss, acc, val_loss, val_acc]\n csv_info.extend(gosa_list)\n with open(history_file_path, \"a\") as f:\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerow(csv_info)\n \ndef predicting(model, data_dir, start_index, end_index, out_file):\n '''\n 予測を実行\n '''\n\n # 検証用のデータ\n datagen = myutil.MyDataGenerator(data_dir, width=img_width, height=img_height, lavel_file_path=LAVEL_FILE_PATH)\n x, y = datagen.gen_data(start_index, end_index,\n target_bone=target_bone_name,\n divided=True,\n image_only=True,\n img_filters=img_filters1,\n data_filters=data_filters1)\n # ボーンの情報を取得\n bone_list = datagen.get_bone_data()\n bone_num = len(bone_list)\n\n # テストデータのシーケンスが変わったのでRNNレイヤーのステータスをクリア\n model.reset_states()\n \n # 予測実行\n preds = model.predict(x)\n\n # 誤差の表示\n '''\n valid_loss_str = ''\n for i in range(21,24):\n valid_gosa = preds[i][0][0]-y[i][0]\n valid_loss_str += \"[{0:.5f}]\".format(valid_gosa)\n\n print('predict(valid) : ' + valid_loss_str)\n '''\n \n '''\n 予測した情報をファイルに出力する。\n '''\n with open(out_file, 'w', newline='') as f:\n writer = csv.writer(f)\n\n # Boneのヘッダー出力\n writer.writerow(['[Bone num]', str(bone_num)])\n writer.writerow(['[Bone name]', '[Parent name]', '[Bone length]'])\n # Boneの長さの情報を出力\n for bone_info in bone_list:\n writer.writerow(bone_info)\n\n # フレームのヘッダー情報出力\n bone_header = []\n bone_header.append('[Frame No.]')\n for bone_num in range(1, bone_num+1):\n bone_header.append('[Bone' + str(bone_num) + ' name]')\n bone_header.append('[Bone' + str(bone_num) + ' vecX]')\n bone_header.append('[Bone' + str(bone_num) + ' vecY]')\n bone_header.append('[Bone' + str(bone_num) + ' vecZ]')\n writer.writerow(bone_header)\n\n data_len = len(preds[0])\n for i in range(data_len):\n data = list()\n data.append(str(i+1)) # Frame No.\n for bone_index in range(bone_num):\n bone_name, parant_name, bone_length = bone_list[bone_index]\n data.append(bone_name) # Bone<N> name\n '''\n vexX = preds[bone_index*3][i][0]\n vexY = preds[bone_index*3+1][i][0]\n vexZ = preds[bone_index*3+2][i][0]\n '''\n vexX = preds[bone_index][i][0]\n 
vexY = preds[bone_index][i][1]\n                vexZ = preds[bone_index][i][2]\n                data.append(str(vexX))     # Bone<N> vecX\n                data.append(str(vexY))     # Bone<N> vecY\n                data.append(str(vexZ))     # Bone<N> vecZ\n            writer.writerow(data)\n\n    print(\"Write Data success.[\" + out_file + \"]\")\n\n\n#----------------------------------------------------------------------------------\n# Main proc\n#----------------------------------------------------------------------------------\nif __name__ == '__main__':\n    # Check the arguments\n    if len(sys.argv) == 1:\n        print(\"Arg Error !!!\")\n        print(\"usage: python l_and_p.py {l [<weight_file>] | p <weight_file>}\")\n        sys.exit()\n\n    if sys.argv[1] != 'l' and sys.argv[1] != 'p':\n        print(\"Arg Error !!!\")\n        print(\"usage: python l_and_p.py {l [<weight_file>] | p <weight_file>}\")\n        sys.exit()\n\n    weight_file = None\n    if len(sys.argv) > 2:\n        weight_file = sys.argv[2]\n\n    if sys.argv[1] == 'l':\n        # Run training\n        learning(weight_file)\n    else:\n        # Run prediction\n        if weight_file is None:\n            print(\"Arg Error !!!\")\n            print(\"usage: python l_and_p.py {l [<weight_file>] | p <weight_file>}\")\n            sys.exit()\n\n        data_dir = os.path.join(OUT_DIR, 'MyData/train/Male')\n        if not os.path.isdir(data_dir):\n            os.makedirs(data_dir)\n\n        out_dir = os.path.join(OUT_DIR, 'predict_out')\n        if not os.path.isdir(out_dir):\n            os.makedirs(out_dir)\n        out_file = os.path.join(out_dir, 'bone_info.csv')\n\n        start_index = 1\n        end_index = 100\n\n        batch_size = end_index - start_index\n        # Build the model from the weight file first; predicting() expects a\n        # compiled model, not a weight-file path.\n        model = create_cnn_model(weight_file)\n        predicting(model, data_dir, start_index, end_index, out_file)\n\n"
},
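The `create_model` function in the l_and_p.py record above builds one shared trunk and a separate three-unit regression head per bone, then compiles everything as a single multi-output model with per-head `loss_weights`. Below is a minimal runnable sketch of that pattern, assuming TensorFlow 2.x / `tf.keras`; the layer sizes, `bone_num`, and the name `build_multi_head_model` are illustrative, not values from the file.

```python
# Minimal sketch of the shared-trunk / per-bone-head pattern (tf.keras assumed).
import numpy as np
from tensorflow.keras import layers, models, optimizers

def build_multi_head_model(img_shape=(64, 64, 1), bone_num=4):
    inp = layers.Input(shape=img_shape)
    x = layers.Conv2D(8, (3, 3), activation='relu')(inp)
    x = layers.Conv2D(16, (3, 3), activation='relu')(x)
    x = layers.Flatten()(x)
    x = layers.Dense(128, activation='relu', kernel_initializer='he_normal')(x)
    # One independent 3-vector regression head per bone, all sharing the trunk x.
    outputs = [layers.Dense(3, activation='linear', name='bone_%d' % i)(x)
               for i in range(bone_num)]
    model = models.Model(inputs=inp, outputs=outputs)
    model.compile(loss='mean_squared_error',
                  loss_weights=[1.0] * bone_num,
                  optimizer=optimizers.Adam())
    return model

model = build_multi_head_model()
x = np.random.rand(2, 64, 64, 1)
y = [np.random.rand(2, 3) for _ in range(4)]  # one (batch, 3) target per head
model.fit(x, y, epochs=1, verbose=0)
```

As in the file, the training targets are a list with one `(batch, 3)` array per output head.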
{
"alpha_fraction": 0.6504854559898376,
"alphanum_fraction": 0.6626213788986206,
"avg_line_length": 22.19718360900879,
"blob_id": "1529afd8fa4b7ec8e498781ede305b93afe5e177",
"content_id": "b6ec518e5bb3dc5bf366fb1f12855321e425fe59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1852,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 71,
"path": "/Blender/Dockerfile",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# Keras の実行環境\n\n# ベースイメージの設定\nFROM ubuntu:16.04\n\n# 作成者情報\nMAINTAINER Morio Kinoshita [email protected]\n\n# 作業ディレクトリの指定\nWORKDIR /root\n\n# 環境変数の設定\nENV PYENV_ROOT=/root/.pyenv \\\n PATH=/root/.pyenv/bin:$PATH\n\n# 前提ツールのインストール\nRUN apt-get update\nRUN apt-get install -y make \\\n build-essential \\\n libssl-dev \\\n zlib1g-dev \\\n libbz2-dev \\\n libreadline-dev \\\n libsqlite3-dev \\\n wget curl \\\n llvm \\\n libncurses5-dev \\\n libncursesw5-dev \\\n xz-utils \\\n git\n\n# pyenv のインストール\nRUN git clone https://github.com/yyuu/pyenv.git ~/.pyenv\nRUN echo 'export PYENV_ROOT=/root/.pyenv' >> ~/.bashrc\nRUN echo 'export PATH=/root/.pyenv/bin:$PATH' >> ~/.bashrc\nRUN echo 'eval \"$(pyenv init -)\"' >> ~/.bashrc\n\n# Pythonのインストール\nRUN pyenv install 3.5.0\nRUN pyenv global 3.5.0\n\n# 必要なpythonのパッケージをインストール。多分入ってるとおもう。\nRUN apt-get install -y \\\n python3-pip \\\n python3-dev\n\n# Blender ソースのダウンロード\n# <https://wiki.blender.org/index.php/Dev:Doc/Building_Blender/Linux/Ubuntu/CMake>\n# 1. Get the source\nRUN mkdir ~/blender-git && \\\n cd ~/blender-git && \\\n git clone https://git.blender.org/blender.git && \\\n cd blender && \\\n git submodule update --init --recursive && \\\n git submodule foreach git checkout master && \\\n git submodule foreach git pull --rebase origin master\n\n# 2. Install/Update the dependenies\nRUN apt-get update && \\\n apt-get install -y git build-essential sudo\n\nRUN echo 'Y' > /tmp/Y && \\\n cd ~/blender-git && \\\n ./blender/build_files/build_environment/install_deps.sh < /tmp/Y && \\\n rm /tmp/Y\n\n# 3. Compile Blender with CMake\nRUN apt-get install -y cmake \\\n cmake-curses-gui\nRUN cd ~/blender-git/blender && \\\n make\n\n"
},
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.71875,
"avg_line_length": 31,
"blob_id": "bcb2dd26bc898307d5170250e521b6252fcaad26",
"content_id": "c7437632c0f90ff9c05a51db078db78f5e24b4cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/V03/1_build.sh",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "docker build -t autotrain:0.3 .\n"
},
{
"alpha_fraction": 0.49941226840019226,
"alphanum_fraction": 0.5306347608566284,
"avg_line_length": 32.35784149169922,
"blob_id": "c111d038401eefcd9c4da04b815e6ff0d026161c",
"content_id": "4287d0ee16ae655beced5e5bd002801c0e0c1423",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14916,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 408,
"path": "/V09/ReadBoneData.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport math\nimport csv\nimport bpy\n\n\n# アーマチュア情報\n#armature_name1 = 'Armature'\n#armature_name2 = 'Armature.001'\narmature_name1 = 'metarig'\narmature_name2 = 'Armature'\n\n# ベジェカーブの情報\ncurve_dict = {}\ncurve_dict['hips'] = ('BezierCurve_Head', 1)\ncurve_dict['spine'] = ('BezierCurve_Head', 2)\ncurve_dict['chest01'] = ('BezierCurve_Head', 3)\ncurve_dict['chest02'] = ('BezierCurve_Head', 4)\ncurve_dict['neck'] = ('BezierCurve_Head', 5)\ncurve_dict['head'] = ('BezierCurve_Head', 6)\n\ncurve_dict['shoulder.L'] = ('BezierCurve_Arm.L', 1)\ncurve_dict['upper_arm.L'] = ('BezierCurve_Arm.L', 2)\ncurve_dict['forearm.L'] = ('BezierCurve_Arm.L', 3)\n\ncurve_dict['shoulder.R'] = ('BezierCurve_Arm.R', 1)\ncurve_dict['upper_arm.R'] = ('BezierCurve_Arm.R', 2)\ncurve_dict['forearm.R'] = ('BezierCurve_Arm.R', 3)\n\ncurve_dict['hips.L'] = ('BezierCurve_Foot.L', 1)\ncurve_dict['thigh.L'] = ('BezierCurve_Foot.L', 2)\ncurve_dict['shin.L'] = ('BezierCurve_Foot.L', 3)\ncurve_dict['foot.L'] = ('BezierCurve_Foot.L', 4)\n\ncurve_dict['hips.R'] = ('BezierCurve_Foot.R', 1)\ncurve_dict['thigh.R'] = ('BezierCurve_Foot.R', 2)\ncurve_dict['shin.R'] = ('BezierCurve_Foot.R', 3)\ncurve_dict['foot.R'] = ('BezierCurve_Foot.R', 4)\n\ndef get_bone_len(armature_name, bone_name):\n '''\n ボーンの長さを取得\n '''\n return bpy.data.objects[armature_name].pose.bones[bone_name].length\n\ndef get_bone_world_pos(armature_name, bone_name):\n '''\n ボーンのヘッドとテールのワールド座標を取得\n '''\n matrix_world = bpy.data.objects[armature_name].matrix_world\n head_local = bpy.data.objects[armature_name].pose.bones[bone_name].head\n tail_local = bpy.data.objects[armature_name].pose.bones[bone_name].tail\n head_world = matrix_world * head_local\n tail_world = matrix_world * tail_local\n return (head_world, tail_world)\n\ndef move_to(pos, obj):\n '''\n pos で指定した座標分、objの座標を移動する\n '''\n m = np.array([[1, 0, 0, pos[0]],\n [0, 1, 0, pos[1]],\n [0, 0, 1, pos[2]],\n [0, 0, 0, 1 ]])\n new_pos = np.dot(m, obj)\n return new_pos\n\ndef move_back_to(pos, obj):\n '''\n pos で指定した座標分、objの座標を戻す\n '''\n m = np.array([[1, 0, 0, -pos[0]],\n [0, 1, 0, -pos[1]],\n [0, 0, 1, -pos[2]],\n [0, 0, 0, 1 ]])\n new_pos = np.dot(m, obj)\n return new_pos\n\ndef rotation_x(theta, obj):\n '''\n X軸を中心にthetaで指定した角度だけobjの座標を回転する\n '''\n rx = np.array([[ 1, 0 , 0 , 0],\n [ 0, np.cos(theta), -np.sin(theta), 0],\n [ 0, np.sin(theta), np.cos(theta), 0],\n [ 0, 0 , 0 , 1]])\n return np.dot(rx, obj)\n \ndef rotation_y(theta, obj):\n '''\n Y軸を中心にthetaで指定した角度だけobjの座標を回転する\n '''\n ry = np.array([[ np.cos(-theta), 0, np.sin(-theta), 0],\n [ 0 , 1, 0 , 0],\n [-np.sin(-theta), 0, np.cos(-theta), 0],\n [ 0 , 0, 0 , 1]])\n return np.dot(ry, obj)\n\ndef rotation_z(theta, obj):\n '''\n Z軸を中心にthetaで指定した角度だけobjの座標を回転する\n '''\n rz = np.array([[ np.cos(theta), -np.sin(theta), 0, 0],\n [ np.sin(theta), np.cos(theta), 0, 0],\n [ 0 , 0 , 1, 0],\n [ 0 , 0 , 0, 1]])\n return np.dot(rz, obj)\n\ndef get_y_rotation_from_z(obj):\n '''\n Y軸に対する-z方向からの角度を取得\n '''\n obj_x = obj[0]\n obj_z = obj[2]\n if obj_x == 0 and obj_z <= 0:\n theta = 0\n elif obj_x == 0 and obj_z > 0:\n theta = math.pi\n elif obj_x > 0 and obj_z >= 0:\n theta = math.pi/2 + np.arctan(abs(obj_z/obj_x))\n elif obj_x > 0 and obj_z < 0:\n theta = math.pi/2 - np.arctan(abs(obj_z/obj_x))\n elif obj_x < 0 and obj_z >= 0:\n theta = -math.pi/2 - np.arctan(abs(obj_z/obj_x))\n elif obj_x < 0 and obj_z < 0:\n theta = -math.pi/2 + np.arctan(abs(obj_z/obj_x))\n return theta\n\ndef get_x_rotation_from_z(obj):\n '''\n X軸に対する-z方向からの角度を取得\n '''\n 
obj_y = obj[1]\n obj_z = obj[2]\n if obj_y == 0 and obj_z <= 0:\n theta = 0\n elif obj_y == 0 and obj_z > 0:\n theta = math.pi\n elif obj_y > 0 and obj_z <= 0:\n theta = math.pi/2 - np.arctan(abs(obj_z/obj_y))\n elif obj_y > 0 and obj_z > 0:\n theta = math.pi/2 + np.arctan(abs(obj_z/obj_y))\n elif obj_y < 0 and obj_z <= 0:\n theta = -math.pi/2 + np.arctan(abs(obj_z/obj_y))\n elif obj_y < 0 and obj_z > 0:\n theta = -math.pi/2 - np.arctan(abs(obj_z/obj_y))\n return theta\n\ndef get_x_rotation_from_y(obj):\n '''\n X軸に対する-y方向からの角度を取得\n '''\n obj_y = obj[1]\n obj_z = obj[2]\n if obj_z == 0 and obj_y <= 0:\n theta = 0\n elif obj_z == 0 and obj_y > 0:\n theta = math.pi\n elif obj_z > 0 and obj_y <= 0:\n theta = - np.arctan(abs(obj_z/obj_y))\n elif obj_z > 0 and obj_y > 0:\n theta = -math.pi + np.arctan(abs(obj_z/obj_y))\n elif obj_z < 0 and obj_y <= 0:\n theta = np.arctan(abs(obj_z/obj_y))\n elif obj_z < 0 and obj_y > 0:\n theta = math.pi - np.arctan(abs(obj_z/obj_y))\n return theta\n\n\ndef get_z_rotation_from_y(obj):\n '''\n Z軸に対する-y 方向からの角度を取得\n '''\n obj_x = obj[0]\n obj_y = obj[1]\n if obj_x == 0 and obj_y <= 0:\n theta = 0\n elif obj_x == 0 and obj_y > 0:\n theta = math.pi\n elif obj_x > 0 and obj_y <= 0:\n theta = math.pi * 1/2 - np.arctan(abs(obj_y/obj_x))\n elif obj_x > 0 and obj_y > 0:\n theta = math.pi * 1/2 + np.arctan(abs(obj_y/obj_x))\n elif obj_x < 0 and obj_y <= 0:\n theta = -math.pi * 1/2 + np.arctan(abs(obj_y/obj_x))\n elif obj_x < 0 and obj_y > 0:\n theta = -math.pi * 1/2 - np.arctan(abs(obj_y/obj_x))\n return theta\n\ndef get_theta1(base, obj, camera):\n '''\n カメラから見たbaseを基準とするobjの角度を取得する。次の情報を返す。\n ・objのY軸に対する-z方向からの角度\n ・objのX軸に対する-z方向からの角度, \n ・カメラのY軸に対する-z方向からの角度\n ・カメラのX軸に対する-z方向からの角度, \n '''\n o0 = move_back_to(base.to_4d(), obj.to_4d())\n c0 = move_back_to(base.to_4d(), camera.to_4d())\n c_rz = get_z_rotation_from_y(c0)\n o1 = rotation_z(-c_rz, o0)\n c1 = rotation_z(-c_rz, c0)\n c_rx = get_x_rotation_from_y(c1)\n o2 = rotation_x(-c_rx, o1)\n o_ry = get_y_rotation_from_z(o2)\n o3 = rotation_y(-o_ry, o2)\n o_rx = get_x_rotation_from_z(o3)\n return o_ry, o_rx, c_rz, c_rx\n\ndef get_theta2(base_4d, obj_4d):\n '''\n base を基準としてobjの角度を取得する。次の情報を返す。\n ・objのZ軸に対する-y方向からの角度\n ・objのX軸に対する-y方向からの角度, \n '''\n o0 = move_back_to(base_4d, obj_4d)\n o_rz = get_z_rotation_from_y(o0)\n o1 = rotation_z(-o_rz, o0)\n o_rx = get_x_rotation_from_y(o1)\n return o_rz, o_rx\n\n# --------------------------------------------------\ndef get_bone_data(armature_name, bone_name):\n '''\n ボーンの次の情報を取得する。\n ・ボーンの長さ\n ・headのワールド座標\n ・tailのワールド座標\n ・カメラのワールド座標\n ・tailのY軸に対する-z方向からの角度\n ・tailのX軸に対する-z方向からの角度, \n ・カメラのY軸に対する-z方向からの角度\n ・カメラのX軸に対する-z方向からの角度, \n '''\n l = get_bone_len(armature_name, bone_name)\n h, t = get_bone_world_pos(armature_name, bone_name)\n c = bpy.context.scene.camera.location\n t_ry, t_rx, c_rz, c_rx = get_theta1(h, t, c)\n return (l, h, t, c, t_ry, t_rx, c_rz, c_rx)\n\ndef move_bezier_point(curve_name, point_index, position):\n '''\n ベジェカーブの指定したインデックスのポイントを、指定した座標に移動する。\n '''\n bpy.data.curves[curve_name].splines[0].bezier_points[point_index].co = position\n bpy.data.curves[curve_name].splines[0].bezier_points[point_index].handle_left = position\n bpy.data.curves[curve_name].splines[0].bezier_points[point_index].handle_right = position\n\n \nmirror_dict = {}\nmirror_dict['hips'] = 'hips'\nmirror_dict['spine'] = 'spine'\nmirror_dict['chest01'] = 'chest01'\nmirror_dict['chest02'] = 'chest02'\nmirror_dict['neck'] = 'neck'\nmirror_dict['head'] = 'head'\nmirror_dict['shoulder.L'] = 
'shoulder.R'\nmirror_dict['upper_arm.L'] = 'upper_arm.R'\nmirror_dict['forearm.L'] = 'forearm.R'\nmirror_dict['hand.L'] = 'hand.R'\nmirror_dict['thumb.01.L'] = 'thumb.01.R'\nmirror_dict['thumb.02.L'] = 'thumb.02.R'\nmirror_dict['thumb.03.L'] = 'thumb.03.R'\nmirror_dict['palm.01.L'] = 'palm.01.R'\nmirror_dict['f_index.01.L'] = 'f_index.01.R'\nmirror_dict['f_index.02.L'] = 'f_index.02.R'\nmirror_dict['f_index.03.L'] = 'f_index.03.R'\nmirror_dict['palm.02.L'] = 'palm.02.R'\nmirror_dict['f_middle.01.L'] = 'f_middle.01.R'\nmirror_dict['f_middle.02.L'] = 'f_middle.02.R'\nmirror_dict['f_middle.03.L'] = 'f_middle.03.R'\nmirror_dict['palm.03.L'] = 'palm.03.R'\nmirror_dict['f_ring.01.L'] = 'f_ring.01.R'\nmirror_dict['f_ring.02.L'] = 'f_ring.02.R'\nmirror_dict['f_ring.03.L'] = 'f_ring.03.R'\nmirror_dict['palm.04.L'] = 'palm.04.R'\nmirror_dict['f_pinky.01.L'] = 'f_pinky.01.R'\nmirror_dict['f_pinky.02.L'] = 'f_pinky.02.R'\nmirror_dict['f_pinky.03.L'] = 'f_pinky.03.R'\nmirror_dict['shoulder.R'] = 'shoulder.L'\nmirror_dict['upper_arm.R'] = 'upper_arm.L'\nmirror_dict['forearm.R'] = 'forearm.L'\nmirror_dict['hand.R'] = 'hand.L'\nmirror_dict['thumb.01.R'] = 'thumb.01.L'\nmirror_dict['thumb.02.R'] = 'thumb.02.L'\nmirror_dict['thumb.03.R'] = 'thumb.03.L'\nmirror_dict['palm.01.R'] = 'palm.01.L'\nmirror_dict['f_index.01.R'] = 'f_index.01.L'\nmirror_dict['f_index.02.R'] = 'f_index.02.L'\nmirror_dict['f_index.03.R'] = 'f_index.03.L'\nmirror_dict['palm.02.R'] = 'palm.02.L'\nmirror_dict['f_middle.01.R'] = 'f_middle.01.L'\nmirror_dict['f_middle.02.R'] = 'f_middle.02.L'\nmirror_dict['f_middle.03.R'] = 'f_middle.03.L'\nmirror_dict['palm.03.R'] = 'palm.03.L'\nmirror_dict['f_ring.01.R'] = 'f_ring.01.L'\nmirror_dict['f_ring.02.R'] = 'f_ring.02.L'\nmirror_dict['f_ring.03.R'] = 'f_ring.03.L'\nmirror_dict['palm.04.R'] = 'palm.04.L'\nmirror_dict['f_pinky.01.R'] = 'f_pinky.01.L'\nmirror_dict['f_pinky.02.R'] = 'f_pinky.02.L'\nmirror_dict['f_pinky.03.R'] = 'f_pinky.03.L'\nmirror_dict['hips.L'] = 'hips.R'\nmirror_dict['thigh.L'] = 'thigh.R'\nmirror_dict['shin.L'] = 'shin.R'\nmirror_dict['foot.L'] = 'foot.R'\nmirror_dict['toe.L'] = 'toe.R'\nmirror_dict['heel.L'] = 'heel.R'\nmirror_dict['hips.R'] = 'hips.L'\nmirror_dict['thigh.R'] = 'thigh.L'\nmirror_dict['shin.R'] = 'shin.L'\nmirror_dict['foot.R'] = 'foot.L'\nmirror_dict['toe.R'] = 'toe.L'\nmirror_dict['heel.R'] = 'heel.L'\n\n\ndef restore_frame_data(bone_num, bone_dict, frame_data, root_pos=[0.0, 0.0, 0.0]):\n '''\n フレーム情報からボーンを復元\n bone_num : ボーンの数\n bone_dict : キー : <ボーン名>\n データ : (<ボーン名>, <親ボーン名>, <ボーンの長さ>)\n frame_data : [<フレーム番号>, <ボーン名>, <Xvec>, <Yvec>, <Zvec>, ‥]\n '''\n\n # 復元先の座標\n #root_pos = [0.0, 0.0, 0.0]\n root_pos_4d = [root_pos[0], root_pos[1], root_pos[2], 1.0]\n\n new_dict = {}\n index = 1\n for i in range(bone_num):\n #bone_name = mirror_dict[frame_data[index]]\n #vecX = float(frame_data[index+1]) * (-1.0)\n bone_name = frame_data[index]\n vecX = float(frame_data[index+1])\n vecY = float(frame_data[index+2])\n vecZ = float(frame_data[index+3])\n index += 4\n parent_name = bone_dict[bone_name][1]\n bone_len = float(bone_dict[bone_name][2])\n # ==================================================\n # ボーンの位置の復元\n # ==================================================\n # 原点でボーンの傾きを復元\n T2 = np.array([vecX*bone_len, vecY*bone_len, vecZ*bone_len, 1])\n # --------------------------------------------------\n # カメラとhead(親のtail)の位置関係を取得\n # --------------------------------------------------\n if parent_name == '<none>':\n parent_tail_4d = root_pos_4d\n else:\n p_name, 
pp_name, parent_tail_4d = new_dict[parent_name]\n camera_pos = bpy.context.scene.camera.location\n camera_rz, camera_rx = get_theta2(parent_tail_4d, camera_pos.to_4d())\n # --------------------------------------------------\n # カメラとのhead(親のtail)の位置関係をによる傾きの補正\n # --------------------------------------------------\n T1 = rotation_x(camera_rx, T2)\n T0 = rotation_z(camera_rz, T1)\n T = move_to(parent_tail_4d, T0)\n new_dict[bone_name] = (bone_name, parent_name, T)\n # --------------------------------------------------\n # ボーンの位置を復元\n # --------------------------------------------------\n if not bone_name in curve_dict.keys():\n continue\n curve_name, point_index = curve_dict[bone_name]\n if parent_name == '<none>':\n move_bezier_point(curve_name, 0, root_pos)\n elif bone_name == 'shoulder.L' or bone_name == 'shoulder.R':\n move_bezier_point(curve_name, 0, [parent_tail_4d[0], parent_tail_4d[1], parent_tail_4d[2]])\n move_bezier_point(curve_name, point_index, [T[0], T[1], T[2]])\n\n\ndef read_bone_data(csv_file_path):\n \n # --------------------------------------------------\n bone_dict = dict()\n frame_data_list = []\n # --------------------------------------------------\n \n # CSVファイルから情報を読み込む\n bone_dict = dict()\n frame_data_list = []\n with open(csv_file_path, 'r', encoding='utf8') as f:\n reader = csv.reader(f)\n # ヘッダー情報からボーン数を取得\n head = next(reader)\n bone_num = int(head[1])\n head = next(reader)\n for i in range(bone_num):\n data = next(reader)\n bone_dict[data[0]] = data\n head = next(reader)\n for data in reader:\n frame_data_list.append(data)\n \n \n # --------------------------------------------------\n restore_frame_data(bone_num, bone_dict, frame_data_list[0])\n # --------------------------------------------------\n\n\nif __name__ == '__main__':\n # 出力先情報\n csv_file_path = '../predict_out/bone_info.csv'\n read_bone_data(csv_file_path)\n\n\n"
},
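ReadBoneData.py above derives angles such as `get_z_rotation_from_y` through an explicit six-way case analysis and later undoes them with the homogeneous rotation matrices. A quick standalone numpy check of that inverse property follows (standalone because the original imports `bpy`; the compact `arctan2` form below is an equivalent reformulation of the file's case analysis, verified case by case):

```python
# Check that the angle from get_z_rotation_from_y, rotated back around Z,
# maps the point onto the -Y half-plane (x ~ 0, y <= 0).
import numpy as np

def rotation_z(theta, obj):
    rz = np.array([[np.cos(theta), -np.sin(theta), 0, 0],
                   [np.sin(theta),  np.cos(theta), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])
    return np.dot(rz, obj)

def get_z_rotation_from_y(obj):
    return np.arctan2(obj[0], -obj[1])   # angle measured from the -Y direction

p = np.array([0.3, 0.8, 0.5, 1.0])       # homogeneous 4D point, as in the file
theta = get_z_rotation_from_y(p)
q = rotation_z(-theta, p)
assert abs(q[0]) < 1e-12 and q[1] <= 0   # back on the -Y half-plane
print(theta, q)
```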
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.71875,
"avg_line_length": 31,
"blob_id": "8617b488b70b420804d0571a996ee204e80501d7",
"content_id": "88dbae5307a04a936d96f99e51056ccbab832bf9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/V09/1_build.sh",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "docker build -t autotrain:0.9 .\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 53,
"blob_id": "bd8f51bde100626cfb6867a049f14ee91978b1f9",
"content_id": "27bb3eb31ccf029af4344a94b8e8cdb6788f4cce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 1,
"path": "/20171203/run.sh",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "python ReadImageAndBoneData.py ./weight_data_tempB.h5\n\n"
},
{
"alpha_fraction": 0.5423680543899536,
"alphanum_fraction": 0.575178325176239,
"avg_line_length": 31.757009506225586,
"blob_id": "2ea5ab22683f53eaa63c005f262d7ce2a5ec9d84",
"content_id": "c5b6ab224c93f734084c3d6bfcd151358b287836",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3573,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 107,
"path": "/V07/model_1_bone.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D\nfrom keras import optimizers\nfrom keras import regularizers\n\nfrom image_data import img_width, img_height, img_channel\nfrom image_data import img_monochrome, img_normalize\n\n# 入力データに関する情報\n\n\n\n# 正則化の値\nL1 = 0.001\nL2 = 0.001\n\n\ndef create_model(weight_file, target_bone_num):\n '''\n モデルの作成\n '''\n model_com = Sequential()\n model_com.add(Conv2D(16, (3, 3), padding='same', input_shape=(img_width, img_height, img_channel)\n , kernel_initializer='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_com.add(Activation('relu'))\n #model_com.add(MaxPooling2D(pool_size=(2, 2)))\n model_com.add(Dropout(0.25))\n\n model_com.add(Conv2D(32, (3, 3), strides=(2, 2)\n , kernel_initializer='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_com.add(Activation('relu'))\n #model_com.add(MaxPooling2D(pool_size=(2, 2)))\n model_com.add(Dropout(0.25))\n\n model_com.add(Conv2D(64, (3, 3), strides=(2, 2)\n , kernel_initializer='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_com.add(Activation('relu'))\n #model_com.add(MaxPooling2D(pool_size=(2, 2)))\n model_com.add(Dropout(0.25))\n\n # 128 * 128 * 128\n model_com.add(Conv2D(128, (3, 3), strides=(2, 2)\n , kernel_initializer='he_normal'\n ))\n model_com.add(Activation('relu'))\n model_com.add(Dropout(0.25))\n\n\n model_com.add(Flatten())\n #model_com.add(Dense(3042, activation='relu', init='he_normal'))\n #model_com.add(Dense(2048, activation='relu', init='he_normal'))\n #model_com.add(Dropout(0.5))\n\n models = []\n los_weights = []\n for i in range(target_bone_num):\n model_bone = Sequential()\n model_bone.add(model_com)\n\n model_bone.add(Dense(512, activation='relu', init='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_bone.add(Dropout(0.5))\n\n model_bone.add(Dense(512, activation='relu', init='he_normal'\n #,kernel_regularizer=regularizers.l2(L2)\n #,activity_regularizer=regularizers.l1(L1)\n ))\n model_bone.add(Dropout(0.5))\n\n model_bone.add(Dense(3, activation='linear', init='he_normal'))\n #model_bone.add(Dense(3, activation='tanh', init='glorot_normal'))\n #model_bone.add(Dense(3, activation='tanh', init='glorot_uniform'))\n\n models.append(model_bone.output)\n los_weights.append(1.0)\n\n model = Model(input=model_com.input, output=models)\n\n\n if weight_file != None:\n # 学習済みウエイトのロード\n print('=======================================')\n print('load weights from ' + weight_file)\n print('=======================================')\n model.load_weights(weight_file)\n\n model.summary()\n\n model.compile(loss='mean_squared_error',\n loss_weights=los_weights,\n optimizer=optimizers.Adam(),\n metrics=['accuracy'])\n\n return model\n"
},
{
"alpha_fraction": 0.464895635843277,
"alphanum_fraction": 0.46932321786880493,
"avg_line_length": 28.811321258544922,
"blob_id": "70c5717a29d29af6f30ef15a3a1e614b577e6418",
"content_id": "fb4d5100ab631df84f6140737843657de4d06f8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1713,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 53,
"path": "/20171203/ReadImageAndBoneData.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "from l_and_p import create_cnn_model \nfrom l_and_p import predicting\n\nimport sys\nimport os\nimport subprocess\n\n#----------------------------------------------------------------------------------\n# Main proc\n#----------------------------------------------------------------------------------\nif __name__ == '__main__':\n # 引数のチェック\n if len(sys.argv) == 1:\n print(\"Arg Error !!!\")\n print(\"usage: python ReadImageAndBone.py <weight_file>\")\n sys.exit()\n\n weight_file = None\n weight_file = sys.argv[1]\n\n '''\n 予測を実行\n weight_file : 指定された重みファイルからロードしてから予測を実行する\n '''\n # モデル作成\n model = create_cnn_model(weight_file) \n\n data_dir = '../predict_in'\n csv_file = '../predict_out/bone_info.csv'\n\n while True:\n print(\"##### 画像ファイルを格納して[Return]キーを押してください。#####\")\n sys.stdin.readline()\n\n files = os.listdir(data_dir)\n for file in files:\n if file == 'bone_info.csv':\n continue\n if file == '.save':\n continue\n print(\"file=\" + file)\n file_name, ext = os.path.splitext(file)\n frame_num = file_name.split(\"_\")[1]\n start_index = int(frame_num)\n end_index = start_index + 1\n batch_size = end_index - start_index\n predicting(model, data_dir, start_index, end_index, csv_file)\n \n args = ['/home/kino/blender-2.78c/blender',\n 'ReadBoneData.blend',\n '--python',\n 'ReadBoneData.py']\n res = subprocess.call(args)\n\n"
},
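The loop above recovers the frame index from each image file name via `file_name.split("_")[1]`, so it implicitly assumes a `<prefix>_<frame number>.<ext>` naming scheme. A tiny illustration of that convention (the file name here is hypothetical):

```python
# Hypothetical illustration of the file-name convention the loop relies on.
import os

file = 'img_0042.png'
file_name, ext = os.path.splitext(file)   # -> ('img_0042', '.png')
frame_num = file_name.split('_')[1]       # -> '0042'
start_index = int(frame_num)              # -> 42
print(start_index, start_index + 1)       # start and end frame indices
```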
{
"alpha_fraction": 0.6784080266952515,
"alphanum_fraction": 0.6944190263748169,
"avg_line_length": 30.22142791748047,
"blob_id": "d1779107f9f5333a99221e11daf57ba62a8023ab",
"content_id": "70cbb6b286c832734c611d23a173f8e9b046b687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 4790,
"license_type": "no_license",
"max_line_length": 625,
"num_lines": 140,
"path": "/V08/Dockerfile",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# Keras の実行環境\n\n# ベースイメージの設定\nFROM ubuntu:16.04\n\n# 作成者情報\nMAINTAINER Morio Kinoshita [email protected]\n\n# 作業ディレクトリの指定\nWORKDIR /root\n\n# 環境変数の設定\nENV PYENV_ROOT=/root/.pyenv \\\n PATH=/root/.pyenv/bin:/root/.pyenv/shims:$PATH\n\n# 前提ツールのインストール\nRUN apt-get update\nRUN apt-get install -y make \\\n build-essential \\\n libssl-dev \\\n zlib1g-dev \\\n libbz2-dev \\\n libreadline-dev \\\n libsqlite3-dev \\\n wget curl \\\n llvm \\\n libncurses5-dev \\\n libncursesw5-dev \\\n xz-utils \\\n git\n\n# pyenv のインストール\nRUN git clone https://github.com/yyuu/pyenv.git ~/.pyenv\nRUN echo 'export PYENV_ROOT=/root/.pyenv' >> ~/.bashrc\nRUN echo 'export PATH=/root/.pyenv/bin:$PATH' >> ~/.bashrc\nRUN echo 'eval \"$(pyenv init -)\"' >> ~/.bashrc\n\n# Pythonのインストール\n# ★共有ライブラリ作成可能のものをインストールする必要がある\nRUN env PYTHON_CONFIGURE_OPTS=\"--enable-shared\" pyenv install 3.6.0\nRUN pyenv global 3.6.0\n\n# 必要なpythonのパッケージをインストール。多分入ってるとおもう。\nRUN apt-get install -y \\\n python3-pip \\\n python3-dev\n\n# Blender ソースのダウンロード\n# <https://wiki.blender.org/index.php/Dev:Doc/Building_Blender/Linux/Ubuntu/CMake>\n# 1. Get the source\nRUN mkdir ~/blender-git && \\\n cd ~/blender-git && \\\n git clone https://git.blender.org/blender.git && \\\n cd blender && \\\n git submodule update --init --recursive && \\\n git submodule foreach git checkout master && \\\n git submodule foreach git pull --rebase origin master\n\n# 2. Install/Update the dependenies\nRUN apt-get update && \\\n apt-get install -y git build-essential sudo\n\nRUN echo 'Y' > /tmp/Y && \\\n cd ~/blender-git && \\\n ./blender/build_files/build_environment/install_deps.sh < /tmp/Y && \\\n rm /tmp/Y\n\n# 共有ライブラリ作成可能なpythonのライブラリ(libpython3.6m.a)を /opt/lib/python-3.6/lib にコピー\nRUN cp /root/.pyenv/versions/3.6.0/lib/python3.6/config-3.6m-x86_64-linux-gnu/libpython3.6m.a /opt/lib/python-3.6/lib/.\n\n# 3. Compile Blender with CMake\nRUN apt-get install -y cmake \\\n cmake-curses-gui\n\n#RUN cd ~/blender-git/blender && \\\n# make\n\n#############################\n# build bpy\nRUN cd ~/blender-git/blender && \\\n cp CMakeLists.txt CMakeLists.bk && \\\n sed -i -e 's/option(WITH_PYTHON_INSTALL \"Copy system python into the blender install folder\" ON)/option(WITH_PYTHON_INSTALL \"Copy system python into the blender install folder\" OFF)/' CMakeLists.txt && \\\n sed -i -e 's/option(WITH_PLAYER \"Build Player\" OFF)/option(WITH_PLAYER \"Build Player\" OFF)/' CMakeLists.txt && \\\n sed -i -e 's/option(WITH_PYTHON_MODULE \"Enable building as a python module which runs without a user interface, like running regular blender in background mode (experimental, only enable for development), installs to PYTHON_SITE_PACKAGES (or CMAKE_INSTALL_PREFIX if WITH_INSTALL_PORTABLE is enabled).\" OFF)/option(WITH_PYTHON_MODULE \"Enable building as a python module which runs without a user interface, like running regular blender in background mode (experimental, only enable for development), installs to PYTHON_SITE_PACKAGES (or CMAKE_INSTALL_PREFIX if WITH_INSTALL_PORTABLE is enabled).\" ON)/' CMakeLists.txt && \\\n make bpy\n\n# bpy.so をインストール\nRUN cp /root/blender-git/build_linux_bpy/bin/bpy.so /root/.pyenv/versions/3.6.0/lib/python3.6/site-packages/. && \\\n cp -rf /opt/lib/python-3.6/lib/python3.6/site-packages/2.79 /root/.pyenv/versions/3.6.0/lib/python3.6/site-packages/.\n\n\n# TensorFlowのインストール\nRUN [\"/bin/bash\", \"-l\", \"-c\", \"cd ~ && \\\n . 
~/.bashrc && \\\n pip3 install tensorflow\"]\n# RUN pip3 install tensorflow-gpu\n\n# Kerasのインストール\nRUN pip3 install keras\n\n# 必要な前提のインストール\nRUN pip3 install numpy && \\\n pip3 install pillow && \\\n pip3 install h5py\n\nRUN mkdir /home/kino && \\\n mkdir /home/kino/makehuman && \\\n mkdir /home/kino/makehuman/v1 && \\\n mkdir /home/kino/makehuman/v1/MyData\n\nCOPY textures /home/kino/makehuman/v1/MyData/textures/\n\n# 学習のデータとプログラムを転送\nCOPY BraidHair.blend ./\nCOPY CasualSuit.blend ./\nCOPY HumanFemale.blend ./\nCOPY HumanFemaleNoClothes.blend ./\nCOPY HumanMale.blend ./\nCOPY HumanMaleNoClothes.blend ./\nCOPY HumanOther.blend ./\nCOPY MaleSuit.blend ./\nCOPY ReadBoneData.blend ./\nCOPY Robot_1.blend ./\nCOPY Robot_2.blend ./\nCOPY Robot_3.blend ./\n\nCOPY GenBoneData.py ./\nCOPY ReadBoneData.py ./\nCOPY PredictedBoneData.py ./\nCOPY image_data.py ./\nCOPY myutil.py ./\nCOPY myfilter.py ./\n\nCOPY model_1_bone.py ./\nCOPY model_all_bone.py ./\n\nCOPY l_and_p.py ./\n\n# 学習の実行\nCMD /root/.pyenv/shims/python l_and_p.py l\n\n"
},
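A quick way to confirm that the `bpy` module this image builds is actually importable is a smoke test run inside the container with the pyenv Python. This snippet is an assumed check, not part of the repo:

```python
# Assumed smoke test: run with /root/.pyenv/shims/python inside the container.
import bpy                        # resolves to the bpy.so installed above
print(bpy.app.version_string)     # Blender version string, e.g. a 2.79 build
```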
{
"alpha_fraction": 0.5559332370758057,
"alphanum_fraction": 0.6152657866477966,
"avg_line_length": 43.328765869140625,
"blob_id": "31d6c9686d649707edb777b8aa04be13f952d098",
"content_id": "6b168828babee66e89ff88c7fd4c239ad135aaa0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6540,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 146,
"path": "/20180127/model_1_bone.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D, Concatenate\nfrom keras import optimizers\nfrom keras import regularizers\n\nfrom image_data import img_width, img_height, img_channel\nfrom image_data import img_monochrome, img_normalize\n\n# 入力データに関する情報\n\n\n\n# 正則化の値\nL1 = 0.001\nL2 = 0.001\n\n\ndef create_model(weight_file, target_bone_num):\n '''\n モデルの作成\n '''\n\n '''\n input_img = Input(shape=(img_width, img_height, img_channel), dtype='float', name='main_input')\n\n conv_1 = Conv2D(16, (3, 3), padding='same', kernel_initializer='he_normal', activation='relu', name='conv_1')(input_img)\n conv_1 = Dropout(0.25)(conv_1)\n\n conv_2 = Conv2D(32, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_2')(conv_1)\n conv_2 = Dropout(0.25)(conv_2)\n\n conv_3 = Conv2D(64, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_3')(conv_2)\n conv_3 = Dropout(0.25)(conv_3)\n\n conv_4 = Conv2D(128, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_4')(conv_3)\n conv_4 = Dropout(0.25)(conv_4)\n\n flat = Flatten()(conv_4)\n\n outputs = []\n los_weights = []\n for i in range(target_bone_num):\n dens_1 = Dense(512, activation='relu', init='he_normal')(flat)\n dens_1 = Dropout(0.5)(dens_1)\n\n dens_2 = Dense(512, activation='relu', init='he_normal')(dens_1)\n dens_2 = Dropout(0.5)(dens_2)\n \n dens_out = Dense(3, activation='linear', init='he_normal')(dens_2)\n\n outputs.append(dens_out)\n los_weights.append(1.0)\n\n model = Model(input=input_img, outputs=outputs)\n '''\n\n input_main = Input(shape=(img_width, img_height, img_channel), dtype='float', name='main_input')\n input_data = Input(shape=(img_width, img_height, img_channel), dtype='float', name='data_input')\n\n ##### input_main\n conv_main_10 = Conv2D(16, (3, 3), padding='same', kernel_initializer='he_normal', name='conv_main_10')(input_main)\n conv_main_11 = Conv2D( 1, (1, 1), padding='same', kernel_initializer='ones', name='conv_main_11')(input_main)\n '''\n conv_main_1 = Concatenate(axis=3)([conv_main_10, conv_main_11])\n conv_main_1 = Dropout(0.25)(conv_main_1)\n\n conv_main_20 = Conv2D(32, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_main_20')(conv_main_1)\n conv_main_21 = Conv2D( 1, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_main_21')(input_main)\n conv_main_2 = Concatenate(axis=3)([conv_main_20, conv_main_21])\n conv_main_2 = Dropout(0.25)(conv_main_2)\n\n conv_main_30 = Conv2D(64, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_main_30')(conv_main_2)\n conv_main_31 = Conv2D( 1, (1, 1), padding='same', strides=(4, 4), kernel_initializer='ones', name='conv_main_31')(input_main)\n conv_main_3 = Concatenate(axis=3)([conv_main_30, conv_main_31])\n conv_main_3 = Dropout(0.25)(conv_main_3)\n\n conv_main_40 = Conv2D(128, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_main_40')(conv_main_3)\n conv_main_41 = Conv2D( 1, (1, 1), padding='same', strides=(8, 8), kernel_initializer='ones', name='conv_main_41')(input_main)\n conv_main_4 = Concatenate(axis=3)([conv_main_40, conv_main_41])\n conv_main_4 = Dropout(0.25)(conv_main_4)\n '''\n\n ##### input_data\n conv_data_10 = Conv2D(16, (3, 
3), padding='same', kernel_initializer='he_normal', name='conv_data_10')(input_data)\n conv_data_11 = Conv2D( 1, (1, 1), padding='same', kernel_initializer='ones', name='conv_data_11')(input_data)\n\n ## merge ##\n #conv_data_1 = Concatenate(axis=3)([conv_data_10, conv_data_11])\n conv_data_1 = Concatenate(axis=3)([conv_data_10, conv_data_11, conv_main_10, conv_main_11])\n\n conv_data_1 = Dropout(0.25)(conv_data_1)\n\n conv_data_20 = Conv2D(32, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_data_20')(conv_data_1)\n conv_data_21 = Conv2D( 1, (1, 1), padding='same', strides=(2, 2), kernel_initializer='ones', name='conv_data_21')(input_data)\n conv_data_2 = Concatenate(axis=3)([conv_data_20, conv_data_21])\n conv_data_2 = Dropout(0.25)(conv_data_2)\n\n conv_data_30 = Conv2D(64, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_data_30')(conv_data_2)\n conv_data_31 = Conv2D( 1, (1, 1), padding='same', strides=(4, 4), kernel_initializer='ones', name='conv_data_31')(input_data)\n conv_data_3 = Concatenate(axis=3)([conv_data_30, conv_data_31])\n conv_data_3 = Dropout(0.25)(conv_data_3)\n\n conv_data_40 = Conv2D(128, (3, 3), padding='same', strides=(2, 2), kernel_initializer='he_normal', activation='relu', name='conv_data_40')(conv_data_3)\n conv_data_41 = Conv2D( 1, (1, 1), padding='same', strides=(8, 8), kernel_initializer='ones', name='conv_data_41')(input_data)\n conv_data_4 = Concatenate(axis=3)([conv_data_40, conv_data_41])\n conv_data_4 = Dropout(0.25)(conv_data_4)\n\n #### Flatten\n #conv_concatenate = Concatenate(axis=3)([conv_main_4, conv_data_4])\n #flat = Flatten()(conv_concatenate)\n flat = Flatten()(conv_data_4)\n\n #### Dense\n outputs = []\n los_weights = []\n for i in range(target_bone_num):\n dens_1 = Dense(512, activation='relu', init='he_normal')(flat)\n dens_1 = Dropout(0.5)(dens_1)\n\n dens_2 = Dense(512, activation='relu', init='he_normal')(dens_1)\n dens_2 = Dropout(0.5)(dens_2)\n \n dens_out = Dense(3, activation='linear', init='he_normal')(dens_2)\n\n outputs.append(dens_out)\n los_weights.append(1.0)\n\n model = Model(inputs=[input_main, input_data], outputs=outputs)\n\n if weight_file != None:\n # 学習済みウエイトのロード\n print('=======================================')\n print('load weights from ' + weight_file)\n print('=======================================')\n model.load_weights(weight_file)\n\n model.summary()\n\n model.compile(loss='mean_squared_error',\n loss_weights=los_weights,\n optimizer=optimizers.Adam(),\n metrics=['accuracy'])\n\n return model\n"
},
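The functional-API model above feeds a 'main' image and a 'data' image through parallel convolution branches and merges their feature maps channel-wise with `Concatenate(axis=3)` before the dense heads. A minimal runnable sketch of that two-input pattern, assuming TensorFlow 2.x / `tf.keras` (shapes and layer sizes are illustrative, not the file's values):

```python
# Minimal sketch of the two-input, channel-concatenated pattern (tf.keras assumed).
import numpy as np
from tensorflow.keras import layers, models

main_in = layers.Input(shape=(32, 32, 1), name='main_input')
data_in = layers.Input(shape=(32, 32, 1), name='data_input')
a = layers.Conv2D(8, (3, 3), padding='same', activation='relu')(main_in)
b = layers.Conv2D(8, (3, 3), padding='same', activation='relu')(data_in)
merged = layers.Concatenate(axis=3)([a, b])          # channel-wise merge
out = layers.Dense(3, activation='linear')(layers.Flatten()(merged))
model = models.Model(inputs=[main_in, data_in], outputs=out)
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit([np.zeros((2, 32, 32, 1)), np.zeros((2, 32, 32, 1))],
          np.zeros((2, 3)), epochs=1, verbose=0)
```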
{
"alpha_fraction": 0.485619455575943,
"alphanum_fraction": 0.5049778819084167,
"avg_line_length": 31,
"blob_id": "506424f71c9a5ceded016b9eb53795fbd7fcb3bd",
"content_id": "c2f8cd7ea73d857965767bc979fa45b93a45ba96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3664,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 113,
"path": "/V08/z_test.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "import myutil\nimport myfilter\nimport GenBoneData\nimport numpy as np\nfrom PIL import Image\nimport time\n\ndef show_img(img_data):\n width, height, chanel = img_data.shape\n max = img_data.max()\n if max > 1.0:\n img_data = img_data / 255\n if chanel == 1:\n w_data = np.zeros((width, height, 3))\n for w in range(width):\n for h in range(height):\n for c in range(3):\n w_data[w, h, c] = img_data[w, h, 0]\n if img_data[w, h, 0] == 0: # 0 の所はグリーンで表示する\n w_data[w, h, 1] = 1.0\n else:\n #w_data = img_data\n w_data = np.zeros((width, height, chanel))\n for w in range(width):\n for h in range(height):\n is_alpha = True\n for c in range(chanel):\n w_data[w, h, c] = img_data[w, h, c]\n if w_data[w, h, c] != 0:\n is_alpha = False\n if is_alpha == True:\n w_data[w, h, 1] = 1.0 # 0 の所はグリーンで表示する\n\n img = (w_data*255).astype('uint8')\n im = Image.fromarray(img)\n im.show()\n\n\n\n\ntrain_blender_file = './HumanOther.blend'\ntrain_data_dir = '/home/kino/Docker/AutoTraining/V01/OUT_DIR/MyData/validation'\nimg_width = 256\nimg_height = 256\nstart = 0\nend = 1\nimg_monochrome = True\nimg_normalize = True\ntarget_bone_name = 'ALL'\ndivided = True\nresize_max = 0.1\nmove_x_max = 0.1\nmove_y_max = 0.1\n\n\n\ntrain_datagen = myutil.MyDataGenerator(train_data_dir, width=img_width, height=img_height)\n\nimg_filters1 = [\n myfilter.gen_img_resize_filter(256, 256),\n myfilter.gen_img_region_filter(),\n myfilter.gen_img_zoom_filter(1),\n myfilter.gen_img_move_filter(0, 0)\n ]\nimg_filters2 = [\n myfilter.gen_img_resize_filter(256, 256),\n myfilter.gen_img_region_filter(),\n myfilter.gen_img_zoom_filter(1),\n myfilter.gen_img_mirror_x_filter(),\n myfilter.gen_img_move_filter(0, 0)\n ]\ndata_filters1 = [\n myfilter.gen_monochrome_filter(),\n #myfilter.gen_nega_filter(),\n #myfilter.gen_region_filter(),\n myfilter.gen_normalize_filter(),\n ]\ndata_filters2 = [\n myfilter.gen_monochrome_filter(),\n #myfilter.gen_nega_filter(),\n #myfilter.gen_region_filter(),\n myfilter.gen_normalize_filter()\n ]\n\nprint(\"gen_data call\")\nx_train, y_train = train_datagen.gen_data_test(start, end,\n target_bone=target_bone_name,\n divided=divided,\n img_filters=img_filters1,\n data_filters=data_filters1)\nprint(\"gen_data end\")\n#print(str(x_train.shape))\n\nfor i in range(x_train.shape[0]):\n print(\"x.max()=\" + str(x_train[i].max()))\n show_img(x_train[i])\n #print(str(x_train[i]))\n time.sleep(3)\nprint(\"gen_data call\")\n\nx_train, y_train = train_datagen.gen_data_test(start, end,\n target_bone=target_bone_name,\n divided=divided,\n img_filters=img_filters2,\n data_filters=data_filters2)\nprint(\"gen_data end\")\n#print(str(x_train.shape))\n\nfor i in range(x_train.shape[0]):\n print(\"x.max()=\" + str(x_train[i].max()))\n show_img(x_train[i])\n #print(str(x_train[i]))\n time.sleep(3)\n"
},
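`show_img` in z_test.py above converts a single-channel array to RGB pixel by pixel and paints exact-zero pixels green. A vectorized numpy equivalent of that intent (a hypothetical helper, not from the repo) avoids the triple loop:

```python
# Vectorized equivalent of show_img's per-pixel loops for the 1-channel case:
# broadcast the channel to grey RGB, then paint exact-zero pixels green.
import numpy as np

def to_rgb_with_green_zeros(img):        # img: (w, h, 1) floats in [0, 1]
    rgb = np.repeat(img, 3, axis=2)      # grey RGB copy of the channel
    rgb[img[:, :, 0] == 0] = [0.0, 1.0, 0.0]
    return rgb

print(to_rgb_with_green_zeros(np.zeros((2, 2, 1))))
```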
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.71875,
"avg_line_length": 31,
"blob_id": "6f8bff917c08a22962a4c07629f1c767d9a84046",
"content_id": "660c4facf433924391eac371fdf7d7c137a9386d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/V08/1_build.sh",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "docker build -t autotrain:0.8 .\n"
},
{
"alpha_fraction": 0.48419007658958435,
"alphanum_fraction": 0.49664369225502014,
"avg_line_length": 34.15838623046875,
"blob_id": "fc7ae0d41955320087603f554ac76a41e45ae660",
"content_id": "3bc0f3b64bea82bf7b6e9aa4515b8c5fa5c4da61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12184,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 322,
"path": "/20180127/l_and_p.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport bpy\nimport csv\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport sys\nimport subprocess\n\nimport model_all_bone\nimport model_1_bone\nimport PredictedBoneData\n\nimport image_data\nfrom image_data import img_width, img_height, img_channel\n\nimport myutil\nimport time\n\n#OUT_DIR = '/root/OUT_DIR'\nOUT_DIR = '/media/kino/HD-LBU2/temp/OUT_DIR'\n\nPREDICT_LAVEL_FILE_PATH='./bone_info.csv'\n\n# 処理対象ボーン情報\n#target_bone_name = 'ALL'\n#target_bone_name = 'forearm.R'\ntarget_bone_name = 'thigh.L'\n\nall_bone_num = 20\nif target_bone_name == 'ALL':\n # target_bone_num = 64\n target_bone_num = all_bone_num\nelse:\n target_bone_num = 1\n\n# 入力データに関する情報\nimage_gen_dir = os.path.join(OUT_DIR, 'MyData/train/GenData')\nif not os.path.isdir(image_gen_dir):\n os.makedirs(image_gen_dir)\n\nvalidation_data_dir = os.path.join(OUT_DIR, 'MyData/validation')\nif not os.path.isdir(validation_data_dir):\n os.makedirs(validation_data_dir)\n\n# 出力に関するデータ\nresult_dir = os.path.join(OUT_DIR, 'results')\nif not os.path.isdir(result_dir):\n os.makedirs(result_dir)\n\nhistory_file_path = os.path.join(result_dir, 'history.csv')\n\n# モデルに関するデータ\n#timestep = 1 # ★汎化性能のために1でやってみる\n\n# 学習に関するデータ\n\n# 学習データの生成方法\ndata_gen_auto = True\n\n# 学習データに関するデータ\ndata_start_index = 0 # 学習に使用するデータの先頭インデックス 0\nif data_gen_auto == True:\n data_end_index = 100 # 学習に使用するデータの末尾インデックス ★動的学習データを使用する場合は100とする\nelse:\n data_end_index = 1000 # 学習に使用するデータの末尾インデックス ★静的学習データを使用する場合は1000とする\n\nbatch_size = 100\n\n# 検証データのインデックス\nsample_index = 0\n\n\n\n\ndef learning(weight_file):\n '''\n 学習を実行\n weight_file\n None : 重みを初期値から学習する\n その他 : 指定された重みファイルからロードしてから学習を続行する\n '''\n # モデル作成\n if target_bone_num == 1:\n model = model_1_bone.create_model(weight_file, target_bone_num) \n else:\n model = model_all_bone.create_model(weight_file, target_bone_num) \n\n epoch_counter = 0\n while True:\n # トレーニングデータの取得\n if data_gen_auto == True:\n train_list, train_data_name = image_data.get_train_data_auto(image_gen_dir, target_bone_name,\n data_start_index, data_end_index, batch_size, gen_master_image=True)\n else:\n train_list, train_data_name = image_data.get_train_data_static(target_bone_name)\n \n # 検証用のデータ\n x_vali_main, x_vali, y_vali = image_data.get_validation_data(validation_data_dir, target_bone_name,\n data_start_index, batch_size, gen_master_image=True)\n \n # 具体的な誤差を表示するためのデータ\n w_vali_main = np.array(x_vali_main).reshape(batch_size, img_width, img_height, img_channel)\n w_vali = np.array(x_vali).reshape(batch_size, img_width, img_height, img_channel)\n\n # テストデータのシーケンスが変わったのでRNNレイヤーのステータスをクリア\n # model.reset_states()\n \n start_no = 1\n for x_train_main, x_train, y_train in train_list:\n #w_train = np.array(x_train[sample_index:sample_index+batch_size]).reshape(batch_size, timestep, img_width, img_height, img_channel)\n w_train_main = np.array(x_train_main[sample_index]).reshape(1, img_width, img_height, img_channel)\n w_train = np.array(x_train[sample_index]).reshape(1, img_width, img_height, img_channel)\n epoch_counter+=1\n print(\"==========================================================\")\n print(\"# Epoch : \" + str(epoch_counter) + \n \" [\" + train_data_name + \"](\" + str(start_no) + \" - \" + str(start_no+batch_size-1) + \")\")\n start_no += batch_size\n # Fine-tuning\n history = model.fit([x_train_main, x_train], y_train, epochs=1,\n batch_size=batch_size, validation_data=([x_vali_main, x_vali], y_vali), verbose=0)\n \n # モデルの重み情報のセーブ\n '''\n dt = datetime.now()\n dt_str = 
dt.strftime('%Y%m%d_%H%M%S')\n if epoch_counter % 100 == 0:\n weight_file_name = 'weight_data_' + dt_str + '.h5'\n else:\n weight_file_name = 'weight_data_temp.h5'\n\n model.save_weights(os.path.join(result_dir, weight_file_name))\n '''\n if epoch_counter % (2 * (100/batch_size)) == 0:\n weight_file_name = 'weight_data_tempB.h5'\n model.save_weights(os.path.join(result_dir, weight_file_name))\n elif epoch_counter % (1 * (100/batch_size)) == 0:\n weight_file_name = 'weight_data_tempA.h5'\n model.save_weights(os.path.join(result_dir, weight_file_name))\n \n # 具体的な誤差を表示\n gosa_list = []\n train_preds = model.predict([w_train_main, w_train])\n train_loss_str = ''\n if target_bone_num == 1:\n #print('###### kino : ' + str(len(train_preds)))\n #print('###### kino : ' + str(len(train_preds[0])))\n #print('###### kino : ' + str(len(train_preds[0][0])))\n #print('###### kino : ' + str(len(y_train)))\n #print('###### kino : ' + str(len(y_train[0])))\n #print('###### kino : ' + str(len(y_train[0][0])))\n for i in range(0, 3):\n train_gosa = train_preds[0][i]-y_train[0][sample_index][i]\n train_loss_str += \"[{0:.5f}]\".format(train_gosa)\n gosa_list.append(train_gosa)\n else:\n for i in range(0, 3):\n train_gosa = train_preds[7][0][i]-y_train[7][sample_index][i]\n train_loss_str += \"[{0:.5f}]\".format(train_gosa)\n gosa_list.append(train_gosa)\n print('predict(train) : ' + train_loss_str)\n \n valid_preds = model.predict([w_vali_main, w_vali])\n valid_loss_str = ''\n if target_bone_num == 1:\n for i in range(0, 3):\n valid_gosa = valid_preds[0][i]-y_vali[0][0][i]\n valid_loss_str += \"[{0:.5f}]\".format(valid_gosa)\n gosa_list.append(valid_gosa)\n else:\n for i in range(0, 3):\n valid_gosa = valid_preds[7][0][i]-y_vali[7][0][i]\n valid_loss_str += \"[{0:.5f}]\".format(valid_gosa)\n gosa_list.append(valid_gosa)\n print('predict(valid) : ' + valid_loss_str)\n \n # history情報のセーブ\n hist_dic = history.history\n\n # hist_dic のキー\n # val_dense_nnn_loss\n # val_dense_nnn_acc\n # dense_nnn_loss\n # dense_nnn_acc\n keys = hist_dic.keys()\n val_loss = 0.0\n val_acc = 0.0\n loss = 0.0\n acc = 0.0\n val_loss_n = 0.0\n val_acc_n = 0.0\n loss_n = 0.0\n acc_n = 0.0\n count = 0\n for key in keys:\n '''\n print(\"key=\"+str(key))\n print(\"len(hist_dic[key])=\" + str(len(hist_dic[key])))\n '''\n if key.startswith(\"val_\") and key.endswith(\"_loss\"):\n val_loss += float(hist_dic[key][0])\n val_loss_n += 1\n elif key.startswith(\"val_\") and key.endswith(\"_acc\"):\n val_acc += float(hist_dic[key][0])\n val_acc_n += 1\n elif key.endswith(\"loss\"):\n loss += float(hist_dic[key][0])\n loss_n += 1\n elif key.endswith(\"acc\"):\n acc += float(hist_dic[key][0])\n acc_n += 1\n #count += 1\n #count /= 4\n if val_loss_n != 0:\n val_loss /= val_loss_n\n if val_acc_n != 0:\n val_acc /= val_acc_n\n if loss_n != 0:\n loss /= loss_n\n if acc_n != 0:\n acc /= acc_n\n\n '''\n print(\"val_loss_n = \" + str(val_loss_n))\n print(\"val_acc_n = \" + str(val_acc_n))\n print(\"loss_n = \" + str(loss_n))\n print(\"acc_n = \" + str(acc_n))\n '''\n\n print(\"loss=\" + str(loss))\n print(\"acc =\" + str(acc))\n print(\"val_loss=\" + str(val_loss))\n print(\"val_acc =\" + str(val_acc))\n\n dt = datetime.now()\n dt_str = dt.strftime('%Y%m%d_%H%M%S')\n csv_info = [dt_str, epoch_counter,\n loss, acc, val_loss, val_acc]\n csv_info.extend(gosa_list)\n with open(history_file_path, \"a\") as f:\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerow(csv_info)\n \ndef predicting(model, data_dir, start_index, end_index,\n bone_name, bone_num, 
img_mirror_x=False, use_master_image=False):\n '''\n Run prediction.\n img_mirror_x : apply the prediction to an image mirrored along the x axis.\n '''\n\n # Data for validation\n x, bone_list = image_data.get_predict_data(data_dir, bone_name,\n PREDICT_LAVEL_FILE_PATH,\n start_index, end_index, img_mirror_x)\n\n if use_master_image == True:\n # Use the master image\n x_master, bone_list = image_data.get_predict_data(data_dir, bone_name,\n PREDICT_LAVEL_FILE_PATH,\n 0, 1, False)\n\n #if img_mirror_x != False:\n #myutil.show_img(x_master[0])\n #time.sleep(2)\n #myutil.show_img(x[0])\n #time.sleep(2)\n\n # Run the prediction\n preds = model.predict([x_master, x])\n else:\n # Run the prediction\n preds = model.predict(x)\n\n # Store the predicted bone data in an object\n bone_data = PredictedBoneData.PredictedBoneData(bone_list, preds, bone_name, bone_num)\n\n return bone_data\n\n#----------------------------------------------------------------------------------\n# Main proc\n#----------------------------------------------------------------------------------\nif __name__ == '__main__':\n # Check the arguments\n if len(sys.argv) == 1:\n print(\"Arg Error !!!\")\n print(\"usage: python l_and_p.py {l [<weight_file>] | p <weight_file>}\")\n sys.exit()\n\n if sys.argv[1] != 'l' and sys.argv[1] != 'p':\n print(\"Arg Error !!!\")\n print(\"usage: python l_and_p.py {l [<weight_file>] | p <weight_file>}\")\n sys.exit()\n\n weight_file = None\n if len(sys.argv) > 2:\n weight_file = sys.argv[2]\n\n if sys.argv[1] == 'l':\n # Run learning\n learning(weight_file)\n else:\n # Run prediction\n if weight_file == None:\n print(\"Arg Error !!!\")\n print(\"usage: python l_and_p.py {l [<weight_file>] | p <weight_file>}\")\n sys.exit()\n\n data_dir = os.path.join(OUT_DIR, 'MyData/train/Male')\n if not os.path.isdir(data_dir):\n os.makedirs(data_dir)\n\n out_dir = os.path.join(OUT_DIR,'predict_out')\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n out_file = os.path.join(out_dir,'bone_info.csv')\n \n start_index = 1\n end_index = 100\n\n batch_size = end_index - start_index\n predicting(weight_file, data_dir, start_index, end_index, out_file)\n\n"
},
{
"alpha_fraction": 0.875,
"alphanum_fraction": 0.875,
"avg_line_length": 30,
"blob_id": "77418d2b41ec8d11d5c767fbe948b960830d9252",
"content_id": "36366ff3bf3994c95399d74ba1fd911b2a721c45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/20180127/run.sh",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "python ReadImageAndBoneData.py\n\n"
},
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.71875,
"avg_line_length": 31,
"blob_id": "f9d718b7c64aa8fc1f47e773e9b547188bf9e4c4",
"content_id": "bbb80fb44e4e8cf22c6438add8b8e966c659702f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/V07/1_build.sh",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "docker build -t autotrain:0.7 .\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 15.666666984558105,
"blob_id": "926c68025d6472fae57254cb1ed00be8add93983",
"content_id": "d4db87d2432c4c97df4e4ecb5e89dd847db813c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/README.md",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# hello-world\nJust another repository\nfirst edit.\n"
},
{
"alpha_fraction": 0.47300469875335693,
"alphanum_fraction": 0.5077129602432251,
"avg_line_length": 33.27011489868164,
"blob_id": "9e5ce2dfb13e81120e3a92bdfb5fd1f3f9503eee",
"content_id": "c5be7e5eb0b29d271ebab03a9f7a556994d2bc95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5964,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 174,
"path": "/V07/display_info/display_info.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "import csv\nimport matplotlib.pyplot as plt\n\nmin=20\n\nmax_len = 1000\n\nOUT_DIR_PATH = '/media/kino/HD-LBU2/MyPrograms/Docker/AutoTraining/V07/OUT_DIR'\n\n##################################\nold_file_list = [\n# (OUT_DIR_PATH + '/results/history_01.csv', 1),\n# (OUT_DIR_PATH + '/results/history_02.csv', 1),\n# (OUT_DIR_PATH + '/results/history_03.csv', 1),\n# (OUT_DIR_PATH + '/results/history_04.csv', 1),\n# (OUT_DIR_PATH + '/results/history_05.csv', 1),\n# (OUT_DIR_PATH + '/results/history_06.csv', 1),\n# (OUT_DIR_PATH + '/results/history_07.csv', 1),\n# (OUT_DIR_PATH + '/results/history_08.csv', 1),\n# (OUT_DIR_PATH + '/results/history_09.csv', 1),\n# (OUT_DIR_PATH + '/results/history_10.csv', 1),\n# (OUT_DIR_PATH + '/results/history_11.csv', 1),\n\n# (OUT_DIR_PATH + '/results/history_12.csv', 1),\n# (OUT_DIR_PATH + '/results/history_13.csv', 1),\n# (OUT_DIR_PATH + '/results/history_14.csv', 1),\n# (OUT_DIR_PATH + '/results/history_15.csv', 1),\n# (OUT_DIR_PATH + '/results/history_16.csv', 1),\n# (OUT_DIR_PATH + '/results/history_17.csv', 1),\n# (OUT_DIR_PATH + '/results/history_18.csv', 1),\n# (OUT_DIR_PATH + '/results/history_19.csv', 1),\n# (OUT_DIR_PATH + '/results/history_20.csv', 1),\n# (OUT_DIR_PATH + '/results/history_21.csv', 1),\n# (OUT_DIR_PATH + '/results/history_22.csv', 1),\n# (OUT_DIR_PATH + '/results/history_23.csv', 1),\n# (OUT_DIR_PATH + '/results/history_24.csv', 1),\n# (OUT_DIR_PATH + '/results/history_25.csv', 100/5),\n# (OUT_DIR_PATH + '/results/history_26.csv', 1),\n# (OUT_DIR_PATH + '/results/history_27.csv', 1),\n (OUT_DIR_PATH + '/results/history_28.csv', 1),\n# (OUT_DIR_PATH + '/results/history_29.csv', 1),\n# (OUT_DIR_PATH + '/results/history_30.csv', 1),\n# (OUT_DIR_PATH + '/results/history_31.csv', 1),\n# (OUT_DIR_PATH + '/results/history_32.csv', 1),\n# (OUT_DIR_PATH + '/results/history_33.csv', 1),\n (OUT_DIR_PATH + '/results/history_34.csv', 1),\n# (OUT_DIR_PATH + '/results/history_35.csv', 1),\n (OUT_DIR_PATH + '/results/history_36.csv', 1),\n (OUT_DIR_PATH + '/results/history_37.csv', 1),\n (OUT_DIR_PATH + '/results/history_38.csv', 1),\n (OUT_DIR_PATH + '/results/history_39.csv', 1),\n (OUT_DIR_PATH + '/results/history_40.csv', 1)\n ]\n\n\ndef display_info(info_file, step, max, min=1):\n times = []\n epochs = []\n loss = []\n accs = []\n val_loss = []\n val_accs = []\n \n x_len = 0\n line_count = 0\n loss_wk = 0.0\n accs_wk = 0.0\n val_loss_wk = 0.0\n val_accs_wk = 0.0\n with open(info_file, \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n line_count+=1\n loss_wk += float(row[2])\n accs_wk += float(row[3])\n val_loss_wk += float(row[4])\n val_accs_wk += float(row[5])\n if step!=1 and line_count%step != 0:\n continue\n x_len += 1\n if min > x_len:\n loss_wk = 0.0\n accs_wk = 0.0\n val_loss_wk = 0.0\n val_accs_wk = 0.0\n continue\n times.append(row[0])\n epochs.append(int(row[1]))\n loss.append(loss_wk/step)\n accs.append(accs_wk/step)\n val_loss.append(val_loss_wk/step)\n val_accs.append(val_accs_wk/step)\n\n loss_wk = 0.0\n accs_wk = 0.0\n val_loss_wk = 0.0\n val_accs_wk = 0.0\n\n if max != 0 and x_len >= max:\n break\n\n if max > 0 and max > x_len:\n for i in range(x_len+1, max+1):\n if min > i:\n continue\n loss.append(0.0)\n accs.append(0.0)\n val_loss.append(0.0)\n val_accs.append(0.0)\n x_len = max\n\n return (x_len, loss, accs, val_loss, val_accs)\n\n(info_len, info_loss, info_accs, info_val_loss, info_val_accs) = display_info(OUT_DIR_PATH + '/results/history.csv', 100/100, max_len, min=min)\n\nif max_len == 0:\n max_len = 
info_len\n\nloss_list = []\naccs_list = []\nval_loss_list = []\nval_accs_list = []\nfor path, step in old_file_list:\n w_len, loss, accs, val_loss, val_accs = display_info(path, step, max_len, min)\n name = path[path.rfind('/')+1:]\n loss_list.append((name, loss))\n accs_list.append((name, accs))\n val_loss_list.append((name, val_loss))\n val_accs_list.append((name, val_accs))\n #print('name:' + name)\n #print('loss:' + str(len(loss)))\n\nx = range(min, max_len+1)\n\nplt.figure(1)\nplt.clf()\n\nplt.subplot(4, 1, 1)\n#plt.xlabel('episodes')\nplt.ylabel('loss')\nfor name, loss in loss_list:\n plt.plot(x, loss)\n #plt.plot(x, loss, label=name)\nplt.plot(x, info_loss)\n#plt.plot(x, info_loss, label='history.csv')\nplt.legend(loc=2)\n\nplt.subplot(4, 1, 2)\n#plt.xlabel('episodes')\nplt.ylabel('accs')\nfor name, accs in accs_list:\n plt.plot(x, accs, label=name)\nplt.plot(x, info_accs, label='history.csv')\nplt.legend(loc=2)\n\nplt.subplot(4, 1, 3)\n#plt.xlabel('episodes')\nplt.ylabel('val_loss')\nfor name, val_loss in val_loss_list:\n plt.plot(x, val_loss)\n #plt.plot(x, val_loss, label=name)\nplt.plot(x, info_val_loss)\n#plt.plot(x, info_val_loss, label='history.csv')\nplt.legend(loc=2)\n\nplt.subplot(4, 1, 4)\nplt.xlabel('episodes')\nplt.ylabel('val_accs')\nfor name, val_accs in val_accs_list:\n plt.plot(x, val_accs, label=name)\nplt.plot(x, info_val_accs, label='history.csv')\nplt.legend(loc=2)\n\nplt.show()\n\n"
},
{
"alpha_fraction": 0.43274036049842834,
"alphanum_fraction": 0.44513291120529175,
"avg_line_length": 36.3283576965332,
"blob_id": "dace373678d0708d83898ce637a24989bc20914d",
"content_id": "d1b2a3215a9c3f27503584813831e6824435352d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5487,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 134,
"path": "/20180108/ReadImageAndBoneData.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "import l_and_p\nimport model_all_bone\nimport model_1_bone\n\nimport math\nimport sys\nimport os\nimport subprocess\n\nall_bone_model_file = './weight_data_All.h5'\nsingle_bone_model_list = [\n ('./weight_data_thigh_L.h5', 'thigh.L', 'thigh.R')\n# ('./weight_data_thigh_L.h5', 'thigh.L', None)\n ]\n\n#---------------------------------------------------------------------------------\n# ボーンの長さを調整するコンバータ\n#---------------------------------------------------------------------------------\ndef conv_length(x, y, z):\n len = math.sqrt(x**2 + y**2 + z**2)\n dif = 1.0 / len\n return (x*dif, y*dif, z*dif)\n\n#---------------------------------------------------------------------------------\n# ボーンの長さをY軸のみで調整するコンバータ\n#---------------------------------------------------------------------------------\ndef conv_length_y(x, y, z):\n y2 = 1 - x**2 - z**2\n new_y = math.sqrt(y2)\n if y < 0:\n new_y = new_y * (-1)\n return (x, new_y, z)\n\n#----------------------------------------------------------------------------------\n# Main proc\n#----------------------------------------------------------------------------------\nif __name__ == '__main__':\n # 引数のチェック\n if len(sys.argv) != 1:\n print(\"Arg Error !!!\")\n print(\"usage: python ReadImageAndBone.py\")\n sys.exit()\n\n '''\n 予測を実行\n weight_file : 指定された重みファイルからロードしてから予測を実行する\n '''\n # 全ボーン用モデル作成\n model_all = model_all_bone.create_model(all_bone_model_file, l_and_p.all_bone_num)\n\n # 単一ボーン用モデルのリスト作成\n model_list = list()\n for (model_file, name_1, name_2) in single_bone_model_list:\n model_single = model_1_bone.create_model(model_file, 1)\n model_list.append((model_single, name_1, name_2))\n\n data_dir = '../predict_in'\n csv_file = '../predict_out/bone_info.csv'\n\n while True:\n print(\"##### 画像ファイルを格納して[Return]キーを押してください。#####\")\n sys.stdin.readline()\n\n files = os.listdir(data_dir)\n for file in files:\n # 次の名前のファイルは処理対象外\n if file == 'bone_info.csv':\n continue\n if file == '.save':\n continue\n\n print(\"file=\" + file)\n\n file_name, ext = os.path.splitext(file)\n frame_num = file_name.split(\"_\")[1]\n start_index = int(frame_num)\n end_index = start_index + 1\n batch_size = end_index - start_index\n\n # 全ボーン用モデルを使用して全ボーンを予測\n bone_data_all = l_and_p.predicting(model_all, data_dir, start_index, end_index, 'ALL', l_and_p.all_bone_num)\n\n for (model_single, name_1, name_2) in model_list:\n # 単一ボーン用モデルを使用して単一ボーンを予測\n bone_data_1 = l_and_p.predicting(model_single, data_dir, start_index, end_index, name_1, 1)\n\n # name_1 のデータの置き換え\n bone_data_all.bone_data_dict[name_1] = bone_data_1.bone_data_dict[name_1]\n (x, y, z) = bone_data_1.bone_data_dict[name_1][0]\n #print(\"x = \" + str(x))\n #print(\"y = \" + str(y))\n #print(\"z = \" + str(z))\n\n if name_2 != None:\n # 単一ボーン用モデルを使用して左右反転した単一ボーンを予測\n bone_data_2 = l_and_p.predicting(model_single, data_dir, start_index, end_index, name_2, 1, img_mirror_x=True)\n\n # name_2 のデータの置き換え\n #bone_data_all.bone_data_dict[name_2] = bone_data_2.bone_data_dict[name_2]\n (x, y, z) = bone_data_2.bone_data_dict[name_2][0]\n #print(\"x = \" + str(x))\n #print(\"y = \" + str(y))\n #print(\"z = \" + str(z))\n\n bone_data_list = bone_data_2.bone_data_dict[name_2]\n bone_data_list_new = list()\n for bone_data in bone_data_list:\n x, y, z = bone_data\n bone_data_list_new.append((-1 * x, y, z))\n \n bone_data_all.bone_data_dict[name_2] = bone_data_list_new\n\n # テスト\n #bone_data_list = list()\n #for i in range(bone_data_all.data_num):\n # bone_data_list.append((0.0, 0.0, -1.0))\n # \n 
#bone_data_all.bone_data_dict[name_2] = bone_data_list\n #bone_data_all.bone_data_dict['shin.L'] = bone_data_list\n #bone_data_all.bone_data_dict['foot.L'] = bone_data_list\n #bone_data_all.bone_data_dict['thigh.L'] = bone_data_list\n\n\n\n # 予測した情報をCSVファイルに出力\n bone_data_all.write_to_csv(csv_file, conv_length)\n #bone_data_all.write_to_csv(csv_file, None)\n print(\"Write Data success.[\" + csv_file + \"]\")\n \n args = ['/home/kino/blender-2.78c/blender',\n 'ReadBoneData.blend',\n '--python',\n 'ReadBoneData.py']\n res = subprocess.call(args)\n\n"
},
{
"alpha_fraction": 0.5068648457527161,
"alphanum_fraction": 0.5203114151954651,
"avg_line_length": 39.60344696044922,
"blob_id": "1afa12bf21425206e3812b4afb666664493f0799",
"content_id": "80efab92bf31bcfecdbc7adc7cfd9d2aea630206",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7427,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 174,
"path": "/V07/image_data.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport os\nimport myutil\nimport myfilter\nimport GenBoneData\n\n# 入力データに関する情報\nimg_width, img_height = 256, 256\nimg_monochrome = True\nimg_normalize = True\n\nif img_monochrome == True:\n img_channel = 1\nelse:\n img_channel = 3\n\ntrain_data_prefix = 'test_'\n\n# イメージを変換するフィルタの設定\nimg_filters1 = [\n #myfilter.gen_img_resize_filter(256, 256),\n #myfilter.gen_img_zoom_filter(1),\n #myfilter.gen_img_move_filter(0, 0)\n myfilter.gen_img_region_filter(),\n ]\n\ndata_filters1 = [\n myfilter.gen_monochrome_filter(),\n myfilter.gen_normalize_filter()\n ]\n\nimg_filters2 = [\n #myfilter.gen_img_resize_filter(256, 256),\n #myfilter.gen_img_zoom_filter(1),\n #myfilter.gen_img_move_filter(0, 0)\n myfilter.gen_img_region_filter(),\n ]\ndata_filters2 = [\n myfilter.gen_monochrome_filter(),\n #myfilter.gen_nega_filter(), # ネガは没\n myfilter.gen_normalize_filter()\n ]\n\ntrain_dir_index = 0\ndef get_train_data_static(target_bone_name):\n '''\n トレーニング用データの生成(既に生成済みの画像から生成する)\n '''\n\n # トレーニング用のデータのディレクトリのリスト\n train_data_dirs = [\n './MyData/train/Other',\n # './MyData/train/Male',\n './MyData/train/Female',\n './MyData/train/MaleNoClothes',\n './MyData/train/FemaleNoClothes'\n ]\n\n # 次に使用するBlenderファイル名を決定\n global train_dir_index\n train_data_dir = train_data_dirs[train_dir_index]\n train_dir_index += 1\n if train_dir_index >= len(train_data_dirs):\n train_dir_index = 0\n\n # データの生成\n train_datagen = myutil.MyDataGenerator(train_data_dir, width=img_width, height=img_height)\n train_list = []\n for i in range(data_start_index, data_end_index, batch_size):\n x_train, y_train = train_datagen.gen_data(i, i+batch_size,\n target_bone=target_bone_name,\n divided=True,\n img_filters=img_filters1,\n data_filters=data_filters1)\n\n train_list.append((x_train, y_train))\n\n return (train_list, train_data_dir)\n\ntrain_blender_index = 0\ndef get_train_data_auto(image_gen_dir, target_bone_name, data_start_index, data_end_index, batch_size):\n '''\n トレーニング用データの生成(動的に画像を生成する)\n '''\n\n csv_file_path = os.path.join(image_gen_dir, 'bone_info.csv')\n\n # トレーニング用のBlenderファイルのリスト\n train_blender_files = [\n #('./HumanMale.blend', img_filters1, data_filters1), 検証用\n #(None, img_filters2, data_filters2),\n ('./Robot_1.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./HumanOther.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./BraidHair.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./Robot_2.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./CasualSuit.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./MaleSuit.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./Robot_3.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./HumanFemale.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./HumanMaleNoClothes.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ('./Robot_2.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n\n ('./HumanFemaleNoClothes.blend', img_filters1, data_filters1, False),\n (None, img_filters2, data_filters2, True),\n ]\n\n # 次に使用するBlenderファイル名を決定\n global train_blender_index\n (train_blender_file, img_filters, data_filters, 
img_mirror_x) = train_blender_files[train_blender_index]\n train_blender_index += 1\n if train_blender_index >= len(train_blender_files):\n train_blender_index = 0\n\n if train_blender_file != None:\n # データの生成\n GenBoneData.render(train_blender_file, image_gen_dir, train_data_prefix, csv_file_path, data_end_index - data_start_index)\n else:\n train_blender_file = \"\"\n\n # トレーニング用のデータ\n train_datagen = myutil.MyDataGenerator(image_gen_dir, width=img_width, height=img_height)\n train_list = []\n for i in range(data_start_index, data_end_index, batch_size):\n x_train, y_train = train_datagen.gen_data(i, i+batch_size,\n target_bone=target_bone_name,\n divided=True,\n img_mirror_x=img_mirror_x,\n img_filters=img_filters,\n data_filters=data_filters)\n train_list.append((x_train, y_train))\n\n return (train_list, train_blender_file)\n\n\ndef get_validation_data(image_data_path, target_bone_name, start_index, batch_size):\n vali_datagen = myutil.MyDataGenerator(image_data_path, width=img_width, height=img_height)\n x, y = vali_datagen.gen_data(start_index, start_index+batch_size,\n target_bone=target_bone_name,\n divided=True,\n img_filters=img_filters1,\n data_filters=data_filters1)\n return (x, y)\n\ndef get_predict_data(image_data_path, target_bone_name, lavel_file_path,\n start_index, end_index, img_mirror_x=False):\n # 検証用のデータ\n datagen = myutil.MyDataGenerator(image_data_path, width=img_width, height=img_height,\n lavel_file_path=lavel_file_path)\n x, y = datagen.gen_data(start_index, end_index,\n target_bone=target_bone_name,\n divided=True,\n image_only=True,\n img_mirror_x=img_mirror_x,\n img_filters=img_filters1,\n data_filters=data_filters1)\n bone_list = datagen.get_bone_data()\n\n return (x, bone_list)\n"
},
{
"alpha_fraction": 0.4552610516548157,
"alphanum_fraction": 0.4721349775791168,
"avg_line_length": 35.62730026245117,
"blob_id": "385679938db7a01c4c8e2ef57c7fc58037b158db",
"content_id": "64ab56e25b22267cecd8ffada6b4f40e7ce4e1e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25375,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 652,
"path": "/V08/myutil.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nimport numpy as np\nimport csv\nimport copy\nimport os\nimport random\nimport time\nimport myfilter\n\n# ミラー反転した場合に対応するボーン名の辞書\nmirror_dict = {}\nmirror_dict['hips'] = 'hips'\nmirror_dict['spine'] = 'spine'\nmirror_dict['chest01'] = 'chest01'\nmirror_dict['chest02'] = 'chest02'\nmirror_dict['neck'] = 'neck'\nmirror_dict['head'] = 'head'\nmirror_dict['shoulder.L'] = 'shoulder.R'\nmirror_dict['upper_arm.L'] = 'upper_arm.R'\nmirror_dict['forearm.L'] = 'forearm.R'\nmirror_dict['hand.L'] = 'hand.R'\nmirror_dict['thumb.01.L'] = 'thumb.01.R'\nmirror_dict['thumb.02.L'] = 'thumb.02.R'\nmirror_dict['thumb.03.L'] = 'thumb.03.R'\nmirror_dict['palm.01.L'] = 'palm.01.R'\nmirror_dict['f_index.01.L'] = 'f_index.01.R'\nmirror_dict['f_index.02.L'] = 'f_index.02.R'\nmirror_dict['f_index.03.L'] = 'f_index.03.R'\nmirror_dict['palm.02.L'] = 'palm.02.R'\nmirror_dict['f_middle.01.L'] = 'f_middle.01.R'\nmirror_dict['f_middle.02.L'] = 'f_middle.02.R'\nmirror_dict['f_middle.03.L'] = 'f_middle.03.R'\nmirror_dict['palm.03.L'] = 'palm.03.R'\nmirror_dict['f_ring.01.L'] = 'f_ring.01.R'\nmirror_dict['f_ring.02.L'] = 'f_ring.02.R'\nmirror_dict['f_ring.03.L'] = 'f_ring.03.R'\nmirror_dict['palm.04.L'] = 'palm.04.R'\nmirror_dict['f_pinky.01.L'] = 'f_pinky.01.R'\nmirror_dict['f_pinky.02.L'] = 'f_pinky.02.R'\nmirror_dict['f_pinky.03.L'] = 'f_pinky.03.R'\nmirror_dict['shoulder.R'] = 'shoulder.L'\nmirror_dict['upper_arm.R'] = 'upper_arm.L'\nmirror_dict['forearm.R'] = 'forearm.L'\nmirror_dict['hand.R'] = 'hand.L'\nmirror_dict['thumb.01.R'] = 'thumb.01.L'\nmirror_dict['thumb.02.R'] = 'thumb.02.L'\nmirror_dict['thumb.03.R'] = 'thumb.03.L'\nmirror_dict['palm.01.R'] = 'palm.01.L'\nmirror_dict['f_index.01.R'] = 'f_index.01.L'\nmirror_dict['f_index.02.R'] = 'f_index.02.L'\nmirror_dict['f_index.03.R'] = 'f_index.03.L'\nmirror_dict['palm.02.R'] = 'palm.02.L'\nmirror_dict['f_middle.01.R'] = 'f_middle.01.L'\nmirror_dict['f_middle.02.R'] = 'f_middle.02.L'\nmirror_dict['f_middle.03.R'] = 'f_middle.03.L'\nmirror_dict['palm.03.R'] = 'palm.03.L'\nmirror_dict['f_ring.01.R'] = 'f_ring.01.L'\nmirror_dict['f_ring.02.R'] = 'f_ring.02.L'\nmirror_dict['f_ring.03.R'] = 'f_ring.03.L'\nmirror_dict['palm.04.R'] = 'palm.04.L'\nmirror_dict['f_pinky.01.R'] = 'f_pinky.01.L'\nmirror_dict['f_pinky.02.R'] = 'f_pinky.02.L'\nmirror_dict['f_pinky.03.R'] = 'f_pinky.03.L'\nmirror_dict['hips.L'] = 'hips.R'\nmirror_dict['thigh.L'] = 'thigh.R'\nmirror_dict['shin.L'] = 'shin.R'\nmirror_dict['foot.L'] = 'foot.R'\nmirror_dict['toe.L'] = 'toe.R'\nmirror_dict['heel.L'] = 'heel.R'\nmirror_dict['hips.R'] = 'hips.L'\nmirror_dict['thigh.R'] = 'thigh.L'\nmirror_dict['shin.R'] = 'shin.L'\nmirror_dict['foot.R'] = 'foot.L'\nmirror_dict['toe.R'] = 'toe.L'\nmirror_dict['heel.R'] = 'heel.L'\n\n# ボーンの情報を復元するときに使用するボーン名の情報\n# ベジェカーブの情報\ncurve_dict = {}\ncurve_dict['hips'] = ('BezierCurve_Head', 1)\ncurve_dict['spine'] = ('BezierCurve_Head', 2)\ncurve_dict['chest01'] = ('BezierCurve_Head', 3)\ncurve_dict['chest02'] = ('BezierCurve_Head', 4)\ncurve_dict['neck'] = ('BezierCurve_Head', 5)\ncurve_dict['head'] = ('BezierCurve_Head', 6)\n\ncurve_dict['shoulder.L'] = ('BezierCurve_Arm.L', 1)\ncurve_dict['upper_arm.L'] = ('BezierCurve_Arm.L', 2)\ncurve_dict['forearm.L'] = ('BezierCurve_Arm.L', 3)\n\ncurve_dict['shoulder.R'] = ('BezierCurve_Arm.R', 1)\ncurve_dict['upper_arm.R'] = ('BezierCurve_Arm.R', 2)\ncurve_dict['forearm.R'] = ('BezierCurve_Arm.R', 3)\n\ncurve_dict['hips.L'] = ('BezierCurve_Foot.L', 1)\ncurve_dict['thigh.L'] = ('BezierCurve_Foot.L', 
2)\ncurve_dict['shin.L'] = ('BezierCurve_Foot.L', 3)\ncurve_dict['foot.L'] = ('BezierCurve_Foot.L', 4)\n\ncurve_dict['hips.R'] = ('BezierCurve_Foot.R', 1)\ncurve_dict['thigh.R'] = ('BezierCurve_Foot.R', 2)\ncurve_dict['shin.R'] = ('BezierCurve_Foot.R', 3)\ncurve_dict['foot.R'] = ('BezierCurve_Foot.R', 4)\n\n\nclass MyDataGenerator(object):\n \n def __init__(self, path, width=256, height=256, prefix='test_', lavel_file_path=None):\n '''\n MyDataGeneraor インスタンスの初期化\n path : 画像格納ディレクトリパス\n width : 画像の幅\n height : 画像の高さ\n prefix : 画像ファイル名のプレフィックス\n level_file : ボーン情報のファイル名\n '''\n self.path = path\n self.width = width\n self.height = height\n self.prefix = prefix\n if lavel_file_path != None:\n self.lavel_file = lavel_file_path\n else:\n self.lavel_file = os.path.join(self.path, 'bone_info.csv')\n\n def get_y(self, start, end, target_bone='ALL'):\n '''\n ボーン情報を読み込む。\n リターンする情報の形式は、[<フレーム数>, <ボーン情報数=ボーン数*3>]\n start : 開始フレーム番号\n end : 終了フレーム番号+1\n '''\n\n # CSVファイルから教師データを読み込む\n bone_dict = dict()\n frame_data_list = []\n with open(self.lavel_file, 'r', encoding='utf8') as f:\n reader = csv.reader(f)\n # ヘッダー情報からボーン数を取得\n head = next(reader)\n bone_num = int(head[1])\n head = next(reader)\n for i in range(bone_num):\n data = next(reader)\n bone_name = data[0]\n parent_name = data[1]\n bone_len_ = data[2]\n bone_index = i\n bone_dict[bone_name] = bone_index\n head = next(reader)\n data_count=0\n for data in reader:\n frame_num = int(data[0])\n if frame_num < start:\n continue\n if frame_num >= end:\n break\n i = 2\n bone_data_list = []\n for bone_count in range(bone_num):\n if not data[i-1] in curve_dict.keys(): # 復元に使用するボーン以外はスキップ\n continue\n if target_bone == 'ALL':\n bone_data_list.append([float(data[i]), float(data[i+1]), float(data[i+2])])\n elif bone_count == bone_dict[target_bone]:\n bone_data_list.append([float(data[i]), float(data[i+1]), float(data[i+2])])\n break\n i+=4\n frame_data_list.append(bone_data_list)\n data_count+=1\n y = np.array(frame_data_list, dtype='float')\n if target_bone == 'ALL':\n # y = y.reshape(end-start, 3*bone_num)\n y = y.reshape(end-start, 3*len(curve_dict))\n else:\n y = y.reshape(end-start, 3)\n\n return y\n\n\n def get_y_divided(self, start, end, target_bone='ALL', img_mirror_x=False):\n '''\n ボーン情報を読み込む。情報はボーン毎に分割する。\n リターンする情報の形式は、[<ボーン数>, <フレーム数>, <ボーン情報数=3>]\n start : 開始フレーム番号\n end : 終了フレーム番号+1\n target_bone : \n '''\n\n # CSVファイルから教師データを読み込む\n bone_dict = dict()\n frame_data_list = []\n with open(self.lavel_file, 'r', encoding='utf8') as f:\n reader = csv.reader(f)\n # ヘッダー情報からボーン数を取得\n head = next(reader)\n bone_num = int(head[1])\n head = next(reader)\n for i in range(bone_num):\n data = next(reader)\n bone_name = data[0]\n parent_name = data[1]\n bone_len_ = data[2]\n bone_index = i\n bone_dict[bone_name] = bone_index\n head = next(reader)\n\n #bone_num = 32 #---------------------------\n\n bone_data_list = []\n if target_bone == 'ALL':\n #for i in range(bone_num):\n for i in range(len(curve_dict)):\n bone_data_list.append([])\n else:\n bone_data_list.append([])\n\n for data in reader:\n frame_num = int(data[0])\n if frame_num < start:\n continue\n if frame_num >= end:\n break\n\n #------------------- img_mirror_x のサポート -----------\n bone_name_list = []\n bone_data_dict = {}\n for column in range(1, bone_num*4+1, 4):\n bone_name_list.append(data[column]) #Bone name の順番\n if img_mirror_x == True:\n bone_data_name = mirror_dict[data[column]] # 左右反対のボーン名\n bone_data = [float(data[column+1]) * (-1), # Bone vecX (X軸にそって反転)\n float(data[column+2]), # 
Bone vecY\n float(data[column+3])] # Bone vecZ\n else:\n bone_data_name = data[column]\n bone_data = [float(data[column+1]), # Bone vecX\n float(data[column+2]), # Bone vecY\n float(data[column+3])] # Bone vecZ\n bone_data_dict[bone_data_name] = bone_data\n\n data_i = 0\n for bone_name in bone_name_list:\n #if bone_name == 'upper_arm.L':\n # print(\"#### upper_arm.L\")\n # print(\" x: \" + str(bone_data[0]))\n # print(\" y: \" + str(bone_data[1]))\n # print(\" z: \" + str(bone_data[2]))\n #if bone_name == 'upper_arm.R':\n # print(\"#### upper_arm.R\")\n # print(\" x: \" + str(bone_data[0]))\n # print(\" y: \" + str(bone_data[1]))\n # print(\" z: \" + str(bone_data[2]))\n if not bone_name in curve_dict.keys(): # 復元に使用するボーン以外はスキップ\n continue\n if target_bone == 'ALL':\n bone_data = bone_data_dict[bone_name]\n bone_data_list[data_i].append( bone_data )\n data_i += 1\n elif target_bone == bone_name:\n bone_data = bone_data_dict[bone_name]\n bone_data_list[data_i].append( bone_data )\n data_i += 1\n break\n #-------------------------------------------------------\n #data_i = 0\n #for column in range(1, bone_num*4+1, 4):\n # # data[column] Bone name\n # if target_bone == 'ALL':\n # bone_data_list[data_i].append( [float(data[column+1]), # Bone vecX\n # float(data[column+2]), # Bone vecY\n # float(data[column+3])] ) # Bone vecZ\n # data_i += 1\n # elif target_bone == data[column]:\n # bone_data_list[data_i].append( [float(data[column+1]), # Bone vecX\n # float(data[column+2]), # Bone vecY\n # float(data[column+3])] ) # Bone vecZ\n # data_i += 1\n # break\n\n y = []\n for i in range(len(bone_data_list)):\n numpy_data = np.array(bone_data_list[i], dtype='float')\n numpy_data = numpy_data.reshape(end-start, 3)\n y.append(numpy_data)\n\n # 複数に分割した教師データの一番外側はnumpyではなくリストでなければならない\n # y = np.array(y, dtype='float')\n \n return y\n\n def gen_data(self, start, end,\n target_bone='ALL', divided=True, image_only=False,\n img_mirror_x = False, # img_mirror_xはdivided=Trueでしかサポートしていない\n img_filters=[], data_filters=[]):\n '''\n 画像データおよび教師データの読み込み\n x の shape : [<end-start(frames)>, <width>, <height>, <3(RBB)>]\n y の shape : [<end-start(frames)>, <3*bone_num(params)>]\n '''\n\n x = np.array([], dtype='float')\n for i in range(start, end):\n im = Image.open(self.path + '/' + self.prefix + \"{0:0>5}.png\".format(i))\n\n # imgフィルタの適用\n for filter in img_filters:\n im = filter(im)\n # X軸反転適用\n if img_mirror_x == True:\n filter = myfilter.gen_img_mirror_x_filter()\n im = filter(im)\n\n # im.show()\n # time.sleep(2)\n\n img_rgb = im.convert('RGB')\n img_width = img_rgb.size[0]\n img_height = img_rgb.size[1]\n\n img_data = np.array(list(img_rgb.getdata()), dtype='float')\n img_size, img_chanel = img_data.shape\n\n #print(\"width=\" + str(img_width))\n #print(\"height=\" + str(img_height))\n #print(\"chanel=\" + str(img_chanel))\n img_data = img_data.reshape(img_width, img_height, img_chanel)\n\n for filter in data_filters:\n img_data = filter(img_data)\n\n img_width, img_height, img_chanel = img_data.shape # フィルタ適用後のサイズ\n\n x = np.append(x, img_data)\n #show_img(img_data)\n #time.sleep(3)\n\n x = x.reshape(end-start, img_width, img_height, img_chanel)\n\n if image_only == True:\n y = []\n else:\n # CSVファイルから教師データを読み込む\n if divided == True:\n y = self.get_y_divided(start, end, target_bone, img_mirror_x=img_mirror_x)\n #y = self.get_y_divided(start, end, target_bone, img_mirror_x=False)\n #y = self.get_y_divided(start, end, target_bone, img_mirror_x=True)\n else:\n y = self.get_y(start, end, target_bone)\n \n return (x, 
y)\n\n def gen_timeline_data(self, timestep, start, end, monochrome=False):\n '''\n 画像データおよび教師データの読み込み\n x の shape : [<end-start(frames)>, <timestap>, <width>, <height>, <3(RBB)>]\n y の shape : [<end-start(frames)>, <3*bone_num(params)>]\n '''\n\n data = []\n dummy_data = [[0] * 3] * self.width * self.height\n for i in range(timestep-1):\n data.append(dummy_data)\n\n for i in range(start, end):\n im = Image.open(self.path + '/' + self.prefix + \"{0:0>5}.png\".format(i))\n rgb_im = im.convert('RGB')\n img_data = list(rgb_im.getdata())\n #print(\"### [\" + str(len(img_data)) + \"]\")\n #print(\"### [\" + str(len(img_data[0])) + \"]\")\n data.append(img_data)\n\n #for d in data:\n # print(\"### [\" + str(len(d)) + \"][\" + str(len(d[0])) + \"]\")\n time_dist_data = []\n for i in range(len(data)-timestep+1):\n # time_dist_data.append(copy.deepcopy(data[i:i+timestep]))\n time_dist_data.append(data[i:i+timestep])\n\n x = np.array(time_dist_data, dtype='float')\n x = x / 255\n x = x.reshape(end-start, timestep, self.width, self.height, 3)\n\n if monochrome == True:\n x = x.transpose(4, 0, 1, 2, 3)\n x = (x[0]+x[1]+x[2])/3\n x = x.reshape(end-start, timestep, self.width, self.height, 1)\n\n # CSVファイルから情報を読み込む\n bone_dict = dict()\n frame_data_list = []\n with open(self.lavel_file, 'r', encoding='utf8') as f:\n reader = csv.reader(f)\n # ヘッダー情報からボーン数を取得\n head = next(reader)\n bone_num = int(head[1])\n head = next(reader)\n for i in range(bone_num):\n data = next(reader)\n bone_dict[data[0]] = data\n head = next(reader)\n data_count=0\n for data in reader:\n frame_num = int(data[0])\n if frame_num < start:\n continue\n if frame_num >= end:\n break\n i = 2\n bone_data_list = []\n for bone_count in range(bone_num):\n bone_data_list.append([float(data[i]), float(data[i+1]), float(data[i+2])])\n i+=4\n frame_data_list.append(bone_data_list)\n data_count+=1\n y = np.array(frame_data_list, dtype='float')\n y = y.reshape(end-start, 3*bone_num)\n\n return (x, y)\n \n def gen_timeline_divided_data(self, timestep, start, end, monochrome=False, normalize=True, image_only=False):\n '''\n 画像データおよび教師データの読み込み\n x の shape : [<end-start(frames)>, <timestap>, <width>, <height>, <3(RBB)>]\n y の shape : [<end-start(frames)>, <3*bone_num(params)>, <1>]\n '''\n\n data = []\n dummy_data = [[0] * 3] * self.width * self.height\n for i in range(timestep-1):\n data.append(dummy_data)\n\n for i in range(start, end):\n im = Image.open(self.path + '/' + self.prefix + \"{0:0>5}.png\".format(i))\n rgb_im = im.convert('RGB')\n img_data = list(rgb_im.getdata())\n #print(\"### [\" + str(len(img_data)) + \"]\")\n #print(\"### [\" + str(len(img_data[0])) + \"]\")\n data.append(img_data)\n\n #for d in data:\n # print(\"### [\" + str(len(d)) + \"][\" + str(len(d[0])) + \"]\")\n time_dist_data = []\n for i in range(len(data)-timestep+1):\n # time_dist_data.append(copy.deepcopy(data[i:i+timestep]))\n time_dist_data.append(data[i:i+timestep])\n\n x = np.array(time_dist_data, dtype='float')\n x = x / 255\n x = x.reshape(end-start, timestep, self.width, self.height, 3)\n\n if monochrome == True:\n x = x.transpose(4, 0, 1, 2, 3)\n x = (x[0]+x[1]+x[2])/3\n x = x.reshape(end-start, timestep, self.width, self.height, 1)\n\n if normalize == True:\n for f in range(end-start):\n for t in range(timestep):\n m = x[f, t].max()\n if m > 0.0:\n x[f, t] = x[f, t]/m # 最大を1.0にする\n\n # mask = x[f, t] == 0.0\n # x[f, t][mask] = -1.0 # 画素のない部分を-1.0にする\n\n '''\n 確認のために描画\n if t == 1:\n show_img(x[f, t])\n print(str(x[f, t]))\n print(\"max : \" + str(m))\n 
time.sleep(5)\n '''\n \n if image_only == True:\n y = []\n else:\n # CSVファイルから情報を読み込む\n bone_dict = dict()\n \n bone_data_list = []\n #frame_data_list = []\n with open(self.lavel_file, 'r', encoding='utf8') as f:\n reader = csv.reader(f)\n # ヘッダー情報からボーン数を取得\n head = next(reader)\n bone_num = int(head[1])\n head = next(reader)\n for i in range(bone_num):\n data = next(reader)\n bone_dict[data[0]] = data\n head = next(reader)\n \n for i in range(bone_num*3):\n bone_data_list.append([])\n \n for data in reader:\n frame_num = int(data[0])\n if frame_num < start:\n continue\n if frame_num >= end:\n break\n data_i = 0\n for column in range(1, bone_num*4+1, 4):\n # data[column] Bone name\n bone_data_list[data_i].append(float(data[column+1])) # Bone vecX\n data_i += 1\n bone_data_list[data_i].append(float(data[column+2])) # Bone vecY\n data_i += 1\n bone_data_list[data_i].append(float(data[column+3])) # Bone vecZ\n data_i += 1\n \n y = []\n for i in range(len(bone_data_list)):\n numpy_data = np.array(bone_data_list[i], dtype='float')\n numpy_data = numpy_data.reshape(end-start)\n y.append(numpy_data)\n\n return (x, y)\n\n\n def get_bone_data(self):\n '''\n leval_file(デフォルトは'bone_info.csv')からボーンの情報を読み込む\n '''\n\n # CSVファイルから情報を読み込む\n bone_list = list()\n with open(self.lavel_file, 'r', encoding='utf8') as f:\n reader = csv.reader(f)\n\n # ヘッダー情報からボーン数を取得\n head = next(reader)\n bone_num = int(head[1])\n\n # ボーンの情報を取得\n head = next(reader)\n for i in range(bone_num):\n # (<bone_name>, <parent_name>, <bone_length>) を追加\n data = next(reader)\n bone_list.append((data[0], data[1], data[2]))\n\n return bone_list\n\n def gen_data_test(self, start, end,\n target_bone='ALL', divided=True, image_only=False,\n img_mirror_x=False,\n img_filters=[], data_filters=[]):\n '''\n 画像データおよび教師データの読み込み\n x の shape : [<end-start(frames)>, <width>, <height>, <3(RBB)>]\n y の shape : [<end-start(frames)>, <3*bone_num(params)>]\n '''\n\n x = np.array([], dtype='float')\n for i in range(start, end):\n im = Image.open(self.path + '/' + self.prefix + \"{0:0>5}.png\".format(i))\n\n # imgフィルタの適用\n for filter in img_filters:\n im = filter(im)\n\n if img_mirror_x == True:\n filter = myfilter.gen_img_mirror_x_filter()\n im = filter(im)\n\n # im.show()\n # time.sleep(2)\n\n img_rgb = im.convert('RGB')\n img_width = img_rgb.size[0]\n img_height = img_rgb.size[1]\n\n img_data = np.array(list(img_rgb.getdata()), dtype='float')\n img_size, img_chanel = img_data.shape\n\n #print(\"width=\" + str(img_width))\n #print(\"height=\" + str(img_height))\n #print(\"chanel=\" + str(img_chanel))\n img_data = img_data.reshape(img_width, img_height, img_chanel)\n\n for filter in data_filters:\n img_data = filter(img_data)\n\n img_width, img_height, img_chanel = img_data.shape # フィルタ適用後のサイズ\n\n #img_data = img_data / 255\n \n x = np.append(x, img_data)\n\n x = x.reshape(end-start, img_width, img_height, img_chanel)\n #print(\"x.shape=\" + str(x.shape))\n\n if image_only == True:\n y = []\n else:\n # CSVファイルから教師データを読み込む\n if divided == True:\n y = self.get_y_divided(start, end, target_bone, img_mirror_x=img_mirror_x)\n else:\n y = self.get_y(start, end, target_bone)\n \n return (x, y)\n\n# sute ???\ndef transform_image(im, resize, move_x, move_y):\n (width, height) = im.size\n\n # リサイズした後の中心を補正\n x = (width/2) / resize - (width/2)\n y = (height/2) / resize - (height/2)\n\n # 移動分を追加\n x = x + (width * move_x)\n y = y + (height * move_y)\n\n # Affine変換のデータ作成\n data = (1/resize, 0, -x, 0, 1/resize, -y)\n\n im2 = im.transform(im.size, Image.AFFINE, data, 
Image.BILINEAR)\n return im2\n\ndef show_img(img_data):\n width, height, chanel = img_data.shape\n max = img_data.max()\n if max > 1.0:\n img_data = img_data / 255\n if chanel == 1:\n w_data = np.zeros((width, height, 3))\n for w in range(width):\n for h in range(height):\n for c in range(3):\n w_data[w, h, c] = img_data[w, h, 0]\n if img_data[w, h, 0] == 0: # 0 の所はグリーンで表示する\n w_data[w, h, 1] = 1.0\n else:\n #w_data = img_data\n w_data = np.zeros((width, height, chanel))\n for w in range(width):\n for h in range(height):\n is_alpha = True\n for c in range(chanel):\n w_data[w, h, c] = img_data[w, h, c]\n if w_data[w, h, c] != 0:\n is_alpha = False\n if is_alpha == True:\n w_data[w, h, 1] = 1.0 # 0 の所はグリーンで表示する\n\n img = (w_data*255).astype('uint8')\n im = Image.fromarray(img)\n im.show()\n\n\n"
},
{
"alpha_fraction": 0.47606533765792847,
"alphanum_fraction": 0.4931108057498932,
"avg_line_length": 27.73265266418457,
"blob_id": "565059e668273c32616ca974cca9c2d095ab5117",
"content_id": "b84c352c70954043e7ba4e3561c0fbceb4b4e9ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15922,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 490,
"path": "/V09/myfilter.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nimport numpy as np\nimport time\n\n#####################################################\n# デバッグ用サブルーチン\n#####################################################\ndef show_img(img_data):\n '''\n img_dataの画像を表示する\n '''\n width, height, chanel = img_data.shape\n max = img_data.max()\n if max > 1.0:\n img_data = img_data / 255\n if chanel == 1:\n w_data = np.zeros((width, height, 3))\n for w in range(width):\n for h in range(height):\n for c in range(3):\n w_data[w, h, c] = img_data[w, h, 0]\n if img_data[w, h, 0] == 0: # 0 の所はグリーンで表示する\n w_data[w, h, 1] = 1.0\n else:\n #w_data = img_data\n w_data = np.zeros((width, height, chanel))\n for w in range(width):\n for h in range(height):\n is_alpha = True\n for c in range(chanel):\n w_data[w, h, c] = img_data[w, h, c]\n if w_data[w, h, c] != 0:\n is_alpha = False\n if is_alpha == True:\n w_data[w, h, 1] = 1.0 # 0 の所はグリーンで表示する\n\n img = (w_data*255).astype('uint8')\n im = Image.fromarray(img)\n im.show()\n\n\n###############################################\n# フィルタで使用するサブルーチン\n###############################################\ndef image_to_img_data(im):\n '''\n ImageをImg_dataに変換\n [注意] : 返す情報はRGBの3チャネルの情報となる。\n (width, height, chanel)\n '''\n img_rgb = im.convert('RGB')\n img_width = img_rgb.size[0]\n img_height = img_rgb.size[1]\n\n img_data = np.array(list(img_rgb.getdata()), dtype='float')\n img_size, img_chanel = img_data.shape\n\n img_data = img_data.reshape(img_width, img_height, img_chanel)\n\n return img_data\n\ndef img_data_to_image(img_data):\n '''\n img_dataをImageに変換\n [注意] : img_dataがモノクロの場合、3チャネルに変換してImageに変換される。\n '''\n new_width, new_height, new_chanel = img_data.shape\n max = img_data.max()\n if max > 1.0:\n img_wk = img_data / 255\n else:\n img_wk = img_data\n if new_chanel == 1:\n w_data = np.zeros((new_width, new_height, 3))\n for w in range(new_width):\n for h in range(new_height):\n for c in range(3):\n w_data[h, w, c] = img_wk[h, w, 0]\n img_wk = w_data\n\n img = (img_wk*255).astype('uint8')\n im = Image.fromarray(img)\n return im\n\ndef get_region_xy(img_data):\n '''\n 画像が写っている範囲を取得する。\n 次の情報を返す。\n (x_min : x座標の最小値\n x_max : x座標の最大値\n y_min : y座標の最小値\n y_max : y座標の最大値)\n '''\n\n width, height, chanel = img_data.shape\n min_x = -1\n min_y = -1\n max_x = width\n max_y = height\n\n # min_x を探す\n for w in range(width):\n for h in range(height):\n is_data = False\n for c in range(chanel):\n if img_data[h, w, c] != 0:\n is_data = True\n break\n if is_data:\n min_x = w\n break\n if min_x > -1:\n break\n\n # max_x を探す\n for w in range(width-1, -1, -1):\n for h in range(height):\n is_data = False\n for c in range(chanel):\n if img_data[h, w, c] != 0:\n is_data = True\n break\n if is_data:\n max_x = w\n break\n if max_x < width:\n break\n\n # min_y を探す\n for h in range(height):\n for w in range(width):\n is_data = False\n for c in range(chanel):\n if img_data[h, w, c] != 0:\n is_data = True\n break\n if is_data:\n min_y = h\n break\n if min_y > -1:\n break\n\n # max_y を探す\n for h in range(height-1, -1, -1):\n for w in range(width):\n is_data = False\n for c in range(chanel):\n if img_data[h, w, c] != 0:\n is_data = True\n break\n if is_data:\n max_y = h\n break\n if max_y < height:\n break\n\n #print(\"min_x=\"+str(min_x))\n #print(\"max_x=\"+str(max_x))\n #print(\"min_y=\"+str(min_y))\n #print(\"max_y=\"+str(max_y))\n return min_x, max_x, min_y, max_y\n\n###################################################\n# Imageに適用するフィルタのジェネレータ\n###################################################\ndef 
gen_img_resize_filter(width, height):\n '''\n Imageのサイズを変更するフィルタ\n 画像も拡大/縮小される\n '''\n def filter(img):\n img2 = img.resize((width, height))\n return img2\n return filter\n\ndef gen_img_mirror_x_filter():\n '''\n Imageのサイズはそのままで、画像を拡大/縮小するフィルタ\n '''\n def filter(img):\n org_width, org_height = img.size\n # Affine変換のデータ作成\n data = (-1, 0, org_width, 0, 1, 0)\n img2 = img.transform(img.size, Image.AFFINE, data, Image.NEAREST)\n return img2\n return filter\n\ndef gen_img_zoom_filter(resize):\n '''\n Imageのサイズはそのままで、画像を拡大/縮小するフィルタ\n '''\n def filter(img):\n org_width, org_height = img.size\n # リサイズした後の中心を補正\n x = (org_width/2) / resize - (org_width/2)\n y = (org_height/2) / resize - (org_height/2)\n \n # Affine変換のデータ作成\n data = (1/resize, 0, -x, 0, 1/resize, -y)\n \n img2 = img.transform(img.size, Image.AFFINE, data, Image.NEAREST)\n #img2 = img.transform(img.size, Image.AFFINE, data, Image.BOX)\n #img2 = img.transform(img.size, Image.AFFINE, data, Image.BILINEAR)\n #img2 = img.transform(img.size, Image.AFFINE, data, Image.HAMMING)\n #img2 = img.transform(img.size, Image.AFFINE, data, Image.BICUBIC)\n #img2 = img.transform(img.size, Image.AFFINE, data, Image.LANCZOS)\n\n return img2\n return filter\n\ndef gen_img_move_filter(move_x, move_y):\n '''\n 画像を移動するフィルタ\n '''\n def filter(img):\n org_width, org_height = img.size\n\n # 移動分を追加\n #x = (org_width * move_x)\n #y = (org_height * move_y)\n x = move_x\n y = move_y\n \n # Affine変換のデータ作成\n data = (1, 0, -x, 0, 1, -y)\n \n img2 = img.transform(img.size, Image.AFFINE, data, Image.BILINEAR)\n return img2\n return filter\n\ndef gen_img_region_filter():\n '''\n 画像が写っている範囲を画像のフレーム内に収まる最大まで拡大する。\n '''\n def filter(img):\n # 変更前の情報を取得\n width, height = img.size\n # img_dataを取得\n img_data = image_to_img_data(img)\n # 画像が存在する領域を取得\n min_x, max_x, min_y, max_y = get_region_xy(img_data)\n\n #print(\"min_x=\" + str(min_x))\n #print(\"max_x=\" + str(max_x))\n #print(\"min_y=\" + str(min_y))\n #print(\"max_y=\" + str(max_y))\n\n # 画像が存在する領域の幅と高さ\n new_width = max_x - min_x + 1\n new_height = max_y - min_y + 1\n\n # 画像を中心に移動したときの余白の大きさを求める\n x_margin = int((width - new_width) / 2)\n y_margin = int((height - new_height) / 2)\n\n # 画像を中心に移動\n move_x = x_margin - min_x\n move_y = y_margin - min_y\n move_filter = gen_img_move_filter(move_x, move_y)\n img = move_filter(img)\n\n #print(\"x_margin=\" + str(x_margin))\n #print(\"y_margin=\" + str(y_margin))\n #print(\"move_x=\" + str(move_x))\n #print(\"move_y=\" + str(move_y))\n\n # 画像を領域を枠いっぱいに拡大\n x_zoom = width / new_width\n y_zoom = height / new_height\n if x_zoom < y_zoom:\n zoom = x_zoom\n else:\n zoom = y_zoom\n zoom_filter = gen_img_zoom_filter(zoom)\n img = zoom_filter(img)\n\n return img\n return filter\n\n\n######################################################\n# img_dataに適用するフィルタのジェネレータ\n######################################################\ndef gen_monochrome_filter():\n '''\n 白黒にするフィルタ\n '''\n def filter(img_data):\n width, height, chanel = img_data.shape\n img_data = img_data.transpose(2, 0, 1)\n img_data = (img_data[0]+img_data[1]+img_data[2])/3\n img_data = img_data.reshape(width, height, 1)\n return img_data\n return filter\n\ndef gen_normalize_filter():\n '''\n データの最大値を1.0にするフィルタ\n '''\n def filter(img_data):\n cond = img_data == 0\n max = img_data.max()\n #print(\"max=\" + str(max))\n #m255 = 0\n #m254 = 0\n #m253 = 0\n #for x in range(256):\n # for y in range(256):\n # #print(str(img_data[x][y][0]))\n # if img_data[x][y][0] == 255: m255+=1\n # if img_data[x][y][0] == 254: m254+=1\n # if 
img_data[x][y][0] == 253: m253+=1\n #print(\"m255=\" + str(m255))\n #print(\"m254=\" + str(m254))\n #print(\"m253=\" + str(m253))\n # print(\"max x:y = \" + str(x) + \":\" + str(y))\n #if max > 0.0:\n # img_data = img_data/max # 最大を1.0にする\n if max > 1.0:\n #print(\"max0=\" + str(max))\n cond255 = img_data == 255\n cond254 = img_data < 254\n max = img_data[cond254].max() # 254以下のMax\n #print(\"max1=\" + str(max))\n img_data[cond255] = max\n # img_data = img_data + (255-max) # 画像の明暗は調整しない\n img_data = img_data/255\n #img_data = img_data/max\n #print(\"max3=\" + str(img_data.max()))\n #print(\"minc=\" + str(img_data.min()))\n else:\n img_data = img_data + (1.0-max)\n\n img_data[cond]=0\n\n #show_img(img_data)\n #time.sleep(3)\n return(img_data)\n return filter\n\ndef gen_nega_filter():\n '''\n 画像の照度を反転(ネガ)するフィルタ\n '''\n def filter(img_data):\n width, height, chanel = img_data.shape\n cond = img_data == 0\n max = img_data.max()\n img_data = img_data * (-1)\n img_data = img_data + max\n img_data[cond]=0\n return img_data\n return filter\n \n\ndef gen_region_filter_old():\n '''\n 画像が写っている範囲を画像のフレーム内に収まる最大まで拡大する。\n [注意] \n このフィルタはimg_dataとImageの変換が行われるので、\n gen_img_region_filter()\n が使用できる場合は、このフィルタを使わず、\n gen_img_region_filter()\n を使用すること。\n [注意] 没となった古いロジック。使用しないこと。\n '''\n def filter(img_data):\n # 変更前の情報を取得\n width, height, chanel = img_data.shape\n # 画像が存在する領域を取得\n min_x, max_x, min_y, max_y = get_region_xy(img_data)\n\n # 画像が存在する領域の幅と高さ\n new_width = max_x - min_x + 1\n new_height = max_y - min_y + 1\n\n # 余分な領域を削除\n del_width = [i for i in range(max_x+1, width)]\n img_data = np.delete(img_data, del_width, 1)\n del_width = [i for i in range(min_x)]\n img_data = np.delete(img_data, del_width, 1)\n\n del_height = [i for i in range(max_y+1, height)]\n img_data = np.delete(img_data, del_height, 0)\n del_height = [i for i in range(min_y)]\n img_data = np.delete(img_data, del_height, 0)\n\n # 元の幅と高さの比率と同じになるように足りない領域を追加\n if new_width/width > new_height/height:\n new_height2 = height * new_width / width\n append_size = new_height2 - new_height\n append_list = np.zeros((int(append_size/2), new_width, chanel))\n img_data = np.append(img_data, append_list, axis=0)\n append_list = np.zeros((int(append_size)-int(append_size/2), new_width, chanel))\n img_data = np.append(append_list, img_data, axis=0)\n else:\n new_width2 = height * new_height / height\n append_size = new_width2 - new_width\n append_list = np.zeros((new_height, int(append_size/2), chanel))\n img_data = np.append(img_data, append_list, axis=1)\n append_list = np.zeros((new_height, int(append_size)-int(append_size/2), chanel))\n img_data = np.append(append_list, img_data, axis=1)\n\n # チャネル数を取得\n new_width, new_height, new_chanel = img_data.shape\n\n # 変換後の情報を元にImageに変換\n max = img_data.max()\n im = img_data_to_image(img_data)\n\n # Imageを元の大きさにリサイズ\n im = im.resize((width, height), Image.NEAREST)\n\n # 元の大きさにしたImageからimg_dataを作成\n img_data = image_to_img_data(im)\n if max <= 1.0:\n img_data = img_data / 255\n\n # 元のチャネル数に戻す\n if new_chanel == 1:\n img_data = gen_monochrome_filter()(img_data)\n\n return img_data\n return filter\n\ndef gen_region_filter():\n '''\n 画像が写っている範囲を画像のフレーム内に収まる最大まで拡大する。\n [注意] \n このフィルタはimg_dataとImageの変換が行われるので、\n gen_img_region_filter()\n が使用できる場合は、このフィルタを使わず、\n gen_img_region_filter()\n を使用すること。\n '''\n def filter(img_data):\n # 変更前の情報を取得\n width, height, chanel = img_data.shape\n # 画像が存在する領域を取得\n min_x, max_x, min_y, max_y = get_region_xy(img_data)\n\n #print(\"min_x=\" + str(min_x))\n #print(\"max_x=\" + 
str(max_x))\n #print(\"min_y=\" + str(min_y))\n #print(\"max_y=\" + str(max_y))\n\n # 画像が存在する領域の幅と高さ\n new_width = max_x - min_x + 1\n new_height = max_y - min_y + 1\n\n # 画像を中心に移動したときの余白の大きさを求める\n x_margin = int((width - new_width) / 2)\n y_margin = int((height - new_height) / 2)\n\n # img_dataをimageに変換\n max = img_data.max()\n im = img_data_to_image(img_data)\n\n # 画像を中心に移動\n move_x = x_margin - min_x\n move_y = y_margin - min_y\n move_filter = gen_img_move_filter(move_x, move_y)\n im = move_filter(im)\n\n #print(\"x_margin=\" + str(x_margin))\n #print(\"y_margin=\" + str(y_margin))\n #print(\"move_x=\" + str(move_x))\n #print(\"move_y=\" + str(move_y))\n\n # 画像を領域を枠いっぱいに拡大\n x_zoom = width / new_width\n y_zoom = height / new_height\n if x_zoom < y_zoom:\n zoom = x_zoom\n else:\n zoom = y_zoom\n zoom_filter = gen_img_zoom_filter(zoom)\n im = zoom_filter(im)\n\n # 元の大きさにしたImageからimg_dataを作成\n img_data = image_to_img_data(im)\n if max <= 1.0:\n img_data = img_data / 255\n\n # 元のチャネル数に戻す\n if chanel == 1:\n img_data = gen_monochrome_filter()(img_data)\n\n return img_data\n return filter\n\n"
},
{
"alpha_fraction": 0.47140151262283325,
"alphanum_fraction": 0.47556817531585693,
"avg_line_length": 44.91304397583008,
"blob_id": "890decbe0bf154a066999669f5b4d1268f1373e1",
"content_id": "3e914b30e576445ec61a6d71ce2abe1574d3d500",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5604,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 115,
"path": "/20180127/PredictedBoneData.py",
"repo_name": "morio-kino/hello-world",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport csv\n\nclass PredictedBoneData(object):\n\n def __init__(self, bone_list, predict_data, target_bone_name, target_bone_num):\n '''\n [引数]\n lone_list : (<ボーン名>, <親ボーン名>, <ボーンの長さ>) のリスト\n predict_data : predict_data[<ボーンインデックス>][<データ番号>][<XYZの番号>] = -1.0 ~ 1.0\n [変数]\n bone_num : ボーン数\n data_num : データ数\n bone_name_list : ボーン名のリスト。データ出力時はこの順に出力する。\n bone_info_dict : ボーン名 => (<ボーン名>, <親ボーン名>, <ボーンの長さ>) の辞書。\n bone_data_dict : ボーン名 => [(<vecX>, <vecY>, <vecZ>), ...] データ数分格納したリスト の辞書\n '''\n self.bone_list = bone_list\n self.predict_data = predict_data\n\n #self.bone_num = len(self.bone_list)\n self.target_bone_name = target_bone_name\n self.target_bone_num = target_bone_num\n if self.target_bone_num == 1:\n self.data_num = len(self.predict_data)\n else:\n self.data_num = len(self.predict_data[0])\n\n self.bone_name_list = list()\n self.bone_info_dict = dict()\n self.bone_data_dict = dict()\n\n #print(\"##### \" + str(len(predict_data)))\n #print(\"##### \" + str(len(predict_data[0])))\n for bone_index in range(self.target_bone_num):\n bone_name, parent_name, bone_length = self.bone_list[bone_index]\n self.bone_name_list.append(bone_name)\n self.bone_info_dict[bone_name] = (bone_name, parent_name, bone_length)\n\n bone_data_list = list()\n for data_index in range(self.data_num):\n if self.target_bone_num == 1:\n bone_data_list.append((self.predict_data[data_index][0],\n self.predict_data[data_index][1],\n self.predict_data[data_index][2]))\n self.bone_data_dict[target_bone_name] = bone_data_list\n else:\n bone_data_list.append((self.predict_data[bone_index][data_index][0],\n self.predict_data[bone_index][data_index][1],\n self.predict_data[bone_index][data_index][2]))\n self.bone_data_dict[bone_name] = bone_data_list\n\n def write_to_csv(self, out_file, converter=None):\n '''\n 予測した情報をファイルに出力する。\n '''\n\n with open(out_file, 'w', newline='') as f:\n writer = csv.writer(f)\n \n # Boneのヘッダー出力\n writer.writerow(['[Bone num]', str(self.target_bone_num)])\n writer.writerow(['[Bone name]', '[Parent name]', '[Bone length]'])\n # Boneの長さの情報を出力\n for bone_name in self.bone_name_list:\n writer.writerow(self.bone_info_dict[bone_name])\n #for bone_info in self.bone_list:\n # writer.writerow(bone_info)\n \n # フレームのヘッダー情報出力\n bone_header = []\n bone_header.append('[Frame No.]')\n for bone_index in range(1, self.target_bone_num+1):\n bone_header.append('[Bone' + str(bone_index) + ' name]')\n bone_header.append('[Bone' + str(bone_index) + ' vecX]')\n bone_header.append('[Bone' + str(bone_index) + ' vecY]')\n bone_header.append('[Bone' + str(bone_index) + ' vecZ]')\n writer.writerow(bone_header)\n \n #data_len = len(self.predict_data[0])\n for data_index in range(self.data_num):\n data = list()\n data.append(str(data_index+1)) # Frame No.\n for bone_name in self.bone_name_list:\n data.append(bone_name) # Bone<N> name\n\n bone_data_list = self.bone_data_dict[bone_name]\n vecX, vecY, vecZ = bone_data_list[data_index]\n #print(\"### bone name : \" + bone_name)\n #print(\"old_X = \" + str(vecX))\n #print(\"old_Y = \" + str(vecY))\n #print(\"old_Z = \" + str(vecZ))\n if converter != None:\n vecX, vecY, vecZ = converter(vecX, vecY, vecZ)\n #print(\"new_X = \" + str(vecX))\n #print(\"new_Y = \" + str(vecY))\n #print(\"new_Z = \" + str(vecZ))\n data.append(str(vecX)) # Bone<N> vecX\n data.append(str(vecY)) # Bone<N> vecY\n data.append(str(vecZ)) # Bone<N> vecZ\n writer.writerow(data)\n\n '''\n for bone_index in range(self.target_bone_num):\n bone_name, parant_name, bone_length 
= self.bone_list[bone_index]\n data.append(bone_name) # Bone<N> name\n vexX = self.predict_data[bone_index][i][0]\n vexY = self.predict_data[bone_index][i][1]\n vexZ = self.predict_data[bone_index][i][2]\n data.append(str(vexX)) # Bone<N> vecX\n data.append(str(vexY)) # Bone<N> vecY\n data.append(str(vexZ)) # Bone<N> vecZ\n writer.writerow(data)\n '''\n"
}
] | 24 |
BenRemer/Traveling-Salesman-Problem | https://github.com/BenRemer/Traveling-Salesman-Problem | d186f656b368facb22320989f5ca97aa0ca75136 | 398d2a6ee3e8829b76e2d9f6ba97838937b6baba | e0e0bd2e7cfe2a19feb237a1925371d32f28bb3d | refs/heads/master | 2020-05-04T05:13:21.847957 | 2019-04-19T20:45:09 | 2019-04-19T20:45:09 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6347253322601318,
"alphanum_fraction": 0.643909215927124,
"avg_line_length": 35.07500076293945,
"blob_id": "838c583376ddd88978a61d02513df51a37036a48",
"content_id": "488a2837736d1aa784766a726b1c1cf0a2981ea7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5771,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 160,
"path": "/tsp-3510.py",
"repo_name": "BenRemer/Traveling-Salesman-Problem",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport operator\nimport random\nimport sys\nimport multiprocessing\nimport time\nimport objects\n\n# Randomizes the cities and returns as a route\ndef create_route(cities):\n size = len(cities)\n route = random.sample(cities, size)\n # print(route, \"\\n\")\n return route\n\n# Creates a population of size size of routes\ndef initial_population(size, cities):\n population = []\n for i in range(size):\n population.append(create_route(cities))\n return population\n\n# Ranks each route in the population and sorts it by their fitness\ndef rank_routes(population):\n fitness = {}\n for i in range(len(population)):\n fitness[i] = objects.Fitness(population[i]).route_fitness()\n sorted_list = sorted(fitness.items(), key = operator.itemgetter(1), reverse = True) \n # print(sorted_list,'\\n')\n return sorted_list\n\n# Selects a population of routes and saves selected indexes routes \ndef mate(ranked_population, save_size, population):\n selected = []\n array = np.array(ranked_population)\n # print(array)\n cumulative_sum = []\n cumulative_percent = []\n fitness_sum = 0\n for i in range(len(ranked_population)):\n cumulative_sum.append(ranked_population[i][1] + fitness_sum)\n fitness_sum += ranked_population[i][1]\n # print(cumulative_sum)\n for i in range(len(ranked_population)):\n cumulative_percent.append((cumulative_sum[i] * 100) / fitness_sum)\n # print(cumulative_percent)\n for i in range(save_size): # Save the top 'save_size' contenders\n selected.append(ranked_population[i][0])\n for i in range(len(ranked_population) - save_size):\n random_pick = random.random() * 100\n for i in range(len(ranked_population)):\n if random_pick <= cumulative_percent[i]:\n selected.append(ranked_population[i][0])\n break\n # print(select)\n pool = []\n for i in range(len(selected)):\n index = selected[i]\n # print(population[index])\n pool.append(population[index])\n # print(pool)\n return pool\n\n# Takes two parents, get random 'genes' from them and splice them together in the same order\ndef breed(father, mother):\n child = []\n genes_father = []\n genes_mother = []\n gene1 = int(random.random() * len(father))\n gene2 = int(random.random() * len(mother))\n start = min(gene1, gene2)\n end = max(gene1, gene2)\n for i in range(start, end):\n genes_father.append(father[i])\n # print(genes_father)\n genes_mother = [item for item in mother if item not in genes_father] # Keep genes not being kept in father's\n # print(genes_mother)\n child = genes_father + genes_mother\n # print(child)\n return child\n\n# Runs breeding function over the entire population but saving 'save_selection' of top routs without changing\ndef breed_pop(pool, save_size):\n children = []\n keep = len(pool) - save_size\n selection = random.sample(pool, len(pool))\n for i in range(save_size):\n children.append(pool[i])\n for i in range(keep):\n child = breed(selection[i], selection[len(pool) - i - 1])\n children.append(child)\n return children\n\n# Randomly 'mutates' a child by swapping random locations\ndef mutate(child, mutation_rate):\n route_size = len(child)\n for i in range(route_size):\n if (random.random() < mutation_rate):\n j = int(random.random() * len(child))\n city1 = child[i]\n city2 = child[j]\n child[j] = city1\n child[i] = city2\n return child\n\n# Runs mutate function over entire population\ndef mutate_pop(population, mutation_rate):\n mutated = []\n route_size = len(population)\n for i in range(route_size):\n child = mutate(population[i], mutation_rate)\n mutated.append(child)\n return mutated\n\n# 
Takes a generation, ranks them, mates them and saves the next generation\ndef next_generation(current_generation, save_size, mutation_rate):\n pop_ranked = rank_routes(current_generation)\n pool = mate(pop_ranked, save_size, current_generation)\n children = breed_pop(pool, save_size)\n next_generation = mutate_pop(children, mutation_rate)\n return next_generation\n\n# Runs program over certain time limit or till all generations have been made\ndef find_tsp(population, pop_size, save_size, mutation_rate, generations, t_end):\n current_gen = initial_population(pop_size, population)\n for i in range(generations):\n if time.time() >= t_end:\n print('Time limit exceeded. Ending Program.')\n break\n current_gen = next_generation(current_gen, save_size, mutation_rate)\n best_route = current_gen[rank_routes(current_gen)[0][0]]\n distance = str(1 / rank_routes(current_gen)[0][1])\n return best_route, distance\n\n# Create timer and run code\ndef main():\n if len(sys.argv) < 4:\n print('Need correct args, run with: \\ngenetic.py <input> <output> <time>')\n return\n t_end = time.time() + int(sys.argv[3]) - 1\n cityList = []\n input_file = sys.argv[1]\n output_file = sys.argv[2]\n f = open(input_file, \"r\") # Open file\n for line in f:\n line = line.strip('\\n') # Strip off new line character\n split = line.split(\" \", 3) # Split into three parts\n cityList.append(objects.City(x = int(float(split[1])), y = int(float(split[2])), node_id = int(split[0])))\n f.close()\n print('Running for ' + sys.argv[3] + ' seconds.')\n rout, distance = find_tsp(population = cityList, pop_size = 250, save_size = 20, mutation_rate = 0.01, generations = 150000, t_end = t_end)\n f = open(output_file, 'w')\n f.write(distance + '\\n')\n for city in rout:\n f.write(str(city))\n f.write(' ')\n f.write(str(rout[0]))\n\nif __name__ == \"__main__\":\n main()"
},
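The mate() function above builds a cumulative-percentage table and draws random picks against it — classic roulette-wheel (fitness-proportionate) selection. A minimal standalone sketch of the same idea, with made-up fitness scores in place of the repo's (index, fitness) tuples:

    import random

    # Hypothetical fitness scores for five routes (higher is better)
    fitnesses = [0.50, 0.25, 0.15, 0.07, 0.03]

    def roulette_pick(scores):
        # Draw a point on the cumulative distribution and return the index it lands in
        pick = random.random() * sum(scores)
        running = 0.0
        for index, score in enumerate(scores):
            running += score
            if pick <= running:
                return index
        return len(scores) - 1  # guard against floating-point rounding

    # Fitter routes should be selected proportionally more often
    counts = [0] * len(fitnesses)
    for _ in range(10000):
        counts[roulette_pick(fitnesses)] += 1
    print(counts)  # roughly [5000, 2500, 1500, 700, 300]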
{
"alpha_fraction": 0.581267237663269,
"alphanum_fraction": 0.6005509495735168,
"avg_line_length": 19.961538314819336,
"blob_id": "ce66c695dc39b4ef4a1ccc8e04de49c92c092bec",
"content_id": "70432d573c760a3cdb9385588f92aabda35e7741",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 52,
"path": "/race.py",
"repo_name": "BenRemer/Traveling-Salesman-Problem",
"src_encoding": "UTF-8",
"text": "from timeit import default_timer as timer\nimport timeit\nimport time\nimport numpy as np\n\ndef timerStupid():\n start = time.time()\n print(\"hello\")\n end = time.time()\n print(\"Run-time for algo one\" + end - start)\n\ndef timerSmarter():\n #\n print(timeit.timeit(stmt=func1, number=10000))\n print(timeit.timeit(stmt=func2, number=10000))\n\ndef func1():\n print(\"hello\")\n _norm= np.linalg.norm()\n\n #args will need to be hardcoded here\n\ndef func2():\n print(\"hello\")\n #args will need to be hardocded here\n\ndef main():\n\t# input_file = sys.argv[1]\n\t# output_file = sys.argv[2]\n\t# time = sys.argv[3]\n\t# points = read_file(input_file)\n\t# for point in range(1,len(points)):\n\t# \tprint(point, ':' , points[point])\n\t#event = threading.Event\n\t#for(i)\n\n start = time.time()\n\n f = open(\"tsp.txt\", \"r\")\n lines = f.readlines()\n d = open(\"output.txt\", \"w\")\n for line in lines:\n d.write(line)\n\n while(time.time() - start < 10):\n x = 1\n end = time.time()\n print(\"Run-time for algo one\" + str(end - start))\n\n\nif __name__ == \"__main__\":\n main()"
},
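race.py above contrasts ad-hoc time.time() stopwatching with the timeit module. For reference, a correct minimal timeit comparison of two callables (the function bodies are placeholders, just as func1/func2 are stubs above):

    import timeit

    def func1():
        return sum(range(1000))

    def func2():
        total = 0
        for i in range(1000):
            total += i
        return total

    # number=10000 matches the repetition count used in timerSmarter()
    print(timeit.timeit(stmt=func1, number=10000))
    print(timeit.timeit(stmt=func2, number=10000))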
{
"alpha_fraction": 0.5197182893753052,
"alphanum_fraction": 0.5281690359115601,
"avg_line_length": 28.58333396911621,
"blob_id": "0e5bbf410756d74c484a32fba65c56a2096d1be1",
"content_id": "4471acf9486b75c0446aca974fa3fe445f67d10c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1420,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 48,
"path": "/objects.py",
"repo_name": "BenRemer/Traveling-Salesman-Problem",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n# City object\nclass City:\n # Initializes the object\n def __init__(self, x, y, node_id):\n self.x = x\n self.y = y\n self.node_id = node_id\n\n # Finds distance between two cities\n def distance(self, city):\n dist_x = abs(self.x - city.x)\n dsit_y = abs(self.y - city.y)\n distance = np.sqrt((dist_x ** 2) + (dsit_y ** 2))\n return distance\n # Sends over the node ID\n def __repr__(self):\n return str(self.node_id)\n\n# Crates a fitness object to see how good the route is\nclass Fitness:\n # Initializes and sets everything to 0\n def __init__(self, route):\n self.route = route\n self.distance = 0\n self.fitness = 0\n\n # Gets distance for the route\n def route_distance(self):\n if self.distance == 0:\n dist = 0\n for i in range(len(self.route)):\n c_from = self.route[i]\n c_to = None\n if (i + 1) < len(self.route):\n c_to = self.route[i + 1]\n else:\n c_to = self.route[0]\n dist += c_from.distance(c_to)\n self.distance = dist\n return self.distance\n\n # Calculates the fitness of the route, higher is better\n def route_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / float(self.route_distance())\n return self.fitness\n"
},
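City.distance above is plain Euclidean distance, and Fitness.route_fitness is the reciprocal of the total tour length. A quick sanity check, assuming objects.py is on the import path:

    from objects import City, Fitness

    a = City(x=0, y=0, node_id=1)
    b = City(x=3, y=4, node_id=2)
    print(a.distance(b))  # 5.0 — the 3-4-5 triangle

    # A two-city tour covers that leg out and back: distance 10, fitness 1/10
    print(Fitness([a, b]).route_fitness())  # 0.1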
{
"alpha_fraction": 0.7581989765167236,
"alphanum_fraction": 0.7687604427337646,
"avg_line_length": 68.19230651855469,
"blob_id": "5df175877b14d1efbf34b5e720b9bd541db18bbd",
"content_id": "02709e57666c89027f476ad83a88e832a1d4b0f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1811,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 26,
"path": "/README.txt",
"repo_name": "BenRemer/Traveling-Salesman-Problem",
"src_encoding": "UTF-8",
"text": "Name of Group Members: Joshua Vayle and Ben Remer\nEmail addresses: [email protected] and [email protected] \nDate of Submission: 04/19/2019\n\n\nFiles submitted:\n tsp-3510.py → the main file that contains not only the main method (find_tsp) but also all of the methods for the genetic algorithm. This is the most important file in the project submission\n objects.py → this is the object file that holds the object definitions for City (our name for the vertex object) and Fitness (which stores the route, its distance, and the fitness of the route (fitness = 1/distance of the route)\n README.txt → this file is the one you have open right now and contains a small overview of the project and the instructions on how to run it\n Algorithm.pdf → a small writeup on how the algorithm works and our rationale for choosing a genetic algorithm\n mat-output.txt → the output file from the 30 node test case\n\nExecution Instructions\nto run our project simply:\n0) install numpy if not installed by running \"pip install numpy\"\n1) open the command line\n2) using cd command navigate to the location of the project\n3) run the initialization command as specified in the instructions:\n <project file path>: genetic <input-coordinates.txt> <output-tour.txt> <time>\n\nKnown Bugs/Limitations:\nThe only possible limitation is that since the algorithm does do random breeding and mutation the solution route that it\nfinds will be inconsistent. In other words it might not find the same solution every time. This means it is\ntheoretically possible that our algorithm fails to get under the threshold (min total cycle cost) one time you run it,\nbut very well might get it the next time. Therefore, it might be necessary to run the algorithm multiple times to get an\n idea of the average solution it is coming up with.\n"
}
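Judging by the line parsing in main() above (node id first, then x and y, space-separated), a hypothetical three-city input-coordinates.txt would look like:

    1 87 211
    2 91 12
    3 228 39

and a run capped at 30 seconds would be invoked roughly as: python genetic.py input-coordinates.txt output-tour.txt 30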
] | 4 |
jacobabello/shipments | https://github.com/jacobabello/shipments | fef78f047ae8b673b05a0df5c007b6093e9de95c | 49b8c1de880c8ddead280c9dde5c6cd78c66edd3 | 76357af20582ce54c36f06aaf1b35da9b620ce65 | refs/heads/master | 2020-03-23T00:45:57.634709 | 2018-07-15T00:19:08 | 2018-07-15T00:19:08 | 140,884,368 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5784558057785034,
"alphanum_fraction": 0.5921544432640076,
"avg_line_length": 33.91304397583008,
"blob_id": "bb2fcbd26a23e0187f6ce1c09a40dd1dcc88496a",
"content_id": "ec25006c8aa0867425aa34e306aa17dcc52ac180",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1606,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 46,
"path": "/shipments/billoflading.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from shipments.shipment_component import ShipmentComponent\n\n\nclass BillOfLading(ShipmentComponent):\n\n __billtype__ = {\n '02': 'Regular Bill',\n '03': 'In-bond Automated',\n '05': 'Empty Equipment IIT',\n '13': 'Master Bill',\n '14': 'House Bill',\n '15': 'Master FROB',\n '16': 'House FROB',\n '17': 'Simple BOL FROB',\n '18': 'Master BOL w/in-bond'\n }\n\n def __init__(self, billoflading_number, bill_type_code, voyage_number):\n\n if bill_type_code not in self.__billtype__.keys():\n raise ValueError('Bill type code %s is not recognized' % bill_type_code)\n\n self.billoflading_number = billoflading_number\n self.bill_type_code = bill_type_code\n self.voyage_number = voyage_number\n self.master_billoflading_number = None\n\n def generate_json(self):\n\n json_response = {\n 'Bill of lading number': self.billoflading_number,\n 'Bill type': self.__billtype__[self.bill_type_code],\n 'Voyage number': self.voyage_number\n }\n\n if self.bill_type_code in ('14', '16'):\n if self.master_billoflading_number is None:\n raise ValueError('House bill of lading detected please add master '\n 'bill of lading number: add_master_bill_of_lading(eeee)')\n else:\n json_response['Master bill of lading number'] = self.master_billoflading_number\n\n return json_response\n\n def add_master_bill_of_lading_number(self, master_id):\n self.master_billoflading_number = master_id\n"
},
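A short usage sketch of BillOfLading above — bill type '14' (House Bill) requires a master bill number before generate_json() will succeed (all identifiers invented):

    from shipments.billoflading import BillOfLading

    house_bill = BillOfLading('HBOL123', '14', 'VOY42')
    house_bill.add_master_bill_of_lading_number('MBOL999')
    print(house_bill.generate_json())
    # {'Bill of lading number': 'HBOL123', 'Bill type': 'House Bill',
    #  'Voyage number': 'VOY42', 'Master bill of lading number': 'MBOL999'}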
{
"alpha_fraction": 0.615476667881012,
"alphanum_fraction": 0.615476667881012,
"avg_line_length": 23.578432083129883,
"blob_id": "c2ef51843315ef227b17f86a17c1941c880ef5a5",
"content_id": "418b0b85a0d95f3165be8ebf12264be5dbecb3bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2507,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 102,
"path": "/shipments/manifest.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from shipments.carrier import Carrier\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass Manifest(ShipmentComponent):\n\n def generate_json(self):\n return {\n 'Carrier': self.carrier.generate_json(),\n 'Vessel name': self.get_vessel_name(),\n 'Port of unlading': self.get_port_of_unlading(),\n 'Foreign port of lading': self.get_foreign_port_of_lading(),\n 'Manifest Quantity': self.get_manifest_quantity(),\n 'Manifest Unit': self.get_manifest_unit(),\n 'Weight': self.get_weight(),\n 'Weight Unit': self.get_weight_unit(),\n 'Measurement': self.get_measurement(),\n 'Measurement Unit': self.get_measurement_unit()\n }\n\n def set_carrier_code(self, carrier_code):\n self.carrier = Carrier(carrier_code)\n\n return self\n\n def set_vessel_name(self, vessel_name):\n self.vessel_name = vessel_name\n\n return self\n\n def set_port_of_unlading(self, port):\n self.port_of_unlading = port\n\n return self\n\n def set_foreign_port_of_lading(self, port):\n self.port_of_lading = port\n\n return self\n\n def set_manifest_quantity(self, quantity):\n self.manifest_quantity = quantity\n\n return self\n\n def set_manifest_quantity_unit(self, unit):\n self.manifest_quantity_unit = unit\n\n return self\n\n def set_weigh(self, weight):\n self.weight = weight\n\n return self\n\n def set_weigh_unit(self, unit):\n self.weight_unit = unit\n\n return self\n\n def set_measurement(self, measurement):\n self.measurement = measurement\n\n return self\n\n def set_measurement_unit(self, unit):\n self.measurement_unit = unit\n\n return self\n\n def get_carrier(self):\n \"\"\"\n :rtype: Carrier\n \"\"\"\n return self.carrier\n\n def get_vessel_name(self):\n return self.vessel_name\n\n def get_port_of_unlading(self):\n return self.port_of_unlading\n\n def get_foreign_port_of_lading(self):\n return self.port_of_lading\n\n def get_manifest_quantity(self):\n return self.manifest_quantity\n\n def get_manifest_unit(self):\n return self.manifest_quantity_unit\n\n def get_weight(self):\n return self.weight\n\n def get_weight_unit(self):\n return self.weight_unit\n\n def get_measurement(self):\n return self.measurement\n\n def get_measurement_unit(self):\n return self.measurement_unit\n"
},
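Because every setter above returns self, a Manifest can be built fluently. A sketch with made-up values; set_carrier_code expects a SCAC present in the bundled sqlite database, such as the 'AAGJ' row the tests use:

    from shipments.manifest import Manifest

    manifest = (Manifest()
                .set_carrier_code('AAGJ')
                .set_vessel_name('EVER EXAMPLE')
                .set_port_of_unlading('Fake US Port')
                .set_foreign_port_of_lading('Fake Foreign Port')
                .set_manifest_quantity(100).set_manifest_quantity_unit('PCS')
                .set_weight(100).set_weight_unit('KG')
                .set_measurement(100).set_measurement_unit('V'))
    print(manifest.generate_json())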
{
"alpha_fraction": 0.6553080081939697,
"alphanum_fraction": 0.6553080081939697,
"avg_line_length": 28.921567916870117,
"blob_id": "885a8f31f37167a252e208b3e6366b1bf24264e5",
"content_id": "bbdd3a83be49acdad03efdceb5c73b9c6bb293ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1526,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 51,
"path": "/shipments/shipment.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from shipments.billoflading import BillOfLading\nfrom shipments.container import Container\nfrom shipments.manifest import Manifest\nfrom shipments.company import Company\n\n\ndef type_hint(object, object_type):\n if (type(object)) != object_type:\n raise ValueError('object %s is not type of %s' % (object, object_type))\n\n\nclass Shipment(object):\n\n def __init__(self, billoflading, manifest):\n type_hint(billoflading, BillOfLading)\n type_hint(manifest, Manifest)\n\n self.billoflading = billoflading\n self.manifest = manifest\n self.containers = {}\n self.company = {}\n\n def add_container(self, container_number):\n container = Container(container_number)\n self.containers.update({container_number: container})\n\n return container\n\n def get_container_by_container_number(self, container_number):\n if container_number not in self.containers.keys():\n raise ValueError('Container number %s is missing' % container_number)\n\n return self.containers[container_number]\n\n def add_company(self, company_type, name, address):\n\n if company_type not in ('consignee', 'shipper'):\n raise ValueError('Company Type %s is invalid' % company_type)\n\n company = Company(name, address)\n self.company.update({\n company_type: company\n })\n\n return company\n\n def get_company_by_type(self, company_type):\n \"\"\"\n :rtype: Company\n \"\"\"\n return self.company[company_type]\n"
},
{
"alpha_fraction": 0.6544502377510071,
"alphanum_fraction": 0.6544502377510071,
"avg_line_length": 18.200000762939453,
"blob_id": "b53db910531f2d86f6fd064b45874e2e2ecd370a",
"content_id": "0fa37000878b317e688b4db4cb85a37bf38604e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 10,
"path": "/setup.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nsetup(\n name='shipment',\n description='Simple OOP representation of shipments components',\n author='[email protected]',\n packages=[\n 'shipments'\n ]\n)"
},
{
"alpha_fraction": 0.533707857131958,
"alphanum_fraction": 0.540730357170105,
"avg_line_length": 25.370370864868164,
"blob_id": "9289e501d6d61007bb871025b0f69a94b087152d",
"content_id": "138ed80c12679e90a60578286001eac290df677c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1424,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 54,
"path": "/shipments/carrier.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import os\nimport sqlite3\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass Carrier(ShipmentComponent):\n\n def __init__(self, carrier_code):\n con = sqlite3.connect(os.path.dirname(os.path.abspath(__file__)) + \"/../sqlite/carrier_codes.sqlite\",\n isolation_level=None)\n\n cur = con.cursor()\n\n cur.execute(\"SELECT * \"\n \"FROM carrier_codes_2 \"\n \"WHERE carrier_codes_2.scac = \\\"%s\\\"\" % carrier_code)\n\n result = cur.fetchone()\n\n if result is not None:\n self.scac_code = str(result[1]).upper()\n self.name = result[3].upper()\n self.address = result[4].upper()\n self.city = result[5].upper()\n self.state = result[6].upper()\n self.country = result[8].upper()\n\n def generate_json(self):\n return {\n 'scac_code': self.scac_code,\n 'name': self.name,\n 'address': self.address,\n 'city': self.city,\n 'state': self.state,\n 'country': self.country\n }\n\n def get_scac_code(self):\n return self.scac_code\n\n def get_name(self):\n return self.name\n\n def get_address(self):\n return self.address\n\n def get_city(self):\n return self.city\n\n def get_state(self):\n return self.state\n\n def get_country(self):\n return self.country\n"
},
{
"alpha_fraction": 0.5935335159301758,
"alphanum_fraction": 0.5935335159301758,
"avg_line_length": 20.649999618530273,
"blob_id": "2fde880fb3bcbb8860d337b9687b341966ac6f90",
"content_id": "79f670869a2e78ed963078c9f4cacc06862f1add",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 20,
"path": "/shipments/company.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from shipments.shipment_component import ShipmentComponent\n\n\nclass Company(ShipmentComponent):\n\n def __init__(self, name, address):\n self.name = name\n self.address = address\n\n def generate_json(self):\n return {\n 'Name': self.get_name(),\n 'Address': self.get_address()\n }\n\n def get_name(self):\n return self.name\n\n def get_address(self):\n return self.address\n"
},
{
"alpha_fraction": 0.765625,
"alphanum_fraction": 0.765625,
"avg_line_length": 12,
"blob_id": "58c9efcc65c80b9e66aa0bac0dae2cdc71d897d4",
"content_id": "00babd9cd7af5a07f7f6b32cf30c3c78338d4a3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 5,
"path": "/tests/test_hazmat.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\n\n\nclass test_hazmat(unittest.TestCase):\n pass"
},
{
"alpha_fraction": 0.6397355794906616,
"alphanum_fraction": 0.6448769569396973,
"avg_line_length": 47.625,
"blob_id": "1c70ae37787d0d1e9ebf0f2217169ec79ea069b5",
"content_id": "a431a82b08810fbbd178572cbbe67ac53343c374",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2723,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 56,
"path": "/tests/test_main_shipment.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom shipments import generate_shipment\nfrom shipments.container import Container\nfrom shipments.shipment import Shipment, Manifest, BillOfLading\n\n\nclass test_main_shipment(unittest.TestCase):\n def test_generate_shipment_should_return_shipment_obj(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n self.assertIsInstance(shipment, Shipment)\n\n def test_shipment_can_be_initialized_by_using_billoflading_and_manifest(self):\n with self.assertRaises(ValueError):\n generate_shipment(None, Manifest)\n with self.assertRaises(ValueError):\n generate_shipment(BillOfLading, None)\n\n def test_shipment_can_add_container(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n\n container = shipment.add_container('containernumber')\n self.assertIsInstance(container, Container)\n\n def test_can_get_container_object_by_container_number(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n\n container = shipment.add_container('containernumber')\n self.assertEqual(shipment.get_container_by_container_number('containernumber'), container)\n\n def test_throw_exception_if_container_number_is_missing(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n\n with self.assertRaises(ValueError):\n shipment.get_container_by_container_number('containernumber')\n\n def test_add_company_type_consignee(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n consignee = shipment.add_company('consignee', 'Consignee Name', 'Consignee Address')\n self.assertEqual(shipment.get_company_by_type('consignee'), consignee)\n\n def test_add_company_type_shipper(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n shipper = shipment.add_company('shipper', 'shipper Name', 'shipper Address')\n self.assertEqual(shipment.get_company_by_type('shipper'), shipper)\n\n def test_cannot_add_invalid_company_type(self):\n shipment = generate_shipment(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'),\n Manifest())\n with self.assertRaises(ValueError):\n shipment.add_company('invalid_company_type', 'invalid Name', 'invalid Address')\n"
},
{
"alpha_fraction": 0.5672857761383057,
"alphanum_fraction": 0.567611575126648,
"avg_line_length": 27.155963897705078,
"blob_id": "1d3567842f14aa3d07cc2741b2df8ee403f2ee98",
"content_id": "c976cf8d90e370f213be77b8f396c9b414b8af27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3079,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 109,
"path": "/shipments/container.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from shipments.cargo import Cargo\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass Container(ShipmentComponent):\n\n __type_of_services__ = {\n 'BB': 'Break Bulk',\n 'CS': 'Container Station',\n 'CY': 'Container Yard',\n 'HH': 'House‑to‑House',\n 'HL': 'Headload or Devanning',\n 'HP': 'House‑to‑Pier',\n 'MD': 'Mixed Delivery',\n 'NC': 'Non Containerized',\n 'PH': 'Pier to House',\n 'PP': 'Pier to Pier',\n 'RR': 'Roll on ‑ Roll Off'\n }\n\n def __init__(self, container_number):\n self.container_number = container_number\n self.cargoes = {}\n self.list_of_cargoes = []\n\n def generate_json(self):\n json_data = {\n 'Container Number': self.container_number,\n 'Length': self.get_length(),\n 'Height': self.get_height(),\n 'Width': self.get_width(),\n 'Load status': self.get_load_status(),\n 'Type of service': self.get_type_of_service()\n }\n\n json_cargoes = []\n\n if len(self.list_of_cargoes) > 0:\n for cargo in self.list_of_cargoes:\n json_cargoes.append({\n 'Product description': cargo.get_product_description(),\n 'Piece count': cargo.get_piece_count()\n })\n\n json_data.update({'Cargoes': json_cargoes})\n\n return json_data\n\n def set_length(self, lenght):\n self.lenght = lenght\n return self\n\n def set_height(self, height):\n self.height = height\n return self\n\n def set_width(self, width):\n self.width = width\n return self\n\n def set_load_status(self, load_status):\n self.load_status = load_status\n return self\n\n def set_type_of_service(self, type_of_service):\n\n if type_of_service not in self.__type_of_services__.keys():\n raise ValueError('Type Of Service %s is invalid' % type_of_service)\n\n self.type_of_service = type_of_service\n return self\n\n def get_length(self):\n return self.lenght\n\n def get_height(self):\n return self.height\n\n def get_width(self):\n return self.width\n\n def get_load_status(self):\n return self.load_status\n\n def get_type_of_service(self):\n return self.__type_of_services__[self.type_of_service]\n\n def add_cargo(self, sequence_number, product_description, piece_count):\n \"\"\"\n :rtype: Cargo\n \"\"\"\n if sequence_number in self.cargoes.keys():\n raise ValueError('Sequence Number %s is already added' % sequence_number )\n\n cargo = Cargo(sequence_number, product_description, piece_count)\n self.cargoes.update({sequence_number: cargo})\n self.list_of_cargoes.append(cargo)\n\n def get_cargo_by_sequence_number(self, sequence_number):\n \"\"\"\n :rtype: Cargo\n \"\"\"\n return self.cargoes[sequence_number]\n\n def get_all_cargoes(self):\n \"\"\"\n @rtype list of Cargo\n \"\"\"\n return self.list_of_cargoes\n"
},
{
"alpha_fraction": 0.6242004036903381,
"alphanum_fraction": 0.6305969953536987,
"avg_line_length": 36.52000045776367,
"blob_id": "49c59dc13c37485f07009361c0f6c130158de072",
"content_id": "37de122f466662278d085eacbcefc960e8a4350b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1876,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 50,
"path": "/tests/test_billoflading.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom shipments.billoflading import BillOfLading\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass test_billoflading(unittest.TestCase):\n def test_bill_of_lading_should_be_object_of_components(self):\n self.assertIsInstance(BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE'), ShipmentComponent)\n\n def test_bill_type_code_not_recognize(self):\n with self.assertRaises(ValueError):\n BillOfLading('SAMPLEID', '99', 'SAMPLEVOYAGE')\n\n def test_generate_json_simple_bill_of_lading(self):\n real_json = {\n 'Bill of lading number': 'SAMPLEID',\n 'Bill type': 'Regular Bill',\n 'Voyage number': 'SAMPLEVOYAGE'\n }\n\n bill_of_lading = BillOfLading('SAMPLEID', '02', 'SAMPLEVOYAGE')\n self.assertEqual(bill_of_lading.generate_json(), real_json)\n\n def test_generate_json_master(self):\n real_json = {\n 'Bill of lading number': 'SAMPLEID',\n 'Bill type': 'Master Bill',\n 'Voyage number': 'SAMPLEVOYAGE'\n }\n\n bill_of_lading = BillOfLading('SAMPLEID', '13', 'SAMPLEVOYAGE')\n self.assertEqual(bill_of_lading.generate_json(), real_json)\n\n def test_generate_json_house(self):\n real_json = {\n 'Bill of lading number': 'SAMPLEID',\n 'Bill type': 'House Bill',\n 'Voyage number': 'SAMPLEVOYAGE',\n 'Master bill of lading number': 'MASTERID'\n }\n\n bill_of_lading = BillOfLading('SAMPLEID', '14', 'SAMPLEVOYAGE')\n bill_of_lading.add_master_bill_of_lading_number('MASTERID')\n self.assertEqual(bill_of_lading.generate_json(), real_json)\n\n\n def test_force_user_to_add_master_id(self):\n bill_of_lading = BillOfLading('SAMPLEID', '14', 'SAMPLEVOYAGE')\n with self.assertRaises(ValueError):\n bill_of_lading.generate_json()\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 15.11111068725586,
"blob_id": "8de9ca7e7255b1e9b97ac05699cd2fc088f92c9d",
"content_id": "03db176690138df007b0bdffcea4b50970e72ada",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 9,
"path": "/shipments/shipment_component.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import abc\n\n\nclass ShipmentComponent(object):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def generate_json(self):\n pass"
},
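Since ShipmentComponent derives from abc.ABC, a subclass that forgets to implement generate_json cannot be instantiated — a small demonstration:

    from shipments.shipment_component import ShipmentComponent

    class Incomplete(ShipmentComponent):
        pass  # generate_json deliberately missing

    try:
        Incomplete()
    except TypeError as err:
        print(err)  # Can't instantiate abstract class Incomplete ...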
{
"alpha_fraction": 0.6218130588531494,
"alphanum_fraction": 0.6310198307037354,
"avg_line_length": 35.20512771606445,
"blob_id": "f0a2ef5b53643b32dc50818cbef1aa2550b80f82",
"content_id": "b0cd1305c22f1798a91cdd680193024bd1253a51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1412,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 39,
"path": "/tests/test_carrier.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import os\nimport unittest\nimport unittest.mock\nfrom shipments.carrier import Carrier\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass test_carrier(unittest.TestCase):\n def test_carrier_should_be_object_of_components(self):\n self.assertIsInstance(Carrier('AAGJ'), ShipmentComponent)\n\n def test_carrier_should_read_sqlite_file(self):\n path = '/home/jacob/ninja/shipments/shipments/../sqlite/carrier_codes.sqlite'\n\n with unittest.mock.patch('sqlite3.connect') as mock_sqlite_read:\n Carrier('AAGJ')\n mock_sqlite_read.assert_called_with(path, isolation_level=None)\n\n def test_carrier_code_data(self):\n carrier = Carrier('AAGJ')\n self.assertEqual(carrier.get_scac_code(), 'AAGJ')\n self.assertEqual(carrier.get_name(), 'A & A LOGISTICS LLC')\n self.assertEqual(carrier.get_address(), '7600 NW 90TH')\n self.assertEqual(carrier.get_city(), 'JOHNSTON')\n self.assertEqual(carrier.get_state(), 'IA')\n self.assertEqual(carrier.get_country(), '')\n\n def test_json_return(self):\n real_json = {\n 'scac_code': 'AAGJ',\n 'name': 'A & A LOGISTICS LLC',\n 'address': '7600 NW 90TH',\n 'city': 'JOHNSTON',\n 'state': 'IA',\n 'country': ''\n }\n\n carrier = Carrier('AAGJ')\n self.assertEqual(carrier.generate_json(), real_json)\n"
},
{
"alpha_fraction": 0.8518518805503845,
"alphanum_fraction": 0.8518518805503845,
"avg_line_length": 26,
"blob_id": "287f9787edd02a764143609470056261eb467019",
"content_id": "c218310e723e24b5cfd8286b73a94777f985c4eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "# shipments\nOOP implementation of Shipment Components\n"
},
{
"alpha_fraction": 0.5332219004631042,
"alphanum_fraction": 0.57647305727005,
"avg_line_length": 34.7164192199707,
"blob_id": "f865b4db826e5ea4046ba2ced1526129cf37f0f8",
"content_id": "e7e9eb8e9dbbed76378aa74568b201b4e971442d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4786,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 134,
"path": "/tests/test_container.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom shipments.cargo import Cargo\nfrom shipments.container import Container\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass test_container(unittest.TestCase):\n\n def test_container_should_be_object_of_components(self):\n self.assertIsInstance(Container('CONTINERNUMBER'), ShipmentComponent)\n\n def test_container_data(self):\n container = Container('000001')\n\n self.assertEqual(container.set_length(100), container)\n self.assertEqual(container.set_height(100), container)\n self.assertEqual(container.set_width(100), container)\n self.assertEqual(container.set_load_status('Loaded'), container)\n self.assertEqual(container.set_type_of_service('BB'), container)\n self.assertEqual(container.get_length(), 100)\n self.assertEqual(container.get_height(), 100)\n self.assertEqual(container.get_width(), 100)\n self.assertEqual(container.get_load_status(), 'Loaded')\n self.assertEqual(container.get_type_of_service(), 'Break Bulk')\n\n def test_invalid_type_of_service(self):\n container = Container('000001')\n with self.assertRaises(ValueError):\n container.set_type_of_service('ZZ')\n\n def test_get_cargo_by_sequence_number(self):\n container = Container('000001')\n container.add_cargo('001', 'Fake Cargo Description', 10)\n\n self.assertIsInstance(container.get_cargo_by_sequence_number('001'), Cargo)\n\n def test_get_all_cargoes(self):\n container = Container('000001')\n container.add_cargo('001', 'Fake Cargo 1st Description', 10)\n container.add_cargo('002', 'Fake Cargo 2nd Description', 20)\n\n cargoes = container.get_all_cargoes()\n self.assertEqual(len(cargoes), 2)\n self.assertIsInstance(cargoes[0], Cargo)\n self.assertIsInstance(cargoes[1], Cargo)\n\n def test_cannot_add_existing_sequence_number(self):\n container = Container('000001')\n container.add_cargo('001', 'Fake Cargo 1st Description', 10)\n\n with self.assertRaises(ValueError):\n container.add_cargo('001', 'Fake Cargo 1st Description', 10)\n\n def test_json_data_single_container(self):\n real_json = {\n 'Container Number': '000001',\n 'Length': 100,\n 'Height': 100,\n 'Width': 100,\n 'Load status': 'Loaded',\n 'Type of service': 'Break Bulk'\n }\n\n container = Container('000001')\\\n .set_height(100)\\\n .set_length(100)\\\n .set_width(100)\\\n .set_load_status('Loaded')\\\n .set_type_of_service('BB')\n\n self.assertEqual(container.generate_json(), real_json)\n\n def test_json_data_cargo(self):\n real_json = {\n 'Container Number': '000001',\n 'Length': 100,\n 'Height': 100,\n 'Width': 100,\n 'Load status': 'Loaded',\n 'Type of service': 'Break Bulk',\n 'Cargoes': [\n {\n 'Product description': \"Fake Product Description\",\n 'Piece count': 10\n }\n ]\n }\n\n container = Container('000001')\n container.set_height(100)\n container.set_length(100)\n container.set_width(100)\n container.set_load_status('Loaded')\n container.set_type_of_service('BB')\n container.add_cargo('001', 'Fake Product Description', 10)\n\n self.assertEqual(container.generate_json(), real_json)\n\n def test_json_data_multiple_cargo(self):\n real_json = {\n 'Container Number': '000001',\n 'Length': 100,\n 'Height': 100,\n 'Width': 100,\n 'Load status': 'Loaded',\n 'Type of service': 'Break Bulk',\n 'Cargoes': [\n {\n 'Product description': \"Fake Product Description 1\",\n 'Piece count': 10\n },\n {\n 'Product description': \"Fake Product Description 2\",\n 'Piece count': 20\n },\n {\n 'Product description': \"Fake Product Description 3\",\n 'Piece count': 30\n }\n ]\n }\n\n container = Container('000001')\n 
container.set_height(100)\n container.set_length(100)\n container.set_width(100)\n container.set_load_status('Loaded')\n container.set_type_of_service('BB')\n container.add_cargo('001', 'Fake Product Description 1', 10)\n container.add_cargo('002', 'Fake Product Description 2', 20)\n container.add_cargo('003', 'Fake Product Description 3', 30)\n\n self.assertEqual(container.generate_json(), real_json)\n"
},
{
"alpha_fraction": 0.6448703408241272,
"alphanum_fraction": 0.6753100156784058,
"avg_line_length": 36,
"blob_id": "d107425d0f7e57df12b2d53991363ad3bdcb79f4",
"content_id": "7cf1cb7e14c1f973e2645a932f23f5e7384fa7e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 887,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 24,
"path": "/tests/test_cargo.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom shipments.cargo import Cargo\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass test_cargo(unittest.TestCase):\n def test_container_should_be_object_of_components(self):\n self.assertIsInstance(Cargo('001', 'Fake Cargo 1st Description', 10), ShipmentComponent)\n\n def test_cargo_data(self):\n cargo = Cargo('001', 'Fake Cargo 1st Description', 10)\n self.assertEqual(cargo.get_sequence_number(), '001')\n self.assertEqual(cargo.get_product_description(), 'Fake Cargo 1st Description')\n self.assertEqual(cargo.get_piece_count(), 10)\n\n def test_json_return(self):\n real_json = {\n 'Product description': 'Fake Cargo 1st Description',\n 'Piece count': 10\n }\n\n cargo = Cargo('001', 'Fake Cargo 1st Description', 10)\n self.assertEqual(cargo.generate_json(), real_json)"
},
{
"alpha_fraction": 0.61692875623703,
"alphanum_fraction": 0.6309802532196045,
"avg_line_length": 42.33333206176758,
"blob_id": "c666fefbd9ac1eff4ca2dccff93fd76e5557e99d",
"content_id": "5a7f968110694791f8f37b962674d927bf735663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2989,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 69,
"path": "/tests/test_manifest.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom shipments.carrier import Carrier\nfrom shipments.manifest import Manifest\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass test_manifest(unittest.TestCase):\n def test_manifest_should_be_object_of_components(self):\n self.assertIsInstance(Manifest(), ShipmentComponent)\n\n def test_manifest_data(self):\n manifest = Manifest()\n self.assertEqual(manifest.set_carrier_code('AAGJ'), manifest)\n self.assertEqual(manifest.set_vessel_name('SAMPLENAME'), manifest)\n self.assertEqual(manifest.set_port_of_unlading('Fake US Port'), manifest)\n self.assertEqual(manifest.set_foreign_port_of_lading('Fake Foreign Port'), manifest)\n self.assertEqual(manifest.set_manifest_quantity(100), manifest)\n self.assertEqual(manifest.set_manifest_quantity_unit('PCS'), manifest)\n self.assertEqual(manifest.set_weigh(100), manifest)\n self.assertEqual(manifest.set_weigh_unit('KG'), manifest)\n self.assertEqual(manifest.set_measurement(100), manifest)\n self.assertEqual(manifest.set_measurement_unit('V'), manifest)\n\n self.assertIsInstance(manifest.get_carrier(), Carrier)\n self.assertEqual(manifest.get_vessel_name(), 'SAMPLENAME')\n self.assertEqual(manifest.get_port_of_unlading(), 'Fake US Port')\n self.assertEqual(manifest.get_foreign_port_of_lading(), 'Fake Foreign Port')\n self.assertEqual(manifest.get_manifest_quantity(), 100)\n self.assertEqual(manifest.get_manifest_unit(), 'PCS')\n self.assertEqual(manifest.get_weight(), 100)\n self.assertEqual(manifest.get_weight_unit(), 'KG')\n self.assertEqual(manifest.get_measurement(), 100)\n self.assertEqual(manifest.get_measurement_unit(), 'V')\n\n def test_json_data(self):\n real_json = {\n 'Carrier': {\n 'scac_code': 'AAGJ',\n 'name': 'A & A LOGISTICS LLC',\n 'address': '7600 NW 90TH',\n 'city': 'JOHNSTON',\n 'state': 'IA',\n 'country': ''\n },\n 'Vessel name': 'SAMPLENAME',\n 'Port of unlading': 'Fake US Port',\n 'Foreign port of lading': 'Fake Foreign Port',\n 'Manifest Quantity': 100,\n 'Manifest Unit': 'PCS',\n 'Weight': 100,\n 'Weight Unit': 'KG',\n 'Measurement': 100,\n 'Measurement Unit': 'V'\n }\n\n manifest = Manifest()\n manifest.set_carrier_code('AAGJ')\n manifest.set_vessel_name('SAMPLENAME')\n manifest.set_port_of_unlading('Fake US Port')\n manifest.set_foreign_port_of_lading('Fake Foreign Port')\n manifest.set_manifest_quantity(100)\n manifest.set_manifest_quantity_unit('PCS')\n manifest.set_weigh(100)\n manifest.set_weigh_unit('KG')\n manifest.set_measurement(100)\n manifest.set_measurement_unit('V')\n\n self.assertEqual(manifest.generate_json(), real_json)"
},
{
"alpha_fraction": 0.668571412563324,
"alphanum_fraction": 0.668571412563324,
"avg_line_length": 28.16666603088379,
"blob_id": "c30ed24f9fcef3ac4b9d98dcf4b1136e81089c00",
"content_id": "369c58cee84553f3b8cef2a08fef26c4380cab01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 700,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 24,
"path": "/shipments/cargo.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from shipments.shipment_component import ShipmentComponent\n\n\nclass Cargo(ShipmentComponent):\n\n def __init__(self, sequence_number, product_description, piece_count):\n self.sequence_number = sequence_number\n self.product_description = product_description\n self.piece_count = piece_count\n\n def generate_json(self):\n return {\n 'Product description': self.get_product_description(),\n 'Piece count': self.get_piece_count()\n }\n\n def get_sequence_number(self):\n return self.sequence_number\n\n def get_product_description(self):\n return self.product_description\n\n def get_piece_count(self):\n return self.piece_count\n"
},
{
"alpha_fraction": 0.6721938848495483,
"alphanum_fraction": 0.6721938848495483,
"avg_line_length": 33.130435943603516,
"blob_id": "b558fc49a2bc09d90fdb97874b10d9f11a41f9e4",
"content_id": "d652cbfcb3033e6292a4329597e54da74e9145f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 784,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 23,
"path": "/tests/test_company.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom shipments.company import Company\nfrom shipments.shipment_component import ShipmentComponent\n\n\nclass test_company(unittest.TestCase):\n def test_container_should_be_object_of_components(self):\n self.assertIsInstance(Company('shipper Name', 'shipper Address'), ShipmentComponent)\n\n def test_company_data(self):\n company = Company('shipper Name', 'shipper Address')\n self.assertEqual(company.get_name(), 'shipper Name')\n self.assertEqual(company.get_address(), 'shipper Address')\n\n def test_json_data(self):\n real_json = {\n 'Name': 'shipper Name',\n 'Address': 'shipper Address'\n }\n\n cargo = Company('shipper Name', 'shipper Address')\n self.assertEqual(cargo.generate_json(), real_json)"
},
{
"alpha_fraction": 0.7407407164573669,
"alphanum_fraction": 0.7407407164573669,
"avg_line_length": 23.58333396911621,
"blob_id": "817099cfc7854162ba42088b30967b9af1b445c8",
"content_id": "06fc0eebc46e6e79844afffd683ed2c31bb799b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/shipments/__init__.py",
"repo_name": "jacobabello/shipments",
"src_encoding": "UTF-8",
"text": "from .shipment import Shipment\nfrom .billoflading import BillOfLading\nfrom .manifest import Manifest\n\n\ndef generate_shipment(billoflading, manifest):\n \"\"\"\n :type billoflading BillOfLading\n :type manifest Manifest\n :rtype: Shipment\n \"\"\"\n return Shipment(billoflading, manifest)\n\n\n"
}
] | 19 |
vchallier/Twitter-Politics | https://github.com/vchallier/Twitter-Politics | 001d4ad7b5157a668ea40d610f5047e41ecf1b8a | be525932680551e85135cc12b4148785be74fbd2 | 6fb3b1c2f499b6c784264dd460aa1edf49866c3a | refs/heads/master | 2021-05-02T16:04:18.942686 | 2018-02-07T21:22:05 | 2018-02-07T21:22:05 | 120,666,333 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7894737124443054,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 18,
"blob_id": "24d850e3ca6430e1b4c538a2d0f2587da076a5f9",
"content_id": "f330118bc145e75ebc80c55e0959eb897a41050c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 19,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 1,
"path": "/README.md",
"repo_name": "vchallier/Twitter-Politics",
"src_encoding": "UTF-8",
"text": "# Twitter-Politics\n"
},
{
"alpha_fraction": 0.6592614054679871,
"alphanum_fraction": 0.6647432446479797,
"avg_line_length": 36.65217208862305,
"blob_id": "fb08eb37c08c099f2335bf73cc9820265e8dd509",
"content_id": "f8da8ff1271d8d11bcc8fd775e094ee2a916fff8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3466,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 92,
"path": "/data_collection/tweet_import.py",
"repo_name": "vchallier/Twitter-Politics",
"src_encoding": "UTF-8",
"text": "import tweepy\nimport time\nimport csv\nimport json\nfrom config import ckey, csecret, atoken, asecret\n\nprint(ckey, csecret, atoken, asecret)\n \n# OAuth process, using the keys and tokens\nauth = tweepy.OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\n# Creation of the actual interface, using authentication\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\ndef get_collected():\n already_collected = []\n with open('already_collected.csv') as f:\n readCSV = csv.reader(f, delimiter=',')\n for row in readCSV:\n already_collected.append(row[0])\n return already_collected\n\ndef get_trump_supporters():\n trump_supporters = []\n with open('trump_supporters.csv') as f:\n readCSV = csv.reader(f, delimiter=',')\n for row in readCSV:\n trump_supporters.append(row[0])\n return trump_supporters\n\ndef get_tweets_list(api, owner, slug, outfile):\n members = []\n already_collected = get_collected()\n for page in tweepy.Cursor(api.list_members, owner, slug).items():\n members.append(page)\n # create a list containing all usernames\n members = [ m.screen_name for m in members[0:200] if m.screen_name not in already_collected]\n print(members)\n for screen_name in members[0:10]:\n account_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name).items(750)\n process_tweets(account_tweets, screen_name, outfile)\n return\n\ndef process_tweets(account_tweets, screen_name, outfile):\n tweets_list = []\n for status in account_tweets:\n new_tweet = dict()\n new_tweet['account'] = screen_name\n new_tweet['hashtags'] = [hashtag['text'] for hashtag in status._json['entities']['hashtags']]\n new_tweet['urls'] = [url['expanded_url'] for url in status._json['entities']['urls']]\n new_tweet['mentions'] = [user['screen_name'] for user in status._json['entities']['user_mentions']]\n print(new_tweet)\n tweets_list.append(new_tweet)\n with open(outfile, 'a') as fout:\n for tweet in tweets_list:\n fout.write(json.dumps(tweet))\n fout.write('\\n')\n with open('already_collected.csv', 'a') as fout:\n writer=csv.writer(fout)\n writer.writerow([screen_name])\n return\n\ndef retrieve_followers_from_user(user):\n ''' polls twitter for whom a user is following and returns their screen name and location '''\n screen_names = []\n already_collected = get_collected()\n i = 0\n for user in tweepy.Cursor(api.friends, screen_name=user).items():\n if user.screen_name not in already_collected:\n screen_names.append(user.screen_name)\n i += 1\n if i == 10:\n break \n return screen_names\n\ndef get_supporters_tweet(master_account, party):\n if party == 'Republican':\n outfile = 'tweets_republican_support.txt'\n else:\n outfile = 'tweets_democrat_support.txt'\n screen_names = retrieve_followers_from_user(master_account)\n for screen_name in screen_names:\n account_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name).items(750)\n process_tweets(account_tweets, screen_name, outfile)\n return\n \n\n#get_tweets_list(api, 'TheDemocrats', 'house-democrats', 'tweets_democrats_house.txt')\n#get_tweets_list(api, 'HouseGOP', 'house-republicans', 'tweets_republicans_house.txt')\nget_supporters_tweet('HouseDemocrats', 'Democrats')\nget_supporters_tweet('Trump_Support', 'Republicans')\n\n\n"
}
] | 2 |
d7miiZ/Stock-Exchange-Database | https://github.com/d7miiZ/Stock-Exchange-Database | bd6323fa15c820a4754631ec405086c7ac52e8f8 | 7d277bb20242a66db7631d99d8d4ae0f235afa9b | 07f33d4161e1fc02dcfcae20781b012ff23f5e2c | refs/heads/main | 2023-01-31T12:06:36.769452 | 2020-12-09T16:30:45 | 2020-12-09T16:30:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6164460778236389,
"alphanum_fraction": 0.6297929883003235,
"avg_line_length": 42.1268310546875,
"blob_id": "571ea75e2f59391048a334cfa5066f4965d86fa6",
"content_id": "f0b38b8e018f108cd77b158b1a843f4f2630b380",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8841,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 205,
"path": "/Contents/GUIPY/gui.py",
"repo_name": "d7miiZ/Stock-Exchange-Database",
"src_encoding": "UTF-8",
"text": "import tkinter as tk \nfrom tkinter import * \nimport mysql.connector \n\n\ncursor = None # This executes scripts\ndb = None \n \ndef logintodb(user, passw): \n global db\n db = mysql.connector.connect(host =\"localhost\", \n user = user, \n password = passw, \n db =\"Stock_Exchange\") \n global cursor \n cursor = db.cursor()\n\ndef runQuery(query, table):\n # A Table in the database \n try: \n cursor.execute(query) \n print(\"Query Excecuted successfully\") \n except: \n db.rollback() \n print(\"Error occured\")\n finally:\n # Update text\n cursor.execute(f\"select * from {tableToName[table]};\") \n myresult = cursor.fetchall() \n s = []\n for t in myresult:\n s.append(\" \".join([str(tt) for tt in t]))\n texts[table].set(\"\\n\".join(s))\n labels[table].pack()\n\ndef runQueryQ(query, i):\n try: \n print(\"Query Excecuted successfully\") \n cursor.execute(query) \n myresult = cursor.fetchall() \n s = []\n for t in myresult:\n s.append(\" \".join([str(tt) for tt in t]))\n textsQ[i-1].set(\"\\n\".join(s))\n labelsQ[i-1].pack()\n except: \n db.rollback() \n print(\"Error occured\")\n\ndef raiseTk(frame):\n frame.tkraise()\n\ndef getterToDB(qformat, getters, table):\n getters = tuple(g.get() for g in getters)\n qformat = qformat.format(*getters)\n runQuery(qformat, table)\n \n# DB Setup\nwith open(\"dblogin.txt\", \"r\") as file:\n name = file.readline()\n password = file.readline()\nlogintodb(name, password)\n\n# Setup\nroot = tk.Tk() \nw = 1600\nh = 900\nroot.geometry(f\"{w}x{h}\") \nroot.title(\"Stock Market Database\") \ncanvas = Canvas(root, bg =\"blue\", height = 250, width = 300) \nlblfrstrow = tk.Label(root, text =\"Stock Market Database\") \nlblfrstrow.place(x = w/2, y = h/10) \n\n\ntables = [\"mark\", \"inv\", \"bank\" , \"stock\" , \"company\" , \"ceo\" , \"secures\" , \"owns\"]\nnames = [\"Stock_Market\", \"Investor\", \"Bank\" , \"Stock\" , \"Company\" , \"CEO\" , \"Secures\" , \"Owns\"]\ntableToName = {t:n for t, n in zip(tables, names)}\ntableToIndex= {t:i for t, i in zip(tables, range(10))}\nattributeList = [[\"Market_Name\", \"Currency\", \"Market_Cap_in_trillion\", \"Number_Of_Stock_Symbols\"],\n [\"IBAN_number\", \"National_ID\", \"Sex\", \"NetWorth\", \"First_Name\", \"Middle_Name\", \"Last_Name\"],\n [\"Commercial_Registration_Number\",\"Name\",\"Number_Of_Branches\"],\n [\"Stock_Symbol\" , \"Number_Of_Public_Stocks_in_thousand\" , \"Stock_Price\" , \"Market_Name\" , \"Commercial_Registration_Number\"],\n [\"Commercial_Registration_Number\" , \"Number_Of_Directors\" , \"Company_Equity_in_thousand\" , \"Company_Name\"],\n [\"Name\" , \"Sex\" , \"Age\" , \"Commercial_Registration_Number\"],\n [\"Stock_Symbol\" , \"IBAN_number\" , \"Commercial_Registration_Number\"],\n [\"Stock_Symbol\" , \"IBAN_number\" , \"Number_Of_Shares\"]]\n\nprimary = [[\"Market_Name\"],\n [\"IBAN_number\"],\n [\"Commercial_Registration_Number\"],\n [\"Stock_Symbol\"],\n [\"Commercial_Registration_Number\" ],\n [\"Name\", \"Commercial_Registration_Number\"],\n [\"Stock_Symbol\" , \"IBAN_number\" , \"Commercial_Registration_Number\"],\n [\"Stock_Symbol\" , \"IBAN_number\"]]\n\ncommands = {}\ntexts = {}\nlabels = {}\nfor table in tables:\n commands[table] = {}\n texts[table] = StringVar()\n#Add Comaands\ncommands[\"mark\"][\"insert\"] = \"\"\"insert into Stock_Market(Market_Name,Currency,Market_Cap_in_trillion,Number_Of_Stock_Symbols) values (\"{}\", \"{}\", {}, {});\"\"\"\ncommands[\"inv\"][\"insert\"] = \"\"\"insert into Investor(IBAN_number, National_ID, Sex, NetWorth, First_Name, 
Middle_Name, Last_Name) values ('{}','{}','{}',{},'{}','{}','{}');\"\"\"\ncommands[\"bank\"][\"insert\"] = \"\"\"insert into Bank(Commercial_Registration_Number, Name, Number_Of_Branches) values ('{}','{}', {});\"\"\"\ncommands[\"stock\"][\"insert\"] = \"\"\"insert into Stock(Stock_Symbol, Number_Of_Public_Stocks_in_thousand, Stock_Price, Market_Name, Commercial_Registration_Number) values ('{}','{}', {});\"\"\"\ncommands[\"company\"][\"insert\"] = \"\"\"insert into Company(Commercial_Registration_Number,Number_Of_Directors,Company_Equity_in_thousand,Company_Name) values ('{}','{}', {});\"\"\"\ncommands[\"ceo\"][\"insert\"] = \"\"\"insert into CEO(Name,Sex,Age,Commercial_Registration_Number) values ('{}','{}', {});\"\"\"\ncommands[\"secures\"][\"insert\"] = \"\"\"insert into Secures(Stock_Symbol,IBAN_number,Commercial_Registration_Number) values ('{}','{}', {});\"\"\"\ncommands[\"owns\"][\"insert\"] = \"\"\"insert into Owns(Stock_Symbol,IBAN_number,Number_Of_Shares) values ('{}','{}', {});\"\"\"\n#Delete commands\ncommands[\"mark\"][\"delete\"] = \"\"\"delete from Stock_Market where Market_Name='{}';\"\"\"\ncommands[\"inv\"][\"delete\"] = \"\"\"delete from Investor where IBAN_number='{}';\"\"\"\ncommands[\"bank\"][\"delete\"] = \"\"\"delete from Bank where Commercial_Registration_Number='{}';\"\"\"\ncommands[\"stock\"][\"delete\"] = \"\"\"delete from Stock where Stock_Symbol='{}';\"\"\"\ncommands[\"company\"][\"delete\"] = \"\"\"delete from Company where Commercial_Registration_Number='{}';\"\"\"\ncommands[\"ceo\"][\"delete\"] = \"\"\"delete from CEO where Name='{}' AND Commercial_Registration_Number='{}';\"\"\"\ncommands[\"secures\"][\"delete\"] = \"\"\"delete from Secures where Stock_Symbol='{}' AND IBAN_number='{}' AND Commercial_Registration_Number='{}';\"\"\"\ncommands[\"owns\"][\"delete\"] = \"\"\"delete from Owns where Stock_Symbol='{}' AND IBAN_number='{}';\"\"\"\n\n\n# Setup Add and Delete\nframes = {}\nfields = {}\nfieldsDelete = {}\nfor attributes, name, table in zip(attributeList, names, tables):\n frames[table] = (LabelFrame(root, text=name, padx=25, pady=25, bg=\"white\"))\n frames[table].place(x=w/10, y=h/7, width=w/1.2, height=h/1.3)\n\n # Adding\n fields[table] = {}\n for name in attributes:\n fields[table][name] = Entry(frames[table], width=100)\n fields[table][name].insert(0, name)\n fields[table][name].pack()\n\n add = lambda q=commands[table][\"insert\"], vals=fields[table].values(), t=table: getterToDB(q, vals, t)\n Button(frames[table], text=\"Add\", command=add).pack()\n\n # Deleting \n fieldsDelete[table] = {}\n for name in attributes:\n if name in primary[tableToIndex[table]]:\n fieldsDelete[table][name] = Entry(frames[table], width=100)\n fieldsDelete[table][name].insert(0, name)\n fieldsDelete[table][name].pack()\n\n delete = lambda q=commands[table][\"delete\"], vals=fieldsDelete[table].values(), t=table: getterToDB(q, vals, t)\n Button(frames[table], text=\"Delete\", command=delete).pack()\n\n # Text\n labels[table] = Label(frames[table], textvariable=texts[table])\n\ncoms = [\n \"select * from Investor where NetWorth > 420000000;\",\n \"select * from Stock where Number_Of_Public_Stocks_in_thousand < 2000;\",\n 'select Last_Name from Investor where IBAN_number = \"NL12RAB0813971873\";',\n \"\"\"select Investor.* from Secures natural join Investor where Secures.Commercial_Registration_Number=1110001116 group by Investor.IBAN_number;\"\"\",\n 'select Investor.* from Owns natural join Investor where Owns.Stock_Symbol=\"4001\";',\n 'select Company.* from Company 
natural join Stock where Stock.Stock_Price > 103.5;',\n 'select Stock.* from Stock natural join Company where Number_Of_Directors >= 3;,',\n 'select Bank.* from Owns natural join Secures natural join Bank where Stock_Symbol=\"4161\";',\n 'select CEO.* from CEO natural join Company natural join Stock where Stock_Price < 200;',\n 'select CEO.* from CEO natural join Company natural join Stock natural join Investor where NetWorth > 50000 and CEO.Sex=\"M\" group by Name;',\n]\ndef defaultQueries(num):\n runQueryQ(coms[num-1], num)\n \n# Queries 10\nframesQ = {}\nbuttonsQ = {}\ntextsQ = []\nlabelsQ = []\nfor i in range(10):\n framesQ[i] = (LabelFrame(root, text=f\"Query {i+1}\", padx=25, pady=25, bg=\"white\"))\n framesQ[i].place(x=w/10, y=h/7, width=w/1.2, height=h/1.3)\n Button(framesQ[i], text=\"Show\", command=lambda i=i:defaultQueries(i+1)).pack()\n textsQ.append(StringVar())\n labelsQ.append(Label(framesQ[i], textvariable=textsQ[i]))\n\n s = StringVar()\n s.set(coms[i])\n Label(framesQ[i], textvariable=s).pack()\n\n\n\nmenubar = Menu(root)\nfilemenu = Menu(menubar, tearoff=0)\nqueries = Menu(menubar, tearoff=0)\n\nfor name, table in zip(names, tables):\n filemenu.add_command(label=name, command=lambda table=table: frames[table].tkraise())\n\nfor i in range(10):\n queries.add_command(label=f\"Query {i+1}\", command=lambda i=i: framesQ[i].tkraise()) \n\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Exit\", command=root.quit)\nmenubar.add_cascade(label=\"Tables\", menu=filemenu)\nmenubar.add_cascade(label=\"Queries\", menu=queries)\nroot.config(menu=menubar)\n\nframes[\"mark\"].tkraise()\nroot.mainloop() "
},
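The insert/delete commands above splice Entry values straight into SQL via str.format, which is open to SQL injection. A safer variant, assuming the same module-level db and cursor from gui.py, uses mysql-connector's parameterized execute (shown here for the Stock_Market insert with invented values):

    insert_market = ("insert into Stock_Market"
                     "(Market_Name, Currency, Market_Cap_in_trillion, Number_Of_Stock_Symbols) "
                     "values (%s, %s, %s, %s);")
    cursor.execute(insert_market, ("Tadawul", "SAR", 2.42, 203))
    db.commit()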
{
"alpha_fraction": 0.6756756901741028,
"alphanum_fraction": 0.6756756901741028,
"avg_line_length": 11.333333015441895,
"blob_id": "be1eeb9bf3d8455e79a9b42db252cab601fad12f",
"content_id": "d170ddd78d8d27a3760ecfbcf3a30fd3b9466f27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 6,
"path": "/Contents/GUIPY/README.md",
"repo_name": "d7miiZ/Stock-Exchange-Database",
"src_encoding": "UTF-8",
"text": "Add dbLogin.txt\nwith the formatting:\n<br> \n< USERNAME >\n<br>\n< PASSWORD >\n"
},
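For example, a hypothetical dblogin.txt — gui.py reads the username from the first line and the password from the second:

    root
    my_secret_password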
{
"alpha_fraction": 0.6320754885673523,
"alphanum_fraction": 0.7264150977134705,
"avg_line_length": 34,
"blob_id": "4e566d7014fa30b5eb3a272e5cd8323c30f1f3d9",
"content_id": "c2d82a2a4425e2eac0301f1cb329a47464ccc47b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 3,
"path": "/README.md",
"repo_name": "d7miiZ/Stock-Exchange-Database",
"src_encoding": "UTF-8",
"text": "# Stock Exchange Database\n### This is a project for the course CSC380 Databse design\n### Date: 9/12/2020 \n"
},
{
"alpha_fraction": 0.4533785581588745,
"alphanum_fraction": 0.7063353657722473,
"avg_line_length": 36.484535217285156,
"blob_id": "ab96b72ed96440a3fc7e6d2e9a9b2811d0f0b1ef",
"content_id": "ce9beaa50869975afba9dcfdf6b41c1bc4bbb206",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 10907,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 291,
"path": "/Contents/Stock.sql",
"repo_name": "d7miiZ/Stock-Exchange-Database",
"src_encoding": "UTF-8",
"text": "drop database Stock_Exchange;\ncreate database Stock_Exchange;\n\n\tuse Stock_Exchange;\n\n\tcreate table Stock_Market(\n\t\tMarket_Name varchar(100),\n\t\tCurrency char(3) not null,\n\t\tMarket_Cap_in_trillion decimal(3,2),\n\t\tNumber_Of_Stock_Symbols int,\n\n\t\tconstraint Market_Name_PK primary key(Market_Name), \n\t\tconstraint Currency_Check check(Currency like \"___\")\n\t\t);\n\n\tcreate table Company(\n\t\tCommercial_Registration_Number varchar(100),\n\t\tNumber_Of_Directors int,\n\t\tCompany_Equity_in_thousand int,\n\t\tCompany_Name varchar(50) not null,\n\n\t\tconstraint Commercial_Registration_Number_PK primary key(Commercial_Registration_Number),\n\t\tconstraint Number_Of_Directors_Check check(Number_Of_Directors >= 1)\n\t\t);\n\n\tcreate table Investor(\n\t\tIBAN_number varchar(100),\n\t\tNational_ID varchar(50) not null,\n\t\tSex char(1) not null,\n\t\tNetWorth int,\n\t\tFirst_Name varchar(20) not null,\n\t\tMiddle_Name varchar(20) not null,\n\t\tLast_Name varchar(20) not null,\n\n\t\tconstraint IBAN_number_PK primary key(IBAN_number)\n\t\t);\n\n\tcreate table Bank(\n\t\tCommercial_Registration_Number varchar(100),\n\t\tName varchar(50) not null,\n\t\tNumber_Of_Branches int,\n\n\t\tconstraint Commercial_Registration_Number_PK primary key(Commercial_Registration_Number),\n\t\tconstraint Number_Of_Branches_Check check(Number_Of_Branches >= 1)\n\t\t);\n\n\tcreate table Stock(\n\t\tStock_Symbol varchar(10),\n\t\tNumber_Of_Public_Stocks_in_thousand int,\n\t\tStock_Price decimal(8,2),\n\t\tMarket_Name varchar(100),\n\t\tCommercial_Registration_Number varchar(100),\n\n\t\tconstraint Stock_Symbol_PK primary key(Stock_Symbol),\n\t\tconstraint Market_Name_FK foreign key(Market_Name) references Stock_Market(Market_Name) on delete cascade on update cascade,\n\t\tconstraint Commercial_Registration_Number_FK1 foreign key(Commercial_Registration_Number) references Company(Commercial_Registration_Number) on delete cascade on update cascade\n\t\t);\n\n\tcreate table CEO(\n\t\tName varchar(100),\n\t\tSex char(1) not null,\n\t\tAge int not null,\n\t\tCommercial_Registration_Number varchar(100),\n\n\t\tconstraint Compound_PK primary key(Name , Commercial_Registration_Number),\n\t\tconstraint Commercial_Registration_Number_FK2 foreign key(Commercial_Registration_Number) references Company(Commercial_Registration_Number) on update cascade on delete cascade,\n\t\tconstraint Age_Check check(Age >= 23)\n\t\t);\n\n\tcreate table Secures(\n\t\tStock_Symbol varchar(10),\n\t\tIBAN_number varchar(100),\n\t\tCommercial_Registration_Number varchar(100),\n\n\t\tconstraint Compound_PK primary key(Stock_Symbol , IBAN_number , Commercial_Registration_Number),\n\t\tconstraint Stock_Symbol_FK foreign key(Stock_Symbol) references Stock(Stock_Symbol) on update cascade on delete cascade,\n\t\tconstraint IBAN_number_FK foreign key(IBAN_number) references Investor(IBAN_number) on update cascade on delete cascade,\n\t\tconstraint Commercial_Registration_Number_FK3 foreign key(Commercial_Registration_Number) references Bank(Commercial_Registration_Number) on update cascade on delete cascade\n\t\t);\n\n\tcreate table Owns(\n\t\tStock_Symbol varchar(10),\n\t\tIBAN_number varchar(100),\n\t\tNumber_Of_Shares int,\n\n\t\tconstraint Compound_PK primary key(Stock_Symbol , IBAN_number),\n\t\tconstraint Stock_Symbol_FK2 foreign key(Stock_Symbol) references Stock(Stock_Symbol) on update cascade on delete cascade,\n\t\tconstraint IBAN_number_FK2 foreign key(IBAN_number) references Investor(IBAN_number) on update cascade 
on delete cascade\n\t\t);\n\n\tinsert into Stock_Market(\n\t\tMarket_Name,\n\t\tCurrency,\n\t\tMarket_Cap_in_trillion,\n\t\tNumber_Of_Stock_Symbols\n\t\t) values\n\t\t(\"NASDAQ\", \"USD\", 1.72, 8100),\n\t\t(\"Tadawul\", \"SAR\", 2.19, 202),\n\t\t(\"Tokyo Stock Exchange\", \"JPY\", 5.67, 2292),\n\t\t(\"Euronext\", \"EUR\", 4.52, 1462),\n\t\t(\"Shanghai Stock Exchange\", \"CNY\", 5.01, 1041);\n\n\tinsert into Investor(\n\t\tIBAN_number,\n\t\tNational_ID,\n\t\tSex,\n\t\tNetWorth,\n\t\tFirst_Name,\n\t\tMiddle_Name,\n\t\tLast_Name\n\t\t) values\n\t\t('US281264541519999339','9498117064','M',3590070,'Brent','Aahil','Chamberlain'),\n\t\t('DE16500105174529546245','2126117000','M',99801000,'Andreas ','Sheikh ','Perez'),\n\t\t('US341922553675553244','8277836920','M',191000050,'Macauley','Lugo','Nguyen'),\n\t\t('JP821756900040913954463245','8409156649','M',962214000,'Masahiro ','Michiko ','Sakurai'),\n\t\t('JP601009600050571982645704','4142356652','M',1009860022,'Hideki','Matsumoto','Kamiya'),\n\t\t('CH4250514369317289437157','7734693388','M',5156,'Li','Dong','Brody'),\n\t\t('CH7550515162589388976886','4992719807','M',39059,'Bo','Zhou','Galindo'),\n\t\t('SA2125267688145539388743','1304026480','M',358630,'Abdo','Saad','Husain'),\n\t\t('SA4038369116113433969462','1011353190','M',459174,'Abdullah','Mohammed','Hamed'),\n\t\t('NL12RAB0813971873','2300599347','M',173115,'Ralphie','O Brien','Tyson');\n\n\n\tinsert into Bank(\n\t\tCommercial_Registration_Number,\n\t\tName,\n\t\tNumber_Of_Branches\n\t\t) values\n\t\t('1110001116','New York Community Bank',255),\n\t\t('1010001054','Riyadh Bank',341),\n\t\t('5016604726','MUFG Bank',398),\n\t\t('2311911035','HSBC',4000),\n\t\t('1516125270','Bank of China',23682);\n\t\t\n\tinsert into Company(\n\t\tCommercial_Registration_Number,\n\t\tNumber_Of_Directors,\n\t\tCompany_Equity_in_thousand,\n\t\tCompany_Name\n\t\t) values\n\t\t('4457756495',22,62060000,'Amazon'),\n\t\t('5315073274',11,39800000,'Saudi Aramco'),\n\t\t('6229087803',2,20480000,'Toyota Motor'),\n\t\t('9598342161',20,13100000,'Volkswagen'),\n\t\t('6713620014',11,43000000,'Alibaba Group'),\n\t\t('8643657928',25,25003600,'Google'),\n\t\t('8857232768',13,70000000,'SABIC'),\n\t\t('1792169223',17,30310000,'Nokia'),\n\t\t('5098220895',2,43000000,'Sinopec'),\n\t\t('4001894494',2,89048000,'Softbank');\n\n\n\tinsert into CEO(\n\t\tName,\n\t\tSex,\n\t\tAge,\n\t\tCommercial_Registration_Number\n\t\t) values\n\t\t('Jeff Bezos','M',56,'4457756495'),\n\t\t('Amin H. 
Nasser','M',60,'5315073274'),\n\t\t('Akio Toyoda','M',64,'6229087803'),\n\t\t('Herbert Diess','M',62,'9598342161'),\n\t\t('Daniel Zhang','M',48,'6713620014'),\n\t\t('Sundar Pichai','M',48,'8643657928'),\n\t\t('Yousef Al-Benyan','M',57,'8857232768'),\n\t\t('Pekka Lundmark','M',56,'1792169223'),\n\t\t('Fu Chengyu','M',69,'5098220895'),\n\t\t('Masayoshi Son','M',63,'4001894494');\n\n\n\tinsert into Stock(\n\t\tStock_Symbol,\n\t\tNumber_Of_Public_Stocks_in_thousand,\n\t\tStock_Price,\n\t\tMarket_Name,\n\t\tCommercial_Registration_Number\n\t\t) values\n\t\t('AMZN',500890,3185,'NASDAQ','4457756495'),\n\t\t('2222',940699,35.9,'Tadawul','5315073274'),\n\t\t('4161',297472,7301,'Tokyo Stock Exchange','6229087803'),\n\t\t('VOW3',295089,149.08,'Euronext','9598342161'),\n\t\t('HKG',274500,269,'Shanghai Stock Exchange','6713620014'),\n\t\t('GOOGL',289886,1787,'NASDAQ','8643657928'),\n\t\t('2010',660000,96,'Tadawul','8857232768'),\n\t\t('4001',215350,7250,'Tokyo Stock Exchange','4001894494'),\n\t\t('NOKIA',988089,3.42,'Euronext','1792169223'),\n\t\t('SPC',1501,301,'Shanghai Stock Exchange','5098220895');\n\n\tinsert into Owns(\n\t\tStock_Symbol,\n\t\tIBAN_number,\n\t\tNumber_Of_Shares\n\t\t) values\n\t\t(\"AMZN\", \"US281264541519999339\", 420),\n\t\t(\"AMZN\", \"US341922553675553244\", 321),\n\t\t(\"AMZN\", \"NL12RAB0813971873\", 40),\n\t\t(\"2222\", \"SA2125267688145539388743\", 1),\n\t\t(\"2222\", \"DE16500105174529546245\", 91),\n\t\t(\"2222\", \"CH7550515162589388976886\", 48),\n\t\t(\"4161\", \"SA2125267688145539388743\", 2),\n\t\t(\"4161\", \"CH4250514369317289437157\", 1323),\n\t\t(\"4161\", \"JP821756900040913954463245\", 99),\n\t\t(\"VOW3\", \"CH7550515162589388976886\", 123),\n\t\t(\"VOW3\", \"US341922553675553244\", 2311),\n\t\t(\"VOW3\", \"DE16500105174529546245\", 1053),\n\t\t(\"HKG\", \"JP601009600050571982645704\", 9),\n\t\t(\"HKG\", \"SA4038369116113433969462\", 32),\n\t\t(\"HKG\", \"JP821756900040913954463245\", 1942),\n\t\t(\"GOOGL\", \"US341922553675553244\", 26),\n\t\t(\"GOOGL\", \"NL12RAB0813971873\", 861),\n\t\t(\"GOOGL\", \"SA4038369116113433969462\", 34),\n\t\t(\"2010\", \"CH4250514369317289437157\", 39),\n\t\t(\"2010\", \"CH7550515162589388976886\", 504),\n\t\t(\"4001\", \"JP821756900040913954463245\", 73),\n\t\t(\"4001\", \"NL12RAB0813971873\", 163),\n\t\t(\"4001\", \"US341922553675553244\", 16),\n\t\t(\"NOKIA\", \"JP601009600050571982645704\", 19),\n\t\t(\"NOKIA\", \"SA2125267688145539388743\", 3),\n\t\t(\"NOKIA\", \"DE16500105174529546245\", 51),\n\t\t(\"SPC\", \"CH7550515162589388976886\", 342),\n\t\t(\"SPC\", \"DE16500105174529546245\", 4),\n\t\t(\"SPC\", \"SA2125267688145539388743\", 999);\n\n\n\tinsert into Secures(\n\t\tStock_Symbol,\n\t\tIBAN_number,\n\t\tCommercial_Registration_Number\n\t\t) values\n\t\t(\"AMZN\", \"US281264541519999339\", \"1110001116\"),\n\t\t(\"AMZN\", \"US341922553675553244\", \"1110001116\"),\n\t\t(\"AMZN\", \"NL12RAB0813971873\", \"2311911035\"),\n\t\t(\"2222\", \"SA2125267688145539388743\", \"1010001054\"),\n\t\t(\"2222\", \"DE16500105174529546245\", \"2311911035\"),\n\t\t(\"2222\", \"CH7550515162589388976886\", \"1516125270\"),\n\t\t(\"4161\", \"SA2125267688145539388743\", \"1010001054\"),\n\t\t(\"4161\", \"CH4250514369317289437157\", \"1516125270\"),\n\t\t(\"4161\", \"JP821756900040913954463245\", \"5016604726\"),\n\t\t(\"VOW3\", \"CH7550515162589388976886\", \"1516125270\"),\n\t\t(\"VOW3\", \"US341922553675553244\", \"1110001116\"),\n\t\t(\"VOW3\", \"DE16500105174529546245\", \"2311911035\"),\n\t\t(\"HKG\", \"JP601009600050571982645704\", 
\"5016604726\"),\n\t\t(\"HKG\", \"SA4038369116113433969462\", \"1010001054\"),\n\t\t(\"HKG\", \"JP821756900040913954463245\", \"5016604726\"),\n\t\t(\"GOOGL\", \"US341922553675553244\", \"1110001116\"),\n\t\t(\"GOOGL\", \"NL12RAB0813971873\", \"1010001054\"),\n\t\t(\"GOOGL\", \"SA4038369116113433969462\", \"2311911035\"),\n\t\t(\"2010\", \"CH4250514369317289437157\", \"5016604726\"),\n\t\t(\"2010\", \"CH7550515162589388976886\", \"1516125270\"),\n\t\t(\"4001\", \"JP821756900040913954463245\", \"1010001054\"),\n\t\t(\"4001\", \"NL12RAB0813971873\", \"2311911035\"),\n\t\t(\"4001\", \"US341922553675553244\", \"5016604726\"),\n\t\t(\"NOKIA\", \"JP601009600050571982645704\", \"2311911035\"),\n\t\t(\"NOKIA\", \"SA2125267688145539388743\", \"1010001054\"),\n\t\t(\"NOKIA\", \"DE16500105174529546245\", \"2311911035\"),\n\t\t(\"SPC\", \"CH7550515162589388976886\", \"1516125270\"),\n\t\t(\"SPC\", \"DE16500105174529546245\", \"1010001054\"),\n\t\t(\"SPC\", \"SA2125267688145539388743\", \"1010001054\");\n\n\t-- describe Stock_Market;\n\t-- describe Stock;\n\t-- describe Company;\n\t-- describe CEO;\n\t-- describe Investor;\n\t-- describe Bank;\n\t-- describe Secures;\n\t-- describe Owns;\n\n\t-- select * from Stock_Market;\n\t-- select * from Stock;\n\t-- select * from Company;\n\t-- select * from CEO;\n\t-- select * from Investor;\n\t-- select * from Bank;\n\t-- select * from Owns;\n\t-- select * from Secures;\n\n\t-- Queries\n\t-- select * from Investor where NetWorth > 420000000;\n\t-- select * from Stock where Number_Of_Public_Stocks_in_thousand < 2000; \n\t-- select Last_Name from Investor where IBAN_number = \"NL12RAB0813971873\";\n\t/* select Investor.* from Secures natural join Investor \n\t where Secures.Commercial_Registration_Number=1110001116 group by Investor.IBAN_number;*/ \n\t-- select Investor.* from Owns natural join Investor where Owns.Stock_Symbol=\"4001\";\n\t-- select Company.* from Company natural join Stock where Stock.Stock_Price > 103.5;\n\t-- select Stock.* from Stock natural join Company where Number_Of_Directors >= 3; \n\t-- select Bank.* from Owns natural join Secures natural join Bank where Stock_Symbol=\"4161\";\n\t-- select CEO.* from CEO natural join Company natural join Stock where Stock_Price < 200;\n\t/* select CEO.* from CEO natural join Company \n\t natural join Stock natural join Investor\n\t where NetWorth > 50000 and CEO.Sex=\"M\" group by Name;*/ -- FULL SET"
}
] | 4 |
marisamathura/cs3357networks | https://github.com/marisamathura/cs3357networks | a9e104df96e632ce51b5114fbdb67485573b5a4c | 2f65cb5285ff2740e99f46764e53a6e41f987ac1 | 24bc7f8e0c2abdae2c0d3f7c0119df958a7af833 | refs/heads/master | 2020-04-07T14:57:55.431795 | 2017-02-15T22:16:13 | 2017-02-15T22:16:13 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7061567306518555,
"alphanum_fraction": 0.7266790866851807,
"avg_line_length": 31.484848022460938,
"blob_id": "f2c56aac2398a96b82256f33166c64ccb4fe63e3",
"content_id": "278011d715bcdaa6ef880d67e2f74a62f880fcfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1078,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 33,
"path": "/Python_Server.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "import socket\nimport datetime\n\nTCP_IP = '192.168.131.129'\nTCP_PORT = 5005\n\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserverSocket.bind((TCP_IP, TCP_PORT))\nserverSocket.listen(1)\nprint('Server Address: ', TCP_IP)\nprint('Server ready to receive')\nwhile 1:\n\t# Receive connection\n\tconn, addr = serverSocket.accept()\n\tprint('Client Address: ', addr)\n\tprint('Connection to Client is Established')\n\t# Process Client Request here\n\tbyte_sentence = conn.recv(1024)\n\t# User message is decoded\n\tsentence = byte_sentence.decode()\n\treply_string = sentence\n\tif sentence == 'What is the current date and time?':\n\t\tnow = datetime.datetime.now()\n\t\t#“Current Date and Time – MM/DD/YYYY hh:mm:ss”\n\t\treply_string = now.strftime(\"Current Date and Time: %m/%d/%Y %I:%M:%S\")\n\t\tprint('Successfully sent time to client')\n\telse:\n\t\treply_string = \"Invalid request: I have chortles!\"\n\t\tprint('The client requested nothing but chortles')\n\t# ensure that response is encoded in ascii format when sent\n\treply_byte = reply_string.encode('ascii')\n\tconn.send(reply_byte)\n\tconn.close()\n"
},
{
"alpha_fraction": 0.59176105260849,
"alphanum_fraction": 0.6074150204658508,
"avg_line_length": 31.804054260253906,
"blob_id": "4430ecbf8454e6e4398a9f9cc89df66b0c807beb",
"content_id": "bb605cf6fb3c0c86544ff04dc6a32734fa59221a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4855,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 148,
"path": "/UDP_Client.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "#UDP_Client.py\n#David Cosman\n#\n#A simple UDP Client implementing rdt2.2\n#Coded in Python 3.5\n\nimport binascii\nimport socket\nimport struct\nimport sys\nimport hashlib\n\ndef main():\n UDP_IP = \"127.0.0.1\"\n UDP_PORT = 5005\n MY_UDP_IP = \"192.168.131.130\"\n unpacker = struct.Struct('I I 32s')\n \n print(\"UDP target IP:\", UDP_IP)\n print(\"UDP target port:\", UDP_PORT)\n\n #byte data to be sent in packets\n data = [b'NCC-1701', b'NCC-1664', b'NCC-1017']\n #Create client socket\n clientSock = CreateSocket(MY_UDP_IP, UDP_PORT);\n #Set seqNum to 0 as default.\n seq = 0\n for i in range(len(data)):\n print(\"\\n+++Sending packet to server with values:+++\")\n #ACK = 1, 0 is NAK\n ack = 0;\n #Build the UDP Packet\n UDP_Packet = BuildPacket(ack, seq, data[i]);\n #Send the UDP Packet\n SendPacket (clientSock, UDP_Packet, UDP_IP, UDP_PORT);\n print(\"Packet successfully sent\") #---yay---\n while ack == 0:\n saved_Packet = UDP_Packet\n #Handle timeout --to test if it works--\n while True:\n try:\n #Receive reply\n UDP_Packet = RcvData(clientSock, unpacker);\n break\n except socket.timeout:\n print(\"Timeout reached, resending packet\")\n SendPacket(clientSock, saved_Packet, UDP_IP, UDP_PORT);\n #Check Server reply on ack --REWRITE & CLEAN UP--\n if UDP_Packet[0] != 0:\n print(\"Server replied: Packet ACK\")\n else:\n print(\"Server replied: Packet NAK\")\n #Create the Checksum for comparison\n chksum = CreateChksum(UDP_Packet[0],UDP_Packet[1]);\n \t #Compare Checksums to test for corrupt replies\n ack = Checksum(UDP_Packet, chksum);\n #Is the sequence order correct?\n seqCorrect = Checkseq(UDP_Packet, seq);\n if ack == 0 or seqCorrect == False:\n print(\"--Resending packet--\")\n #send again if corrupt or no seq match.\n UDP_Packet = BuildPacket(ack, seq, data[i]);\n SendPacket (clientSock, UDP_Packet, UDP_IP, UDP_PORT);\n #Update the seq number in client side\n if seq == 0:\n seq = 1\n else:\n seq = 0\n\n clientSock.close(); #terminate client connection\n\n#Method to recieve and unpack data from packets\n#Used for ACK packets from server\n#Returns packet in tuple form\ndef RcvData(sock, unpacker):\n data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes\n UDP_Packet = unpacker.unpack(data) # extract data\n print(\"--Reply Received!--\")\n print(\"received from:\", addr)\n print(\"received reply:\", UDP_Packet)\n return UDP_Packet; \n\n#Creates a Socket that can send and recieve packets\n#Using bind because we need to recieve acks from server\ndef CreateSocket(IP, PORT):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(0.9)\n sock.bind((IP, PORT))\n return sock;\n\n#Sends the specified packet to given IP and PORT\ndef SendPacket (sock, packet, IP, PORT):\n sock.sendto(packet, (IP, PORT))\n return;\n\n#Builds the packet with specified values\n#Returns the built packet in tuple form\ndef BuildPacket (ack, seq, data):\n print(\"ACK: \", ack)\n print(\"Seq num: \", seq)\n print(\"data: \",data)\n #Create Checksum\n chksum = CreateChksum(ack, seq, data);\n values = (ack,seq,data,chksum)\n #Structure is as follows; AckNum, SeqNum, data, Chksum\n UDP_Packet_Data = struct.Struct('I I 8s 32s')\n UDP_Packet = UDP_Packet_Data.pack(*values)\n return UDP_Packet;\n\n#Creates the checksum value for the packet to send\n#Returns the checksum value\ndef CreateChksum(ack, seq, data = None):\n if (data != None):\n values = (ack,seq,data)\n UDP_Data = struct.Struct('I I 8s')\n else:\n values = (ack,seq)\n UDP_Data = struct.Struct('I I')\n packed_data = 
UDP_Data.pack(*values)\n chksum = bytes(hashlib.md5(packed_data).hexdigest(), encoding=\"UTF-8\")\n print(\"Chksum: \",chksum)\n return chksum;\n\n#Compares computed chksum value with original packet's\n#Returns ACK = 1, NAK = 0\n#\n#not to be confused with CreateChksum\ndef Checksum(UDP_Packet, chksum):\n ack = 0\n if UDP_Packet[2] == chksum:\n print ('Checksums Match; recieved packet correct')\n ack = 1\n else:\n print ('Checksums do not Match; recieved packet corrupt')\n return ack;\n\n#checks the sequence number of the current packet.\n#returns True if matching, False otherwise.\ndef Checkseq(UDP_Packet, seq):\n if UDP_Packet[1] == seq:\n print ('Sequence numbers match; ',seq)\n return True;\n else:\n print ('Sequence number misalignment; expected ',seq)\n return False;\n\nif __name__ == \"__main__\":\n main();\n"
},
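The rdt2.2 client above packs (ack, seq, payload) with struct and appends an MD5 hex digest as a 32-byte trailer; the receiver recomputes the digest over the header and payload to detect corruption. A minimal sketch of that pack/verify round trip under the same 'I I 8s 32s' layout, with no sockets involved (the payload value is made up):

import struct
import hashlib

def build(ack, seq, data):
    # the checksum covers only the header + payload, never itself
    body = struct.Struct('I I 8s').pack(ack, seq, data)
    chksum = bytes(hashlib.md5(body).hexdigest(), encoding="UTF-8")
    return struct.Struct('I I 8s 32s').pack(ack, seq, data, chksum)

def verify(packet):
    ack, seq, data, chksum = struct.Struct('I I 8s 32s').unpack(packet)
    body = struct.Struct('I I 8s').pack(ack, seq, data)
    return chksum == bytes(hashlib.md5(body).hexdigest(), encoding="UTF-8")

pkt = build(0, 1, b'NCC-1701')
assert verify(pkt)                 # intact packet passes
bad = pkt[:8] + b'X' + pkt[9:]     # flip one payload byte
assert not verify(bad)             # corruption is detected
print("checksum round trip OK")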
{
"alpha_fraction": 0.7286501526832581,
"alphanum_fraction": 0.7561983466148376,
"avg_line_length": 32,
"blob_id": "aec5e095957f51ccd080a0c7f3dff46166157ea6",
"content_id": "7e34887740d949c7a1f1b6fdfcaf7c3e844b1e0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 726,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 22,
"path": "/Assignment2/Python_Client.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "import socket\n# Connection to establish with the Client\nTCP_IP = '192.168.131.129'\nTCP_PORT = 5005\n\n# Establish connection\nprint(\"Attempting to contact server at \",TCP_IP,\":\",TCP_PORT)\nclientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclientSocket.connect((TCP_IP, TCP_PORT))\nprint (\"Connection to Server Established\")\n# Valid request: What is the current date and time?\nsentence = input('Type request to server: ')\n\n# Encode sentence in bytes to send to server\nbyte_sentence = sentence.encode('ascii')\nclientSocket.send(byte_sentence)\n# once the result arrives, decode it to ascii\nbyte_result = clientSocket.recv(1024)\nresult = byte_result.decode('ascii')\nprint ('From Server: ', result)\n\nclientSocket.close()\n"
},
{
"alpha_fraction": 0.7005987763404846,
"alphanum_fraction": 0.71856290102005,
"avg_line_length": 15.699999809265137,
"blob_id": "7a38415adf4016c6676a4c4acb746984f07aeade",
"content_id": "39947c1e568cf899f721d3b33ed0a65a39e9e849",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 10,
"path": "/PythonTutorial/Exercise Files/Ch2/variables_start.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "# \n# Example file for variables\n# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)\n\n# Declare a variable and initialize it\nf = 0;\nprint f\n\ndel f\nprint f\n"
},
{
"alpha_fraction": 0.6326530575752258,
"alphanum_fraction": 0.6326530575752258,
"avg_line_length": 24,
"blob_id": "a265928fb7b9a98f7f83d7ad23f8156a43a7db5d",
"content_id": "7d5d398df3a8a2db625ef29637b9fcb5cffe2626",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 2,
"path": "/PythonTutorial/Exercise Files/test.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "x = input('Press something: ')\nprint x + \" hello\""
},
{
"alpha_fraction": 0.7069970965385437,
"alphanum_fraction": 0.7361515760421753,
"avg_line_length": 32.29999923706055,
"blob_id": "360d3fea54a740d7523123fed80e8f88d382a508",
"content_id": "3470e47e30eedd4487a0a9385e69e74f6a84173a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 20,
"path": "/Python_Client.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "import socket\r\n# Connection to establish with the Client\r\nTCP_IP = '192.168.131.129'\r\nTCP_PORT = 5005\r\n# Establish connection\r\nprint(\"Attempting to contact server at \",TCP_IP,\":\",TCP_PORT)\r\nclientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclientSocket.connect((TCP_IP, TCP_PORT))\r\nprint (\"Connection to Server Established\")\r\n\r\nsentence = input('Type request to server: ')\r\n# Encode sentence in bytes to send to server\r\nbyte_sentence = sentence.encode()\r\nclientSocket.send(byte_sentence)\r\n# once the result arrives, decode it to ascii\r\nbyte_result = clientSocket.recv(1024)\r\nresult = byte_result.decode('ascii')\r\nprint ('From Server: ', result)\r\n\r\nclientSocket.close()\r\n"
},
{
"alpha_fraction": 0.6494845151901245,
"alphanum_fraction": 0.6494845151901245,
"avg_line_length": 23.5,
"blob_id": "e031b9d95c2f4c32005534d1a755e70125802e78",
"content_id": "a34e3a3b4e6d2baff7ac4d13ff85098ee90a9295",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 4,
"path": "/Assignment2/test.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "# test python file\nprint (\"Test Start\")\nx = input ('write something here: ')\nprint (\"Hello \" + x)"
},
{
"alpha_fraction": 0.4585253596305847,
"alphanum_fraction": 0.5345622301101685,
"avg_line_length": 14.535714149475098,
"blob_id": "7f29d405240b41cb81f608e0d7a5406e994720fa",
"content_id": "f7696b75f30cdb1717e2756e526abd666ec916a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 28,
"path": "/test.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "<<<<<<< HEAD\ndef main ():\n\ttest(0,0,b'hello');\n\ttest(0,1);\n\ndef test(a, b, c = None):\n\tif (c != None):\n\t\tprint (\"no data\")\n\telse:\n\t\tprint (\"theres data\")\n\treturn;\n\nif __name__ == \"__main__\":\n=======\ndef main ():\n\ttest(0,0,b'hello');\n\ttest(0,1);\n\ndef test(a, b, c = None):\n\tif (c != None):\n\t\tprint (\"no data\")\n\telse:\n\t\tprint (\"theres data\")\n\treturn;\n\nif __name__ == \"__main__\":\n>>>>>>> 2d225daa6d64105381ac12cc1238bc36c0d2ad31\n\tmain();"
},
{
"alpha_fraction": 0.6637499928474426,
"alphanum_fraction": 0.6862499713897705,
"avg_line_length": 24,
"blob_id": "08d06ed8c6900339edad8379b31b4ad7ecc94429",
"content_id": "09634f2beaebb444983d3df910bc4df73e67fe94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 32,
"path": "/PythonTutorial/Exercise Files/Ch2/classes_start.py",
"repo_name": "marisamathura/cs3357networks",
"src_encoding": "UTF-8",
"text": "#\n# Example file for working with classes\n# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)\n# self = refers to the object itself, the instance being operated on\n# ^ does not have to be called, must be included always\n\nclass myClass():\n def method1(self):\n print \"myClass method1\"\n \n def method2(self, someString):\n print \"myClass method2: \" + someString\n\nclass anotherClass(myClass): #inheriting myClass\n def method2(self):\n print \"anotherClass method2\"\n \n def method1(self):\n myClass.method1(self);\n print \"anotherClass method1\"\n\ndef main():\n # exercise the class methods\n c = myClass() #instantiates a version of myClass\n c.method1()\n c.method2(\"This is a string\")\n c2 = anotherClass()\n c2.method1()\n c2.method2()\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 9 |
HNYuuu/BUPT-classroom-finding | https://github.com/HNYuuu/BUPT-classroom-finding | e2ae9ed9c0e6ec9c1facc76ebe3161ef0e4b9924 | 563023cf964ed5d02a92307983b1f987ab531f11 | 000671b6daaaab2d4d4e98d2198bede50befcf42 | refs/heads/master | 2021-03-31T01:00:48.154858 | 2018-03-13T03:09:29 | 2018-03-13T03:09:29 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7475410103797913,
"alphanum_fraction": 0.8229508399963379,
"avg_line_length": 22.461538314819336,
"blob_id": "721b3ce43e2925baff3d7cffc5708ae735aeb6f2",
"content_id": "9a38b2503fdd9f3aa11c011d3998ebb454498e5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 739,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 13,
"path": "/README.md",
"repo_name": "HNYuuu/BUPT-classroom-finding",
"src_encoding": "UTF-8",
"text": "# BUPT-classroom-finding\n\n自制北邮本部空教室查询程序,使用方法如下:\n\n`第一行`请选择开始自习的时间段(8-10为一二节课,10-12为三四节课,13-15为五六节课,15-17为七八节课,17-18为晚饭时间,18-20为晚上选修课时间)\n\n`第二行`请选择结束自习的时间段,该时间段可以大于等于开始时间段\n\n`第三行`请选择想去的教学楼\n\n点击`search`按钮即可以搜索符合要求的教室,因为**本着不换教室的原则**,所以如果没有符合要求的教室,请缩小时间范围或者更改教学楼再进行尝试\n\n**务必注意:该程序必须在接入校内网络时才可以使用,否则无法爬取教务处空教室信息**\n"
},
{
"alpha_fraction": 0.6033769845962524,
"alphanum_fraction": 0.6447277665138245,
"avg_line_length": 21.858266830444336,
"blob_id": "02d9ecc13ff55f6115a92184a592f4632ae23942",
"content_id": "bb75872eb6ca987bad437f8c42a9cdd3ec2a7d0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3118,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 127,
"path": "/main_win.py",
"repo_name": "HNYuuu/BUPT-classroom-finding",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport tkinter as tk\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n# import sys\n\n# reload(sys) \n# sys.setdefaultencoding('utf-8') \n\ncache = {}\nresult_cache = {}\ntemp = {}\nflag = 0\ntime_map = {'8-10':0, '10-12':1, '13-15':2, '15-17':3, '17-18':4, '18-20':5}\nbuilding_map = {u'教一':1, u'教二':2, u'教三':3, u'教四':4}\n\np = re.compile(r'-(\\d+),')\n\nr = requests.get(\"http://jwxt.bupt.edu.cn/zxqDtKxJas.jsp\")\n# print(r.text)\nsoup = BeautifulSoup(r.text, 'html.parser')\nmain_table = soup.find_all('table')[-1]\n# print(type(main_table))\n\nfor i in range(6):\n\tcache[i] = main_table.find_all('tr')[i]\n# print(cache)\n\nfor i in range(6):\n\tcurrent_class_state = cache[i].find_all('td')[-1]\n\t# print(current_class_state)\n\tfor string in current_class_state.stripped_strings:\n\t\t# print(string)\n\t\tif '教一楼' in string:\n\t\t\tflag = 1\n\t\t\tcontinue\n\t\telif '教二楼' in string:\n\t\t\tflag = 2\n\t\t\tcontinue\n\t\telif '教三楼' in string:\n\t\t\tflag = 3\n\t\t\tcontinue\n\t\telif '教四楼' in string:\n\t\t\tflag = 4\n\t\t\tcontinue\n\t\telif '图书馆' in string:\n\t\t\tcontinue\n\t\telse:\n\t\t\ttemp[flag] = p.findall(string)\n\t\t\t# print(temp)\n\t# print(temp)\n\tcache[i] = temp.copy()\n\ttemp.clear()\n# print(cache)\n# Spider end\n\nroot = tk.Tk()\nroot.title(\"今天哪有空\")\n\nroot.geometry('500x250')\n\nspace = tk.Label(root, height=1)\nspace.pack()\n\nvar_start = tk.StringVar()\nvar_start.set('8-10')\nstart_option = tk.OptionMenu(root, var_start, '8-10', '10-12', '13-15', '15-17', '17-18', '18-20')\nstart_option.config(width=8)\nstart_option.pack()\n\nvar_end = tk.StringVar()\nvar_end.set('8-10')\nend_option = tk.OptionMenu(root, var_end, '8-10', '10-12', '13-15', '15-17', '17-18', '18-20')\nend_option.config(width=8)\nend_option.pack()\n\nvar_building = tk.StringVar()\nvar_building.set('教一')\nbuilding_option = tk.OptionMenu(root, var_building, '教一', '教二', '教三', '教四')\nbuilding_option.config(width=8)\nbuilding_option.pack()\n\ndef give_me_data():\n\tstart_text = time_map[var_start.get()]\n\tend_text = time_map[var_end.get()]\n\tbuilding_text = building_map[var_building.get()]\n\ttemp = []\n\n\tif start_text > end_text:\n\t\tvar.set('您输入的有误,请重新输入')\n\t\t# TODO: handle some special case\n\t\tpass\n\telse:\n\t\tfor i in range(start_text, end_text+1):\n\t\t\tif building_text not in cache[i].keys():\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\ttemp.append(cache[i][building_text])\n\t\t# print(temp)\n\t\tif len(temp) != end_text-start_text+1:\n\t\t\tvar.set('不好意思,没有符合要求的教室,别学习了')\n\t\t\treturn\n\t\tresult = list(set.intersection(*map(set, temp)))\n\t\t# print(result)\n\t\tresult = list(result)\n\t\tprint(result)\n\t\tif len(result) == 0:\n\t\t\tvar.set('不好意思,没有符合要求的教室,别学习了')\n\t\telif len(result) == 1:\n\t\t\tvar.set('符合要求的空教室有: ' + result[0])\n\t\telse:\n\t\t\tresult.sort()\n\t\t\tvar.set('符合要求的空教室有:' + ' '.join(result))\n\t\treturn\n\nspace2 = tk.Label(root, height=1)\nspace2.pack()\n\nsearch = tk.Button(root, text='search', command=give_me_data).pack()\n\nvar = tk.StringVar()\nshow = tk.Label(root, textvariable=var, height=30, wraplength=450)\nshow.pack()\n\ntk.mainloop()"
}
] | 2 |
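The classroom finder above collects, per selected time slot, the list of free rooms in the chosen building, then keeps only rooms free across every slot with `set.intersection(*map(set, temp))`. A tiny standalone illustration of that idiom, not from the repo (room numbers are made up):

# rooms free in each consecutive time slot (hypothetical data)
slots = [['301', '302', '411'], ['302', '411', '520'], ['302', '530']]

# rooms free in *all* slots: intersect every per-slot set at once
free_all_slots = sorted(set.intersection(*map(set, slots)))
print(free_all_slots)  # ['302']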
yucheng-zeng/DEEP-LEARNING | https://github.com/yucheng-zeng/DEEP-LEARNING | a09f9715d4ccabd9a56692bbc1d68a98d96a4e2f | 2a523f3ba007d3eed885c13b5c9afb6de6b48364 | ef75ec5538a457825b70ae89b4dbd0e4f3c4af02 | refs/heads/master | 2021-10-22T09:42:29.466774 | 2019-03-09T16:55:48 | 2019-03-09T16:55:48 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5977588891983032,
"alphanum_fraction": 0.6128284335136414,
"avg_line_length": 30.180723190307617,
"blob_id": "3c18de3d41638f0f6d21be1ffc7c4e8c0fb68e29",
"content_id": "0473e10090bb94ec767e41ea9905e9ef603e1605",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3630,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 83,
"path": "/优化算法总汇/BDG.py",
"repo_name": "yucheng-zeng/DEEP-LEARNING",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n'''\n优点:全局最优解;易于并行实现;总体迭代次数不多\n缺点:当样本数目很多时,训练过程会很慢,每次迭代需要耗费大量的时间\n'''\n\ndef createDataSet():\n # 构造训练数据\n x = np.arange(-10, 10, 0.5) # 根据start与stop指定的范围以及step设定的步长, 获去数据集, 返回一个列表\n m = len(x) # 训练数据点数目\n target_data = x * 2 + 5 + np.random.randn(m) # 随机设置随即设置x对应的y值\n return x, target_data\n\n\ndef BGD(x, target_data, loop_max=1000,epsilon=1e-2):\n '''\n :param x: 训练数据点X轴坐标\n :param target_data: 训练数据点Y轴坐标\n :param input_data:\n :param loopmax: 终止条件,最大迭代次数(防止死循环)\n :param epsilon: 终止条件,目标函数与拟合函数的距离当的距离小于epsilon时, 退出\n :return: theta 训练结束之后的参数\n '''\n\n m = len(x) # 训练数据点数目\n x0 = np.full(m, 1.0) # 获取一个长度为m, 每个元素都是1.0 的列表\n # T 表示转秩矩阵, 不加T 第一行元素为依次为x0,第二行元素依次为x, 得到2*m矩阵\n input_data = np.vstack([x0, x]).T # 构造矩阵, 第一列元素为依次为x0,第二列元素依次为x,得到m*2矩阵,方便做矩阵乘法\n\n # 初始化权值\n np.random.seed(0) #\n theta = np.random.randn(input_data.shape[1]) # 初始化theta,, 生成一个列表,第一个元素是w(维度与输入空间一致), 第二个元素是b,\n\n alpha = 0.01 # 步长(注意取值过大会导致振荡即不收敛,过小收敛速度变慢)\n diff = 0. # 记录梯度\n count = 0 # 循环次数\n error = 0 # 用于记录均方误差\n while count < loop_max:\n count += 1\n\n # 标准梯度下降是在权值更新前对所有样例汇总误差,而随机梯度下降的权值是通过考查某个训练样例来更新的\n # 在标准梯度下降中,权值更新的每一步对多个样例求和,需要更多的计算\n\n sum_m = np.zeros(2)\n for i in range(m):\n # 以下数求损失函数的梯度\n diff = (np.dot(theta, input_data[i]) - target_data[i]) * input_data[i]\n # 可以在迭代theta的时候乘以步长alpha, 也可以在梯度求和的时候乘以步长alpha\n sum_m = sum_m + diff # 当alpha取值过大时,sum_m会在迭代过程中会溢出\n sum_m = sum_m/m\n if np.linalg.norm(sum_m)<epsilon: # 设置阀值, 如果梯度过小, 退出\n break\n theta = theta - alpha * sum_m # 注意步长alpha的取值,过大会导致振荡,过小收敛速度太慢\n error = calMSE(x,target_data,theta) # 计算均方误差\n if error < epsilon:\n break\n print('loop count = %d' % count, '\\tw:', theta, 'error=',error)\n return theta\n\ndef draw(x,target_data,theta):\n # check with scipy linear regression\n # intercept 回归曲线的截距\n # slope 回归曲线的斜率\n slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, target_data)\n print('intercept = %s slope = %s' % (intercept, slope))\n plt.plot(x, target_data, 'g.')\n plt.plot(x, x * theta[1] + theta[0], 'r')\n plt.show()\n\ndef calMSE(x,target_data,theta):\n MSE = 0\n for i in range(len(x)):\n temp = x[i]*theta[1]+theta[0] - target_data[i]\n MSE += temp*temp\n return MSE/len(x)\n\nif __name__=='__main__':\n x, target_data= createDataSet()\n theta = BGD(x,target_data)\n draw(x,target_data,theta)\n"
},
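The per-sample Python loop in BGD above can be collapsed into one matrix product per iteration. This vectorized sketch computes the same averaged gradient on the same kind of synthetic data; the function and variable names here are illustrative, not from the repo:

import numpy as np

def bgd_vectorized(X, y, alpha=0.01, iters=1000, eps=1e-2):
    # X: (m, 2) design matrix with a leading column of ones; y: (m,) targets
    theta = np.zeros(X.shape[1])
    for _ in range(iters):
        grad = X.T @ (X @ theta - y) / len(y)  # averaged gradient over all samples at once
        if np.linalg.norm(grad) < eps:
            break
        theta -= alpha * grad
    return theta

x = np.arange(-10, 10, 0.5)
X = np.column_stack([np.ones_like(x), x])
y = x * 2 + 5 + np.random.randn(len(x))
print(bgd_vectorized(X, y))  # approximately [5, 2]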
{
"alpha_fraction": 0.5253505706787109,
"alphanum_fraction": 0.5400934815406799,
"avg_line_length": 31.905324935913086,
"blob_id": "7a0c6a0af105a03643353135b0b66202ec1b61b7",
"content_id": "e76977ca2b9d651a962e344220d1fae0d94e9ee6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6738,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 169,
"path": "/FNN/FNN.py",
"repo_name": "yucheng-zeng/DEEP-LEARNING",
"src_encoding": "UTF-8",
"text": "# =============================================================================\n# 深度前馈网络框架\n# 多项式(正弦)拟合任务,需要numpy包和matplotlib包;\n# =============================================================================\n\n# ------------------ 定义深度前馈网络 -------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass MyDfn:\n __WtInitVar = 0.01 # 初始权值服从标准正态分布,该参数控制方差\n __BsInitAmp = 0.01 # 初始阈值服从均匀分布,该参数控制取值范围\n __miu = 0.02 # 随机梯度下降学习率\n\n # 网络初始化函数\n def __init__(self, InputLen=1, LayerNum=0, UnitNum=[], ActiveFcs=[]):\n self.LayerNum = LayerNum # 网络层数(int)\n self.InputLen = InputLen # 网络输入数据长度(int)\n self.UnitNum = [] # 网络各层的单元数,numpy数组\n self.ActiveFcs = [] # 网络各层的激活函数list,内容为函数指针\n\n self.WeightMts = [] # 网络各层权值list,内容为numpy矩阵\n self.BiasVc = [] # 网络各层阈值list,内容为numpy矩阵\n\n # 如果网络层数等于0\n if (self.LayerNum == 0):\n return\n # 每层网络的单元数目\n if (UnitNum.size == LayerNum):\n self.UnitNum = UnitNum\n else:\n print(\"UnitNum长度和LayerNum不等\")\n return\n # 每层网络的激活函数和导数对应的函数指针\n if (len(ActiveFcs) != self.LayerNum):\n print(\"ActiveFcs维度有误\")\n return\n else:\n self.ActiveFcs = ActiveFcs\n # 初始化网络\n self.WeightMts.append(self.__WtInitVar * np.random.randn(UnitNum[0], InputLen))\n self.BiasVc.append(self.__BsInitAmp * np.random.rand(UnitNum[0], 1))\n for idi in range(1, self.LayerNum):\n self.WeightMts.append(self.__WtInitVar * np.random.randn(UnitNum[idi], UnitNum[idi - 1]))\n self.BiasVc.append(self.__BsInitAmp * np.random.rand(UnitNum[idi], 1))\n\n # 显示网络结构函数\n def PrintNetworkInfo(self):\n print(\"网络层数=%d\" % self.LayerNum)\n if (self.LayerNum >= 1):\n print(\"第1层:输入数据长度=%d,该层单元数=%d\" % (self.InputLen, self.UnitNum[0]))\n for idi in range(1, self.LayerNum):\n print(\"第%d层:输入数据长度=%d,该层单元数=%d\" % (idi + 1, self.UnitNum[idi - 1], self.UnitNum[idi]))\n\n # 前馈函数(Input为numpy列向量)\n def Forward(self, Input):\n if (Input.shape != (self.InputLen, 1)):\n print(\"输入数据维度和网络不符\")\n return 0.0\n # 第一个元素是网络输入值,后面依次是各层输出值\n self.LyVals = [Input] # self.LyVals是一个长度为(self.LayerNum+1)的列表,输出计算结果\n self.LyDris = [] # self.LyDris是一个长度为self.LayerNum的列表,每个元素都是对应层输出的导数\n for idi in range(self.LayerNum):\n ZVal = np.dot(self.WeightMts[idi], self.LyVals[idi]) + self.BiasVc[idi] # 防射\n ValTmp, DriTmp = self.ActiveFcs[idi](ZVal) # Sigmoid函数进行非线性变换\n self.LyVals.append(ValTmp)\n self.LyDris.append(DriTmp)\n return self.LyVals[self.LayerNum]\n\n # 反向传播函数(LossDri为numpy列向量)\n def BackPropagation(self, LossDri):\n '''\n :param LossDri: 输出层的导数\n :return: None\n '''\n ErrPost = LossDri * self.LyDris[self.LayerNum - 1] # 最后的隐藏层的导数\n DeltaErr = [ErrPost] # 保留子节点的导数\n # 从上到下迭代计算梯度\n for idi in range(self.LayerNum - 2, -1, -1):\n ErrPri = np.dot(self.WeightMts[idi + 1].T, ErrPost) * self.LyDris[idi]\n DeltaErr.append(ErrPri)\n ErrPost = ErrPri\n # 更新参数\n for idi in range(self.LayerNum):\n self.WeightMts[idi] -= self.__miu * np.dot(DeltaErr[self.LayerNum - 1 - idi], self.LyVals[idi].T)\n self.BiasVc[idi] -= self.__miu * DeltaErr[self.LayerNum - 1 - idi]\n\n\n# ----------------------激活函数必须前一个返回数值,后一个返回导数--------------------\n# 激活函数Sigmoid及其导数(第一个numpy向量是函数,第二个是导数)\ndef Sigmoid(x):\n y = 1.0 / (1.0 + np.exp(-x))\n return y, y * (1 - y)\n\n\n# 激活函数ReLu及其导数(第一个numpy向量是函数,第二个是导数)\ndef ReLU(x):\n y = x\n y[x <= 0] = 0\n Dri = np.ones(x.shape)\n Dri[x <= 0] = 0\n return y, Dri\n\n\n# 线性单元的激活函数及导数(第一个numpy向量是函数,第二个是导数)\ndef Linear(x):\n return x, np.ones(x.shape)\n\n\n# MSE均方误差损失函数及其导数(第一个是函数,第二个是导数)\ndef MSELoss(y, Label):\n diff = y - Label\n return np.dot(diff.T, diff), diff # 这里的倒数进行了缩放处理\n #return 
np.dot(diff.T, diff), 2.0 * diff\n\n\n# ------------------------------------------------------------------------\n\n# -------------------- 进行多项式拟合 -------------------------------------\nif __name__=='__main__':\n\n\n DatNum = 100 # 回归点数\n # 构造数据\n xdat = np.linspace(-3, 3, DatNum)\n ydat = np.sin(xdat) + 0.01 * np.random.randn(xdat.size)\n\n # 构建网络\n LyNum = 3 # 网络层数\n UtNum = np.array([20, 12, 1]) # 网络各层的单元数\n\n # 设计每层激活函数层\n ActFc = []\n for idj in range(LyNum - 1):\n ActFc.append(Sigmoid)\n ActFc.append(Linear)\n\n # 构造前馈网络\n Net = MyDfn(1, LyNum, UtNum, ActFc)\n\n # 显示前馈网络结构\n Net.PrintNetworkInfo()\n\n # 开始训练网络\n IterNum = 3000 # 迭代次数\n for Iter in range(IterNum):\n AveLoss = 0.0\n for DatIdx in range(DatNum):\n InputVal = np.array(xdat[DatIdx]).reshape([1, 1])\n LabelVal = np.array(ydat[DatIdx]).reshape([1, 1])\n OutputVal = Net.Forward(InputVal)\n Loss, LossDri = MSELoss(OutputVal, LabelVal)\n Net.BackPropagation(LossDri)\n AveLoss += Loss[0][0]\n print(\"第%d次迭代平均损失为%.3f\" % (Iter + 1, AveLoss / DatNum))\n\n # 展示训练成果\n NetOutput = np.zeros(DatNum)\n for DatIdx in range(DatNum):\n InputVal = np.array(xdat[DatIdx]).reshape([1, 1])\n OutputVal = Net.Forward(InputVal)\n NetOutput[DatIdx] = OutputVal[0][0]\n plt.scatter(xdat, ydat)\n plt.plot(xdat, NetOutput, 'r', lw=5)\n plt.xlabel(\"X Axis\")\n plt.ylabel(\"Y Axis\")\n plt.legend(labels=[\"Net Output\", \"Train Data\"])\n plt.show()\n\n"
}
] | 2 |
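A standard way to validate hand-written backpropagation like MyDfn's is a finite-difference gradient check. This standalone sketch compares the analytic gradient of a single sigmoid unit under squared error against a numerical estimate; it is independent of the class above and all values are made up:

import numpy as np

def forward(w, b, x):
    return 1.0 / (1.0 + np.exp(-(w * x + b)))  # a single sigmoid unit

def loss(w, b, x, t):
    return (forward(w, b, x) - t) ** 2

def analytic_grad_w(w, b, x, t):
    y = forward(w, b, x)
    return 2.0 * (y - t) * y * (1.0 - y) * x  # chain rule: dL/dy * dy/dz * dz/dw

w, b, x, t, h = 0.3, -0.1, 0.8, 1.0, 1e-6
numeric = (loss(w + h, b, x, t) - loss(w - h, b, x, t)) / (2 * h)
print(abs(numeric - analytic_grad_w(w, b, x, t)) < 1e-8)  # True when the backprop math is right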
simrandeeplayal/Feature-Store | https://github.com/simrandeeplayal/Feature-Store | f4dea604e4a2dbfcb7680388413b2d455a19acf0 | 550065b0f1e803bf4b33b1fc0e61206d7b503933 | 6bf837586be1dceb702077fd947278db4f9978c1 | refs/heads/main | 2023-04-12T01:10:18.505671 | 2021-05-13T12:14:39 | 2021-05-13T12:14:39 | 367,038,025 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7352666258811951,
"alphanum_fraction": 0.780168354511261,
"avg_line_length": 52.5,
"blob_id": "e97539b99dacda3ca03291031e9e20dbc93c9df8",
"content_id": "b5c752425cf903269f5cefe1775b1be2d1888bf1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 1069,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 20,
"path": "/Feature Store/config.ini",
"repo_name": "simrandeeplayal/Feature-Store",
"src_encoding": "UTF-8",
"text": "[ECE file path]\nECEpath = /Volumes/My Passport/Raster data/ECE_015_030_05_N_P_AU_NAT_C_20140801.tif\nDescription = ECE file path should be change to where you have put the file in your local drive.\n\n[Depth of Soil file path]\nSoilpath = /Volumes/My Passport/Raster data/DES_000_200_05_N_P_AU_NAT_C_20140801.tif\nDescription = Soil file path should be change to where you have put the file in your local drive.\n\n[Sand file path]\nSandpath = /Volumes/My Passport/Raster data/SND_000_005_05_N_P_AU_NAT_C_20140801.tif\nDescription = Soil file path should be change to where you have put the file in your local drive.\n\n[Raw data file] \nRawpath = /Users/simrandeepsingh/processed_sampled_data.csv\nDescription = Raw file path should be change to where you have put the file in your local drive.\n\n[Raster Data Info]\nName = Soil and Landscape Grid of Australia\nSource = https://www.clw.csiro.au/aclep/soilandlandscapegrid/index.html \nDescription = The Soil and Landscape Grid of Australia provides relevant, consistent, comprehensive, nation-wide data in an easily-accessible format."
},
{
"alpha_fraction": 0.7958313822746277,
"alphanum_fraction": 0.7958313822746277,
"avg_line_length": 139.73333740234375,
"blob_id": "81fd85c4d5cff68d672a9fb75ef03e287dcdc7c4",
"content_id": "2dd2ec06cfa2a0669d307a2abb3e33701acb3009",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2119,
"license_type": "permissive",
"max_line_length": 536,
"num_lines": 15,
"path": "/Feature Store/README.txt",
"repo_name": "simrandeeplayal/Feature-Store",
"src_encoding": "UTF-8",
"text": "The Config.ini file helps to declare file paths so the user do not need to change anything in init file. Once, the package files are installed in the local system the user only needs to change the geotiff file paths in config files.\n\nThe main file of the python package is “__init__.py”. Python treats the folder as a module if it contains the “__init__.py” file. Additionally, since this is the first file loaded in a package, you can use it to run code that you want to run any time the module is loaded or to define the submodules to be exported. The init file will have the following methods:\n\na)\tReading the Raster files\nThe package is able to read raster data from Geotiff files. Here Rasterio library is used to read files. It outputs the Geotiff file in an array that consists of feature values. These feature values can be extracted using x,y values. \n\nb)\tDisplaying Map\nThe package also have a method to visualise the raster map. Here, since the soil raster data is of Australia. The map of Australia is shown. \n\nc)\tFeature Extract using featureextract method\nParticular soil feature value can be extracted from the Geotiff file using featureextract() method which is defined in the package. This method will take two input values; x and y. These two values are Latitude and longitude. Once the method is called with the parameters the method will return a soil value. Using the affine transformation the latitude and longitude values will transform into new x and y values which are used to traverse through the array and find a particular value. \n\nd)\tMerge Data\nAnother functionality of the package is to merge the feature values with any file. For example, if you have your own raw data with Latitude and longitude values. Then, you can simply import the package and call mergingdata() method which will return you a dataframe with three additionally soil values (Sand value, ECE and Depth of Soil) from feature store which are horizontally row matched which the provided raw data. With the help of this, one can include more attributes that can help train machine learning to gain better results.\n"
},
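A minimal sketch of the usage the README above describes, assuming the package is importable as `Rasterdata`, the paths in config.ini point at valid GeoTIFFs, and the coordinates fall inside the rasters (the lon/lat values here are made up):

import Rasterdata

# single-point lookup: longitude, latitude -> [ECE, depth of soil, sand] values
ece, des, snd = Rasterdata.featureextract(149.12, -35.28)
print(ece, des, snd)

# merge the three soil features into the raw CSV declared in config.ini;
# returns the augmented DataFrame and writes testpackage.xlsx as a side effect
df = Rasterdata.mergingdata()
print(df[['lag', 'long', 'ECE', 'DES', 'SND']].head())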
{
"alpha_fraction": 0.6494082808494568,
"alphanum_fraction": 0.6597633361816406,
"avg_line_length": 27.125,
"blob_id": "ff9e933b9af30133e28da4d5ab33ed6f875bb386",
"content_id": "c9b06194453313d82667419c37f6871e2a97cfe1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 676,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 24,
"path": "/Feature Store/setup.py",
"repo_name": "simrandeeplayal/Feature-Store",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n \nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Operating System :: MacOS :: Windows :: Windows 10',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3'\n]\n \nsetup(\n name='Rasterdata',\n version='0.0.1',\n description='A very basic featurestore',\n long_description=open('README.txt').read() + '\\n\\n' + open('CHANGELOG.txt').read(),\n url='', \n author='Simrandeep Singh',\n author_email='[email protected]',\n license='MIT', \n classifiers=classifiers,\n keywords='raster', \n packages=find_packages(),\n install_requires=['rasterio','numpy','matplotlib'] \n)\n\n"
},
{
"alpha_fraction": 0.7951127886772156,
"alphanum_fraction": 0.7951127886772156,
"avg_line_length": 124.17646789550781,
"blob_id": "029c258ab267b9929eeaf5ecd73d76e23c40a177",
"content_id": "7856ee3638591cfd34e467351724f4bebaa246c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2136,
"license_type": "no_license",
"max_line_length": 536,
"num_lines": 17,
"path": "/README.md",
"repo_name": "simrandeeplayal/Feature-Store",
"src_encoding": "UTF-8",
"text": "# Feature-Store\n\nThe Config.ini file helps to declare file paths so the user do not need to change anything in init file. Once, the package files are installed in the local system the user only needs to change the geotiff file paths in config files.\n\nThe main file of the python package is “__init__.py”. Python treats the folder as a module if it contains the “__init__.py” file. Additionally, since this is the first file loaded in a package, you can use it to run code that you want to run any time the module is loaded or to define the submodules to be exported. The init file will have the following methods:\n\na)\tReading the Raster files\nThe package is able to read raster data from Geotiff files. Here Rasterio library is used to read files. It outputs the Geotiff file in an array that consists of feature values. These feature values can be extracted using x,y values. \n\nb)\tDisplaying Map\nThe package also have a method to visualise the raster map. Here, since the soil raster data is of Australia. The map of Australia is shown. \n\nc)\tFeature Extract using featureextract method\nParticular soil feature value can be extracted from the Geotiff file using featureextract() method which is defined in the package. This method will take two input values; x and y. These two values are Latitude and longitude. Once the method is called with the parameters the method will return a soil value. Using the affine transformation the latitude and longitude values will transform into new x and y values which are used to traverse through the array and find a particular value. \n\nd)\tMerge Data\nAnother functionality of the package is to merge the feature values with any file. For example, if you have your own raw data with Latitude and longitude values. Then, you can simply import the package and call mergingdata() method which will return you a dataframe with three additionally soil values (Sand value, ECE and Depth of Soil) from feature store which are horizontally row matched which the provided raw data. With the help of this, one can include more attributes that can help train machine learning to gain better results.\n"
},
{
"alpha_fraction": 0.754807710647583,
"alphanum_fraction": 0.754807710647583,
"avg_line_length": 40.599998474121094,
"blob_id": "194a520cbe16d0688736431fda47f8d81925c57c",
"content_id": "01029b82f15ce075d9fd0030a88f9a744d652c0b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 208,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 5,
"path": "/Feature Store/Rasterdata/exceptions.py",
"repo_name": "simrandeeplayal/Feature-Store",
"src_encoding": "UTF-8",
"text": "\nclass ObjectNotFound(Exception):\n \"\"\"An Exception risen when an expected object not found\"\"\"\n\nclass IvalidOperation(Exception):\n \"\"\"An Exception risen when an attempted operation does not make sense\"\"\""
},
{
"alpha_fraction": 0.6085663437843323,
"alphanum_fraction": 0.6418798565864563,
"avg_line_length": 22.661972045898438,
"blob_id": "9bfe424f48c6a5d4d982515b15329871acc6a4aa",
"content_id": "313e7f30aa9f2af4b2d40461c9aee2a21d2f6ccd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1681,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 71,
"path": "/Feature Store/Rasterdata/__init__.py",
"repo_name": "simrandeeplayal/Feature-Store",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport rasterio\nimport rasterio.features\nimport rasterio.warp\nfrom matplotlib import pyplot as plt\nfrom rasterio.plot import show\nfrom configparser import ConfigParser\n\nfile = 'config.ini'\nconfig = ConfigParser()\nconfig.read(file)\n\nfile1 = config['ECE file path']['ECEpath']\nfile2 = config['Depth of Soil file path']['Soilpath']\nfile3 = config['Raw data file']['Rawpath']\nfile4 = config['Sand file path']['Sandpath']\n\ndataset1 = rasterio.open(file1)\ndataset2 = rasterio.open(file2)\ndataset3 = rasterio.open(file4)\n\nband1 = dataset1.read(1)\nband2 = dataset2.read(1)\nband3 = dataset3.read(1)\n\ndef featureextract(x,y):\n xs = x\n ys = y\n with dataset1 as src:\n rows1, cols1 = rasterio.transform.rowcol(src.transform, xs, ys) \n \n with dataset2 as src:\n row2, cols2 = rasterio.transform.rowcol(src.transform, xs, ys)\n \n with dataset3 as src:\n row3, cols3 = rasterio.transform.rowcol(src.transform, xs, ys)\n \n feature1 = band1[rows1, cols1]\n feature2 = band2[row2, cols2]\n feature3 = band3[row3, cols3]\n \n return [feature1, feature2 ,feature3]\n\n\ndef mergingdata():\n df_data = pd.read_csv(file3)\n df_data['ECE'] = 0.5\n df_data['DES'] = 0.5\n df_data['SND'] = 0.5\n\n for i, row in df_data.iterrows(): \n x = df_data['long'][i]\n y = df_data['lag'][i]\n a, b, c = featureextract(x,y)\n df_data['ECE'][i] = a\n df_data['DES'][i] = b\n df_data['SND'][i] = c\n \n df_data.to_excel(\"testpackage.xlsx\",index=False)\n\n return df_data\n\ndef showmap1():\n show(dataset1)\n\ndef showmap2():\n show(dataset2)\n\ndef showmap3():\n show(dataset3)\n\n"
}
] | 6 |
mayank8200/Fashion-Product-Images-Classification | https://github.com/mayank8200/Fashion-Product-Images-Classification | 116fd9acdf2da88254db3b8b4cc9aeb1cc4d2da5 | 4ebc5a371a826a331ca5807524f9ed797967b0fe | 211f38ae229a59def8199dadc2fc335464fff6ef | refs/heads/master | 2022-12-19T05:45:37.496179 | 2020-09-30T18:39:00 | 2020-09-30T18:39:00 | 291,935,964 | 1 | 1 | null | 2020-09-01T08:05:27 | 2020-09-25T10:22:48 | 2020-09-30T18:39:01 | Python | [
{
"alpha_fraction": 0.6266958713531494,
"alphanum_fraction": 0.6345732808113098,
"avg_line_length": 33.60606002807617,
"blob_id": "b51de298598eaa459b5c93c83467e816ab2cc75e",
"content_id": "5ae1c4acee1e046d42cf91dc8fd28103e8d34797",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2285,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 66,
"path": "/app.py",
"repo_name": "mayank8200/Fashion-Product-Images-Classification",
"src_encoding": "UTF-8",
"text": "import os\nfrom uuid import uuid4\nimport pickle\nfrom flask import Flask, request, render_template, send_from_directory\n\napp = Flask(__name__)\n# app = Flask(__name__, static_folder=\"images\")\n\nkey_list = pickle.load(open('key_list', 'rb'))\nval_list = pickle.load(open('val_list', 'rb'))\nkey_list_sub = pickle.load(open('key_list_sub', 'rb'))\nval_list_sub = pickle.load(open('val_list_sub', 'rb'))\n\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html\")\n\[email protected](\"/upload\", methods=[\"POST\"])\ndef upload():\n target = os.path.join(APP_ROOT, 'images1/')\n # target = os.path.join(APP_ROOT, 'static/')\n print(target)\n if not os.path.isdir(target):\n os.mkdir(target)\n else:\n print(\"Couldn't create upload directory: {}\".format(target))\n print(request.files.getlist(\"file\"))\n for upload in request.files.getlist(\"file\"):\n print(upload)\n print(\"{} is the file name\".format(upload.filename))\n filename = upload.filename\n destination = \"/\".join([target, filename])\n print (\"Accept incoming file:\", filename)\n print (\"Save it to:\", destination)\n upload.save(destination)\n #import tensorflow as tf\n import numpy as np\n from keras.preprocessing import image\n\n from keras.models import load_model\n new_model = load_model('model.h5')\n new_model_cat = load_model('MsubCatmodel.h5')\n\n new_model.summary()\n test_image = image.load_img('images1\\\\'+filename,target_size=(60,80))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis = 0)\n result = new_model.predict(test_image)\n result1 = new_model_cat.predict(test_image)\n ans = np.argmax(result)\n ans2 = np.argmax(result1)\n ans = key_list[val_list.index(ans)]\n ans2 = key_list_sub[val_list_sub.index(ans2)]\n \n # return send_from_directory(\"images\", filename, as_attachment=True)\n return render_template(\"template.html\",image_name=filename, text=ans , text2=ans2)\n\[email protected]('/upload/<filename>')\ndef send_image(filename):\n return send_from_directory(\"images1\", filename)\n\nif __name__ == \"__main__\":\n app.run(debug=False)\n\n"
},
{
"alpha_fraction": 0.7361468076705933,
"alphanum_fraction": 0.7482568621635437,
"avg_line_length": 39.05882263183594,
"blob_id": "9fd280a686998c0bb8d04c06754092da1cbaf0b6",
"content_id": "e5f15fd7adfdafd2dc65238aa967faf0b6fe8825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2725,
"license_type": "no_license",
"max_line_length": 373,
"num_lines": 68,
"path": "/README.md",
"repo_name": "mayank8200/Fashion-Product-Images-Classification",
"src_encoding": "UTF-8",
"text": "# Fashion Product Images Classification\nThe growing e-commerce industry presents us with a large dataset waiting to be scraped and researched upon. In addition to professionally shot high resolution product images, we also have multiple label attributes describing the product which was manually entered while cataloging. To add to this, we also have descriptive text that comments on the product characteristics.\n\n[](https://www.python.org/)<br>\n[](https://www.python.org/)<br>\n[](https://www.python.org/)<br>\n[](https://www.python.org/)<br>\n[](https://www.python.org/)<br>\n\n## Problem Statement\n```\nImplement web application for image captioning using Deep neural network for Fashion\nProduct Images (Small) dataset.\nRequirement:\n1. Design model with at least 90% validation accuracy.\n2. Create basic ui for given problem\na. Upload image\nb. Generated text\n(Dataset link: https://www.kaggle.com/paramaggarwal/fashion-productimagessmall)\n```\n# How To Run This Code\n```\n1. Open terminal / command prompt where python is installed.\n2. Locate the Project folder.\n3. type \"python app.py\" (without \"\").\n4. There will be some address like 127.0.0.1:5000/ .\n5. Copy it and open in Web Browser.\n6. Go to terminal / command prompt and press ctrl + c to stop the project.\n```\n\n# Technology Used\n[](https://shields.io/)<br>\n[](https://shields.io/)<br>\n[](https://shields.io/)\n\n\n# Homepage\nThe homepage of the web app where you get an option to upload new image.\n\n\n# After Uploading File\nFile can be uploaded either by drag and drop or by clicking on the empty space.\n\n\n\n# Prediction on Various Images\nWhen the file is uploaded you can see the category and sub-category is printed.\n\n\n\n<br>\n\n\n\n\n<br>\n\n\n\n\n<br>\n\n\n\n\n\n\n## Feel Free to contact if you have any project related doubt!\n\n"
},
{
"alpha_fraction": 0.6920635104179382,
"alphanum_fraction": 0.7197691202163696,
"avg_line_length": 26.0703125,
"blob_id": "b73d7a36edaefd2ee8b7248093b06b4d467d2144",
"content_id": "a62a1fcc16586a0e470d2a496cbf803358e82815",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3465,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 128,
"path": "/fashionClassifierSubCat.py",
"repo_name": "mayank8200/Fashion-Product-Images-Classification",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\ndf = pd.read_csv(\"styles.csv\",error_bad_lines=False)\ndf['image'] = df.apply(lambda row: str(row['id']) + \".jpg\", axis=1)\ndf = df.sample(frac=1).reset_index(drop=True)\ndf.head(10)\n\nbatch_size = 256\n\nfrom keras_preprocessing.image import ImageDataGenerator\n\nimage_generator = ImageDataGenerator(\n validation_split=0.2\n)\n\ntraining_generator = image_generator.flow_from_dataframe(\n dataframe=df,\n directory=\"images\",\n x_col=\"image\",\n y_col=\"subCategory\",\n target_size=(60,80),\n batch_size=batch_size,\n subset=\"training\"\n)\n\nvalidation_generator = image_generator.flow_from_dataframe(\n dataframe=df,\n directory=\"images\",\n x_col=\"image\",\n y_col=\"subCategory\",\n target_size=(60,80),\n batch_size=batch_size,\n subset=\"validation\"\n)\nimport pickle\nclasses = len(training_generator.class_indices)\nmy_dict = training_generator.class_indices\nkey_list = list(my_dict.keys()) \nval_list = list(my_dict.values())\nfilename1 = 'key_list_sub'\nfilename = 'val_list_sub'\npickle.dump(key_list, open(filename1, 'wb'))\npickle.dump(val_list, open(filename, 'wb'))\n\n\n#Importing keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\n\n#Initializing CNN\nclassifier = Sequential()\n\n#1Convolution\nclassifier.add(Conv2D(32,(3,3),input_shape = (60,80,3), activation = 'relu'))\n\n#2Pooling\nclassifier.add(MaxPooling2D(pool_size=(3, 3)))\n\n#adding 2nd 3rd and 4th convolution layer\nclassifier.add(Conv2D(32,(3,3), activation = 'relu'))\nclassifier.add(MaxPooling2D(pool_size=(3, 3)))\n\n\n#3Flattening\nclassifier.add(Flatten())\n\n#4Full_Connection\n\nclassifier.add(Dense(units=32,activation = 'relu'))\nclassifier.add(Dense(units=64,activation = 'relu'))\n\nclassifier.add(Dense(units=128,activation = 'relu'))\nclassifier.add(Dense(units=256,activation = 'relu'))\nclassifier.add(Dense(units=256,activation = 'relu'))\n\nclassifier.add(Dense(units=classes,activation = 'softmax'))\n\n#Compiling CNN\nclassifier.compile(optimizer='adam',\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n#classifier.summary()\n#classifier.fit(train_images, train_labels, epochs=25, batch_size=100)\n\n\nfrom math import ceil\n\nclassifier.fit_generator(\n generator=training_generator,\n steps_per_epoch=ceil(0.8 * (df.shape[0] / batch_size)),\n\n validation_data=validation_generator,\n validation_steps=ceil(0.2 * (df.shape[0] / batch_size)),\n\n epochs=5,\n verbose=1\n)\n\nloss, acc = classifier.evaluate_generator(validation_generator, steps=ceil(0.2 * (df.size / batch_size)))\nprint(\"\\n%s: %.2f%%\" % (classifier.metrics_names[1], acc * 100))\n\nclassifier.save(\"modelsubCategory.h5\")\nimport numpy as np\nfrom keras.preprocessing import image\nfilename = \"40826.jpg\"\nfrom keras.models import load_model\nnew_model = load_model('model.h5')\nnew_model.summary()\ntest_image = image.load_img('images\\\\'+filename,target_size=(60,80))\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image, axis = 0)\nresult = new_model.predict(test_image)\nval = np.argmax(result)\nmy_dict = training_generator.class_indices\nkey_list = list(my_dict.keys()) \nval_list = list(my_dict.values()) \nprint(key_list[val])\n\nimport pickle\n# save the model to disk\nfilename1 = 'key_list'\nfilename = 'val_list'\npickle.dump(key_list, open(filename1, 'wb'))\npickle.dump(val_list, open(filename, 'wb'))\n"
}
] | 3 |
safeyhalim/ml-hybrid-mega-case-study | https://github.com/safeyhalim/ml-hybrid-mega-case-study | 06ed56519a7b72438432c9fe34e136ab51fcdef9 | a29d43785d0bbb1bb843404eceff58c5166950bf | 67f6449f3a6c89056c9167e54cd43037c0640056 | refs/heads/master | 2023-07-25T17:27:36.115842 | 2021-08-29T17:51:29 | 2021-08-29T17:51:29 | 401,042,911 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8152173757553101,
"alphanum_fraction": 0.8152173757553101,
"avg_line_length": 91,
"blob_id": "b99c7eec4e12ba774ab2c67b8c5f9107a0d58a23",
"content_id": "5b5a9bcf244e0f82dd255e43d013419f1f328021",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 5,
"path": "/README.md",
"repo_name": "safeyhalim/ml-hybrid-mega-case-study",
"src_encoding": "UTF-8",
"text": "This repository is based on the Udemy course: Deep Learning A-Z™: Hands-On Artificial Neural Networks: https://www.udemy.com/course/deeplearning\n\nA model for fraud detection of bank customers'credit card applications using\na hybrid model that consists of both Unsupervised Deep Learning (Self Organizing Map) and Supervised Deep Learning (Artificial Neural Network).\nDataset source: https://archive.ics.uci.edu/ml/datasets/statlog+(australian+credit+approval)\n"
},
{
"alpha_fraction": 0.7523207068443298,
"alphanum_fraction": 0.762113630771637,
"avg_line_length": 69.02857208251953,
"blob_id": "88b3e6205638aa19df625104f76caacdd6a87776",
"content_id": "ad00a3b56ab93df726073e895a7e19e420bdbd58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9803,
"license_type": "no_license",
"max_line_length": 340,
"num_lines": 140,
"path": "/mega_case_study.py",
"repo_name": "safeyhalim/ml-hybrid-mega-case-study",
"src_encoding": "UTF-8",
"text": "# Mega Case Study - A Hybrid Deep Learning Model\n\n# Part 1 - Identifying the credit card frauds with the Self-Organizing Map (SOM)\n\n# Unsupervised Deep Learning with Self Organizing Map (SOM)\n# used for fraud detection among bank customers (Credit Card applications from customers)\n\n# Importing the libraries\nfrom minisom import MiniSom\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# Importing the dataset (the dataset is taken from: https://archive.ics.uci.edu/ml/datasets/statlog+(australian+credit+approval))\ndataset = pd.read_csv('Credit_Card_Applications.csv')\nprint(dataset)\n# We are going to create subsets from the dataset. The first will be the whole \n# dataset without the last column. The last column is the \"class\" of the customer\n# which shows whether her application for the credit card was actually approved or not\n# We want to separate that column from our input to the SOM \n# IMPORTANT: the SOM we are going to create will NOT predict Y because what we are doing\n# is unsupervised learning. We are only separating Y because it's irrelevant to the model\nX = dataset.iloc[:, :-1].values # get the dataset except for the last column\nY = dataset.iloc[:, -1].values # the last column (the class)\n\n# Feature Scaling\n# We will use Normalization (making all values between 0 and 1)\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\nX = sc.fit_transform(X)\n\n# Training SOM \n# Self Organizaing Map doesn't have a ready made implementation in scikitlearn\n# We are going to use an existing implementation of the Self Organizaing Maps: MiniSom 2.2.9 (the latest version at the time of writing)\n# https://pypi.org/project/MiniSom/\n# It was possible to installing this library with pip install, but following the tutorial in the course,\n# I chose to just add the minisom.py file to the project's working directly and work with it.\nfrom minisom import MiniSom\n\n# x, y in the argument list are the dimensions of the self organizing map. We chose x = 10 and y = 10 (10x10 grid) because it yields good results (also because our dataset is not that large)\n# We are free to choose the dimensions of the SOM so that the map fits our input space\n# input_len: the number of features we have in the input dataset(we have 14 features and the customer ID\n# which we are going to keep in order to determine the cheaters. So in total 15)\n# sigma: The radius of the neighborhood of the grid (We will keep the default value: 1.0)\n# learning_rate: decides by how much the weights are updated during the learning process in each iteration. The higher the learning_rate the faster there will be convergence\n# we will keep the default value: 0.5\n# decay_function: used to improve the convergence. 
We won't use it (We should be fine with the other hyperparameters)\n# random_seed: We won't use this hyperparameter\nsom = MiniSom(x = 10, y = 10, input_len = 15, sigma = 1.0, learning_rate = 0.5)\n# Check the following steps in the SOM lecture notes\nsom.random_weights_init(X)\n# num_iteration: the number of iterations we won't to apply steps 4 to 9 in the lecture notes\n# we chose 100 empirically (it's enough to yielding good results)\nsom.train_random(data = X, num_iteration = 100)\n\n# Visualizing the results:\n# The grid will show the winning nodes, and for each we are going to show the MID (Mean Inter-neuron Distance)\n# which is the mean of the distance between the winning node and it's neighboring nodes in its neighborhood\n# For a certain winning node, the more the MID the more the likelihood that this winning node is an outlier\n# Since in our example the majority of the winning node kinda represents the rules that are respected\n# when applying for a credit card, an outlier winning node represents a likely fradulent applicant\n# We are going to identify that using the colors on the map. The winning node will have colors so that the larger\n# the MID, the closet to white its color will be.\nfrom pylab import bone, pcolor, colorbar, plot, show # pylab is a matplotlib module\nbone() # creates a window that will contain the map\npcolor(som.distance_map().T) # som.distance_map() returns a matrix of MID for all the winning nodes. We need to apply the transpose method T to fit it to the pcolor method\ncolorbar()\nmarkers = ['o', 's'] # Create two markers o: circle, and s: square\ncolors = ['r', 'g'] # Create two colors r: red, and g: green\nfor i, x in enumerate(X): # i will be the indexes in X, and x will be the row of that index in the dataset (associated with the customer)\n w = som.winner(x) # returns the winning node associated with the customer. The winning node is the square in the plotted map\n plot(w[0] + 0.5, w[1] + 0.5, # w[0] and w[1] represent the lower left corner of the square representing the winning node in the plot. We want the maker to be displayed in the middle of the square, therefore we add 0.5\n markers[Y[i]], # if Y[i] = 0 that means that the customer's application was not approved, so we want to display a red circle in this case, if Y[1] mean approved and we want to display a green square\n markeredgecolor=colors[Y[i]], markerfacecolor = 'None', # we don't want to color the markers, because for some squares (winning nodes), there are some customers who got approved and other who didn't: in the same winning node square, we can find both red circles and green squares\n markersize = 10,\n markeredgewidth = 2)\n \nshow()\n\n# Finding the frauds\nmappings = som.win_map(data = X) # this method returns a mapping of all winning nodes to their associated data points in the dataset. The data returned is in the form of a dictionary where the key is a tuple that represents the coordinates of the winning node, and the value is the list of customers that are associated to that winning node\n# In the course, there were two squares whose color was white.\n# The coordinates of these squares are (5, 3) and (8, 3) respectively (the coordinates of a square are the ones of the bottom left point of the square)\n# White squares reflect high MID and therefore potentially contain the cheating customers\n# Note: that for every run of this code, the squares with white color change. 
But we will stick \n# to the squares (the coordinates) that were mentioned in the course.\nprint(mappings)\nfrauds = np.concatenate((mappings[(5, 3)], mappings[(8, 3)]), axis = 0) # concatenating the list of customers associated with the two winning nodes vertically\n# after the concatenation the list in frauds acutally contain the customers who are *potentially* frauds\nfrauds = sc.inverse_transform(frauds) # Since we scaled (normalized) the data set, we need to return the data associated with the list potentially fraudulent customers to its original form, so we do an inverse transform\n# after descaling, the first column is that of the customer IDs\n# We can therefore give this list of IDs to the bank employees so that they can conduct further investigation to know which customers actualy cheated although they were granted the credit card\n\n\n# Part 2 - Going from Unsupervised to Supervised Deep Learning (Artificial Neural Network)\n# Creating a matrix of features\ncustomers = dataset.iloc[:, 1:].values # We include all columns except the first one (which is the customer ID and we don't need it in the matrix of features). Note: here we included the last column because it's the class (whether the application for credit card was accepted or not) because we may need this feature later on\n# Creating the dependent variable:\nis_fraud = np.zeros(len(dataset)) # Creating a vector with the length of the dataset initialized with zeros (initially, we assume that all the customers are not fraudelent)\n# We are going to consider the customers who are in the white squares in the SOM above to be the fraudulent ones\n# Therefore we are going to set to 1 their positions in the is_fraud vector (the vector is indexed by the customer ID)\nfor i in range(len(dataset)):\n if dataset.iloc[i, 0] in frauds: # if the customer ID in question is in the frauds, we set the value for that customer ID in the in_fraud vector to 1 (the vector is indexed by the customer ID)\n is_fraud[i] = 1\n\n# Building the ANN:\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\ncustomers = sc.fit_transform(customers) # No need to scale the dependent variable because it takes the values 0 and 1\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Initializing the ANN\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer\n# Since we have only 690 data points, it's a very simple dataset. 
Therefore, we only use 2 neurons and only one hidden layer\n# Since we have 15 features, we set the input dimensions (input_dim) to 15\nclassifier.add(Dense(units = 2, kernel_initializer = 'uniform', activation = 'relu', input_dim = 15 ))\n\n# Adding the output layer\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n\n# Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fitting the ANN to the training set\n# Again, because of the simplicity of the dataset, we set the batch_size to 1 and the number of epochs to 2\nclassifier.fit(customers, is_fraud, batch_size = 1, epochs = 2)\n\n# Predicting the probabilities of frauds (among all customers)\ny_pred = classifier.predict(customers)\ny_pred = np.concatenate((dataset.iloc[:, 0:1].values, y_pred), axis = 1) # Horizontal concatenation to have a two-dimensional array that contains the customer IDs and the fraud probabilities\ny_pred = y_pred[y_pred[:, 1].argsort()] # Sorting the y_pred array by the second column (fraud probability) in ascending order\n\nprint(y_pred)"
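The comments above concede that the white (high-MID) squares move between training runs, so hard-coding the coordinates (5, 3) and (8, 3) breaks on re-training. A minimal sketch of selecting outlier nodes by thresholding the distance map instead; the 0.9 cutoff is an arbitrary illustration and `fraud_candidates` is not part of the original script.

```python
# Sketch: collect customers from every node whose MID exceeds a threshold,
# instead of hard-coding node coordinates. The threshold is illustrative.
import numpy as np

def fraud_candidates(som, X, threshold=0.9):
    mid = som.distance_map()    # mean inter-neuron distances, scaled to [0, 1]
    mappings = som.win_map(X)   # winning-node coords -> list of rows of X
    rows = []
    for i, j in zip(*np.where(mid > threshold)):
        rows.extend(mappings.get((int(i), int(j)), []))
    return np.array(rows)
```

As in the script, `sc.inverse_transform(...)` on the returned rows would recover the original customer records; a sensible threshold is read off the colorbar of the plotted map.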
}
] | 2 |
theoremoon/english_exam_preparation | https://github.com/theoremoon/english_exam_preparation | 4c430423e8ab5778f68ff82b6b2867770966b639 | 452ef97bf9fc54a6c29c3e24e2813629c570b13f | 8ca30a7408816a52476a57924a1b93a2e3c28cee | refs/heads/master | 2021-05-29T21:04:01.742740 | 2015-09-17T08:48:10 | 2015-09-17T08:48:10 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8270777463912964,
"alphanum_fraction": 0.8310992121696472,
"avg_line_length": 48.733333587646484,
"blob_id": "079a0e0488129ea656660cfbebc0e733143fa2f7",
"content_id": "4eddda518f01da2b281d58cf4c4b8e0687a4e322",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1816,
"license_type": "permissive",
"max_line_length": 289,
"num_lines": 15,
"path": "/README.md",
"repo_name": "theoremoon/english_exam_preparation",
"src_encoding": "UTF-8",
"text": "# english_exam_preparation\n これは、私の英語の試験対策用あれです。Python3.xで動くはずです\n\n# options\n -f <file>で、指定したファイル名のファイルを問題ファイルとして開きます。問題ファイルのフォーマットは後述します。\n\n -l <level>で、指定したレベルで問題を出題します。レベルに応じて、虫食いの個数が増えます。指定がない場合は、ランダムでレベルを決定しますが、多くの場合にすべて虫食いとなります。\n\n# problem file format\n 問題ファイルのフォーマットを示します。問題ファイルの文字コードはutf-8としています。問題ファイルは、「英文」「和文」「空行」の3つから構成されます。このプログラムは問題ファイルの最初の行を「英文」、それに続く行を「和文」、次を「空行」と認識します。途中に改行のある文は許されません。また、この仕様のため、問題ファイルの最後には空行が必ず必要です。\n\n 和文を構成する要素に指定はありませんが、英文には英語の多様性に対応するための構文を用意しています。例えば、\"I don't like him.\"と、\"I do not like him.\"は同じ意味ですが違うものと解釈されるでしょう。そこで、\"I (don't|do not) like him.\"と記述することで、don'tまたはdo notと認識されるように指定できます。これを利用して、(that|)などと書けばthatを入れても入れなくても良い、というしていにできます。ただし、これらの指定を行った場合、表示される虫食いの穴の数と、単語の大きさ 数が合わなくなります。\n\n# change log\n かかないです。\n"
},
{
"alpha_fraction": 0.4255829155445099,
"alphanum_fraction": 0.43146654963493347,
"avg_line_length": 27.6064510345459,
"blob_id": "4be9410dca61053fb8ba0d88b051672b5c5ae18a",
"content_id": "0e39ac44c8854f0ce9e4abb8e2074878f93c899f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4589,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 155,
"path": "/eng.py",
"repo_name": "theoremoon/english_exam_preparation",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\r\n\r\nimport random\r\nimport re\r\nimport unittest\r\nimport argparse\r\nimport sys\r\n\r\n\r\nclass Question:\r\n def __init__(self, english, japanese):\r\n self.en = parse_english(english) # double dimention array\r\n self.jp = japanese # str\r\n\r\n def ask(self, level):\r\n moth = {} # moth = ___\r\n GREEN = \"\\033[92m\"\r\n RED = \"\\033[91m\"\r\n END = \"\\033[0m\"\r\n word_matcher = re.compile(\"\\S+\") # match to word\r\n\r\n for v in random.sample(self.en, min(level, len(self.en))):\r\n # some are turned to moth\r\n if v:\r\n moth[self.en.index(v)] = v\r\n\r\n # print problem\r\n print(self.jp) # japanese\r\n for i, w in enumerate(self.en):\r\n # english\r\n if i in moth:\r\n print(\"_\" * len(moth[i][0]), end=\" \")\r\n else:\r\n print(w[0], end=\" \")\r\n\r\n print()\r\n\r\n answers = word_matcher.findall(input()) # split to words\r\n corrects = 0\r\n\r\n for i, ans in enumerate(answers):\r\n if len(self.en) > i:\r\n correct = ans in self.en[i] # are candidates include answer?\r\n if correct:\r\n corrects += 1\r\n print(GREEN + \"O\" + END, end=\"\")\r\n else:\r\n print(RED + \"X\" + END, end=\"\")\r\n print(\"{} {}\".format(self.en[i], ans))\r\n else:\r\n print(RED + \"X\" + END, end=\"\")\r\n print(\"[] {}\".format(ans))\r\n\r\n if len(answers) < len(self.en):\r\n for v in self.en[len(answers):]:\r\n print(RED + \"X\" + END, end=\"\")\r\n print(v)\r\n\r\n if corrects == len(self.en):\r\n print(GREEN + \"Correct!\" + END)\r\n else:\r\n print(RED + \"Wrong...\" + END)\r\n\r\n\r\ndef parse_paren(text):\r\n words = []\r\n wbuf = str()\r\n for i, c in enumerate(text):\r\n if c == '|':\r\n words.append(wbuf)\r\n wbuf = str()\r\n elif c == ')':\r\n words.append(wbuf)\r\n return (i, words)\r\n else:\r\n wbuf += c\r\n\r\n\r\ndef parse_english(sentence):\r\n elements = []\r\n word = []\r\n wbuf = str()\r\n i = 0\r\n sentence += \" \" # sentinel\r\n while i < len(sentence):\r\n c = sentence[i]\r\n if c.isspace():\r\n if wbuf:\r\n word.append(wbuf)\r\n elements.append(word)\r\n wbuf = str()\r\n word = []\r\n elif c == '(':\r\n i += 1 # read '('\r\n index, choices = parse_paren(sentence[i:])\r\n i += index\r\n word = word + choices\r\n else:\r\n wbuf += c\r\n i += 1\r\n return elements[:-1]\r\n\r\n\r\ndef Questions(filename):\r\n questions = []\r\n en = \"\"\r\n jps = []\r\n with open(filename, encoding=\"utf-8\") as f:\r\n for l in f:\r\n if not l.strip():\r\n for jp in jps:\r\n questions.append(Question(en, jp))\r\n en = \"\"\r\n del jps[:]\r\n else:\r\n if not en:\r\n en = l\r\n else:\r\n jps.append(l)\r\n return questions\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n level_help = ('The mothes increase with level.'\r\n 'Default is random (almost cases level max')\r\n parser.add_argument(\"-l\", \"--level\", type=int,\r\n default=sys.maxsize, help=level_help)\r\n parser.add_argument(\"-f\", \"--file\",\r\n default=\"problems.txt\", help=\"Set problem files path\")\r\n args = parser.parse_args()\r\n\r\n qs = Questions(args.file)\r\n random.shuffle(qs)\r\n for q in qs:\r\n q.ask(args.level)\r\n print(\"Continue to press RETURN\")\r\n cont = input()\r\n if cont is not \"\":\r\n break\r\n\r\n\r\nclass Test_parse_english(unittest.TestCase):\r\n def setUp(self):\r\n self.sentences = [\"What is your name?\",\r\n \"I (don't|do not) like it.\",\r\n \"I think (that|) I am right.\"]\r\n\r\n def test_parse_english(self):\r\n self.assertEqual(parse_english(self.sentences[0]),\r\n [[\"What\"], [\"is\"], [\"your\"], [\"name?\"]])\r\n 
self.assertEqual(parse_english(self.sentences[1]),\r\n [[\"I\"], [\"don't\", \"do not\"], [\"like\"], [\"it.\"]])\r\n self.assertEqual(parse_english(self.sentences[2]),\r\n [[\"I\"], [\"think\"], [\"that\", \"\"],\r\n [\"I\"], [\"am\"], [\"right.\"]])\r\n"
}
] | 2 |
phenguin/dotfiles | https://github.com/phenguin/dotfiles | 2bdc35d755d815be4e0897b06b0d87d3995628a8 | d2613adf0d981c1321952921fb5af7142cce03a3 | b00fc60c9c7de9e2fa96f0f8b7e729b753e8a3ce | refs/heads/master | 2020-04-12T06:35:43.391362 | 2016-07-07T13:49:43 | 2016-07-07T13:49:43 | 2,790,473 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6888889074325562,
"alphanum_fraction": 0.6888889074325562,
"avg_line_length": 18.285715103149414,
"blob_id": "55de09a2829c5ca71f0ebbbf0aa784a103c4b389",
"content_id": "6faf5d2c4258e2f9b924a1813c4b60943d3ac0ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 7,
"path": "/.bashrc",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#! /bin/sh\n\nsource ~/.bash/aliases\nsource ~/.bash/config\nsource ~/.bash/env\n\nPATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting\n"
},
{
"alpha_fraction": 0.6878636479377747,
"alphanum_fraction": 0.6903574466705322,
"avg_line_length": 23.5510196685791,
"blob_id": "33631c27f4a93c5894d6de7fb4170ff61051bbc8",
"content_id": "41a78afed2488876b8cb2490d177a388835918ef",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2406,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 98,
"path": "/build/matcher/README.md",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "# Matcher\n\nThis is a standalone library that does the same fuzzy-find matching as Command-T.vim.\n\n# Installation\n\n```shell\n$ make\n# move `matcher` somewhere useful\n$ make install\n# make install will install it to /usr/local/bin.\n```\n\n# Usage\n\nMatcher searches for a string in a list of filenames, and returns the\nones it thinks you are most likely referring to. It works exactly like\nfuzzy-finder, Command-T, and so on.\n\n### Usage:\n\n```shell\n$ matcher [options] <search>\n```\n\n#### Options:\n\n* `--limit`: The number of matches to return (default 10)\n* `--no-dotfiles`: Dotfiles will never be returned (by default, they may\n be)\n* `--manifest`: Specify a file containing the list of files to scan. If\n none given, matcher will read the list from stdin.\n\n### Examples\n\n```shell\n$ matcher --limit 20 --no-dotfiles --manifest filelist.txt customer.rb\n$ find . | matcher order\n```\n\n# Using with CtrlP.vim\n\n```viml\nlet g:path_to_matcher = \"/path/to/matcher\"\n\nlet g:ctrlp_user_command = ['.git/', 'cd %s && git ls-files . -co --exclude-standard']\n\nlet g:ctrlp_match_func = { 'match': 'GoodMatch' }\n\nfunction! GoodMatch(items, str, limit, mmode, ispath, crfile, regex)\n\n \" Create a cache file if not yet exists\n let cachefile = ctrlp#utils#cachedir().'/matcher.cache'\n if !( filereadable(cachefile) && a:items == readfile(cachefile) )\n call writefile(a:items, cachefile)\n endif\n if !filereadable(cachefile)\n return []\n endif\n\n \" a:mmode is currently ignored. In the future, we should probably do\n \" something about that. the matcher behaves like \"full-line\".\n let cmd = g:path_to_matcher.' --limit '.a:limit.' --manifest '.cachefile.' '\n if !( exists('g:ctrlp_dotfiles') && g:ctrlp_dotfiles )\n let cmd = cmd.'--no-dotfiles '\n endif\n let cmd = cmd.a:str\n\n return split(system(cmd), \"\\n\")\n\nendfunction\n```\n\n# Using with zsh\n\n```shell\n_matcher_complete() {\n git ls-files | /Users/burke/bin/matcher -l20 ${words[CURRENT]} | while read line; do\n compadd -U \"$line\"\n done\n compstate[insert]=menu # no expand\n}\n\nzle -C matcher-complete 'menu-select' _matcher_complete\n\nbindkey '^X^T' matcher-complete # C-x C-t to find matches for the search under the cursor\n# bindkey '^T' matcher-complete # C-t to find matches for the search under the cursor\n```\n\n\n# Bugs\n\n* Probably\n\n# Contributing\n\n* Fork branch commit push pullrequest\n* I'm bad at github notifications. Send me an email too at [email protected]\n"
},
{
"alpha_fraction": 0.7484076619148254,
"alphanum_fraction": 0.7770700454711914,
"avg_line_length": 43.85714340209961,
"blob_id": "4873a88761ccbc79e3223cb2d518b210d384cf86",
"content_id": "6e9aaee6f7234dc14928635454143bd51f490672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 7,
"path": "/.bash/env",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#! /bin/sh\n\nexport PATH=$PATH:~/bin\nexport NIX_PATH=nixutil=/home/$USER/nixutil:$NIX_PATH\nexport EDITOR='vim -u $HOME/.vimrc.minimal'\nexport TERM=xterm-256color\nexport CLASSPATH=.:/home/jcullen/build/mde-test/MDELiteDir/bin/Avalon/Avalon.jar:/home/jcullen/build/mde-test/MDELiteDir/bin/MDELite3_05/MDELite3_05.jar\n"
},
{
"alpha_fraction": 0.5715630650520325,
"alphanum_fraction": 0.5866289734840393,
"avg_line_length": 20.229999542236328,
"blob_id": "a91528ae72411b95ed529807ff5a85091a9d14e7",
"content_id": "15c3d34e49e8c2f8259c7fdfcfef87942b97b895",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2124,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 100,
"path": "/build/matcher/main.c",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <getopt.h>\n#include <ctype.h>\n\n#include \"matcher.h\"\n\nstruct globalArgs_t {\n int dotfiles; // -d\n int limit; // -l\n char *manifest; // -m\n char *search;\n} globalArgs;\n\nstatic const char *optString = \"dl:m:h?\";\n\nstatic const struct option longOpts[] = {\n { \"no-dotfiles\", no_argument, NULL, 'd' },\n { \"limit\", required_argument, NULL, 'l' },\n { \"manifest\", required_argument, NULL, 'm' },\n { \"help\", no_argument, NULL, 'h' },\n { NULL, no_argument, NULL, 0 }\n};\n\n/* Display program usage, and exit. */\nvoid display_usage(void)\n{\n puts(\"Usage: matcher [--no-dotfiles] [--limit num] [--manifest filename] <query>\\n\");\n exit( EXIT_FAILURE );\n}\n\nvoid parse_arguments(int argc, char *argv[])\n{\n int opt = 0;\n int longIndex = 0;\n\n globalArgs.dotfiles = 1;\n globalArgs.limit = 10;\n globalArgs.manifest = NULL;\n globalArgs.search = NULL;\n\n opt = getopt_long(argc, argv, optString, longOpts, &longIndex);\n\n while (opt != -1) {\n switch (opt) {\n case 'l':\n globalArgs.limit = atoi(optarg);\n break;\n case 'd':\n globalArgs.dotfiles = 0;\n break;\n case 'm':\n globalArgs.manifest = optarg;\n break;\n case 'h':\n case '?':\n display_usage();\n break;\n default:\n break;\n }\n opt = getopt_long(argc, argv, optString, longOpts, &longIndex);\n }\n\n\t\n globalArgs.search = argv[argc - 1];\n\tint i = 0;\n\twhile(globalArgs.search[i] != '\\0'){\n\t\tglobalArgs.search[i] = tolower(globalArgs.search[i]);\n\t\ti++;\n\t}\n}\n\nint main(int argc, char *argv[])\n{\n parse_arguments(argc, argv);\n\n char *strings[20000];\n int num_strings = 0;\n\n FILE *fp = stdin;\n if (globalArgs.manifest) {\n fp = fopen(globalArgs.manifest, \"r\");\n }\n\n for (num_strings = 0; num_strings < 20000; num_strings++) {\n strings[num_strings] = malloc(1024 * sizeof(char));\n fgets(strings[num_strings], 1023, fp);\n if (feof(fp)) break;\n }\n\n fclose(fp);\n\n score_list(globalArgs.search,\n strings,\n num_strings,\n globalArgs.dotfiles,\n globalArgs.limit);\n return 0;\n}\n\n"
},
{
"alpha_fraction": 0.6991869807243347,
"alphanum_fraction": 0.7154471278190613,
"avg_line_length": 20.705883026123047,
"blob_id": "bc1140b7fbaa933ec6386674c3586e7876fc5a29",
"content_id": "a774492b71f4dcb62d50b65a410b44014fe27d3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/scripts/startup.sh",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#! /bin/sh\n\nxrdb -merge .Xresources &\nsynclient AccelFactor=0.1\nsynclient MinSpeed=1.5\nsynclient MaxSpeed=3.5\n\n# Start up gpg-agent\n# . ~/scripts/gpg-agent.sh\n\n# dropbox start &\nredshift &\ndeluge &\nt-init & # Set up initial tmux environment\necho 'xmodmap ~/.xmodmaprc > /home/jcullen/xtempoutput.txt & exit' | zsh &\nxmodmap /home/jcullen/.xmodmaprc & \nemacs --daemon &\n"
},
{
"alpha_fraction": 0.7639999985694885,
"alphanum_fraction": 0.7639999985694885,
"avg_line_length": 26.77777862548828,
"blob_id": "2ef9d3cb06540ab3083c012c0ba8016b93d6d038",
"content_id": "c615ec3457180eff2c55822c406b2a97eb4f44c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 9,
"path": "/README.md",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "# Dotfiles\nConfiguration files from my home directory put under version control.\n\n# Requirements\nbash, git, make, vim\n\n# Install\n $ wget https://raw.githubusercontent.com/phenguin/dotfiles/master/scripts/install-conf.sh\n $ bash install-conf.sh\n"
},
{
"alpha_fraction": 0.60994952917099,
"alphanum_fraction": 0.6176400184631348,
"avg_line_length": 26.375,
"blob_id": "11b8cd913ba9c32cb51b84b9795ed9f1d8b05f17",
"content_id": "16690f8191db8427cc654cbcd774607a497e4ab2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4161,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 152,
"path": "/.bash/aliases",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nCONFIG_DOTFILES_GIT_DIR=\"$HOME/.cfg\"\n\nalias alert='notify-send --urgency=low -i \"$([ $? = 0 ] && echo terminal || echo error)\" \"$(history|tail -n1|sed -e '\\''s/^\\s*[0-9]\\+\\s*//;s/[;&|]\\s*alert$//'\\'')\"'\nalias bp='echo -e \"\\a\"'\nalias ..='cd ..'\nalias ...='cd ../..'\nalias ....='cd ../../..'\nalias diskhogs='du -hsx * | sort -rh | head -20'\nalias gv='gvim'\nalias hnb-partecs='hnb $HOME/partecs/partecs-hnb.xml'\nalias la='ls -A'\nalias ll='ls -al'\nalias l='ls -CF'\nalias locate='locate -i --regexp'\nalias lock='xscreensaver-command --lock'\nalias ls='ls -h --color=auto '\nalias man='LC_ALL=C LANG=C man'\nalias open='xdg-open'\nalias o='popd'\nalias p='pushd'\nalias rmf='rm -f'\nalias V='vim `ls -t | head -1`'\nalias v=\"vim -u $HOME/.vimrc.minimal\"\nalias vi=\"vim -u $HOME/.vimrc.minimal\"\nalias vim=\"vim -u $HOME/.vimrc.minimal\"\nalias x='exit'\nalias cbot=\"python $HOME/code/crypto_bot/interface.py\"\nalias serve=\"python -m SimpleHTTPServer 8000\"\nalias xx='atool -x'\nalias conf='git --git-dir=$CONFIG_DOTFILES_GIT_DIR --work-tree=$HOME'\n\n# NIx related\n# A nix query helper function\nnq()\n{\n case \"$@\" in\n -h|--help|\"\")\n printf \"nq: A tiny nix-env wrapper to search for packages in package name, attribute name and description fields\\n\";\n printf \"\\nUsage: nq <case insensitive regexp>\\n\";\n return;;\n esac\n nix-env -qaP --description \\* | grep -i \"$@\"\n}\n\nnix-dev() {\n nix-shell --pure $@ --command \"ts $(basename $PWD)\"\n}\n\n# Git related\nfunction git-grep-all-commits {\n git rev-list --all | xargs git grep $@\n}\n\nfunction pyprofile {\n python -m cProfile $1\n}\n\nfunction gpyprofile {\n python -m cProfile -o tmp.cprofile $1 && gprof2dot.py -f pstats tmp.cprofile | dot -Tpng -o out.png && eog out.png && rm -f tmp.cprofile\n}\n\nfunction git-grep-commits {\n\t\techo $1\n\t\techo ${@:2}\n\t\tgit rev-list $1 | xargs git grep ${@:2}\n}\n# Emacs related\n\nfunction e {\n SNAME=$EMACS_SERVER_NAME\n if [ -z \"$SNAME\" ]; then\n SNAME=\"server\"\n fi\n\n ALTERNATE_EDITOR=\"\" emacsclient -c -s $SNAME $@ & disown\n unset SNAME\n}\n\nfunction ed {\n SNAME=$EMACS_SERVER_NAME\n if [ -z \"$SNAME\" ]; then\n SNAME=\"server\"\n fi\n\n emacs --daemon=$SNAME\n unset SNAME\n}\n\nfunction es {\n 'emacs $@ & disown'\n}\n\nalias ek=\"ALTERNATE_EDITOR='emacs' emacsclient -c -s $EMACS_SERVER_NAME\"\nalias ec='ALTERNATE_EDITOR=\"emacs -nw\" emacsclient -t'\nalias et='ALTERNATE_EDITOR=\"emacs -nw\" emacsclient -t'\nalias E='emacs -nw `ls -t | head -1`'\n\n# FASD aliases\n# alias v='f -e vim'\n\n# Function either sends the argument files to be editted in the active vim server, or creates the vim server first if it doesn't already exist\n# Further, if we are in a tmux session, each tmux session gets its own vim server depending on the session name\nvs () {\n if [[ \"$TERM\" == \"screen-256color\" ]]; then # if we're in a tmux session\n SERVERNAME=\"tmux-`tmux list-panes -F '#{session_name}' | head -1`-vim\" # give session its own vim server\n else\n SERVERNAME='default' # else, default\n fi\n\n if [ -z \"$1\" ]\n then\n (vim --serverlist | grep -i \"$SERVERNAME\" && echo \"Already have a server active, if you want a new one do it manually\") || vim --servername \"$SERVERNAME\"\n else\n vim --servername \"$SERVERNAME\" --remote-silent \"$@\" 2> /dev/null || vim --servername \"$SERVERNAME\" \"$@\"\n fi\n}\n\n# TMUX ---------------\n\nalias tl='tmux list-sessions'\n\n# Convenient killing of unneeded tmux session provided by:\n# tk 
<session-name>\ntk () {\n if [ -z \"$1\" ]\n then\n echo 'Error: Please specify a valid tmux-session name to kill'\n else\n TMUXSESSION=$1\n # if the session is already running, just attach to it.\n tmux has-session -t $TMUXSESSION\n if [ $? -eq 0 ] \n then\n echo \"Killing tmux session $TMUXSESSION\"\n tmux kill-session -t $TMUXSESSION\n else\n echo \"Tmux session $TMUXSESSION does not exist. Nothing to kill\"\n fi\n fi\n}\n\n\n# Git aliases - - - - - - - - - - - - - - -\nalias gs='git status -sb'\n\n# Alias tmux for 256 color support\nalias tmux='tmux -2'\n\n#archlinux only aliases\nalias ys='sudo yaourt -S'\n"
},
{
"alpha_fraction": 0.7666960954666138,
"alphanum_fraction": 0.7687554955482483,
"avg_line_length": 31.68269157409668,
"blob_id": "f0b11b0e97481ab808425c9838c60c4e46c0b6c6",
"content_id": "98ecf05d06872a6fad65f11613ec3859e112a086",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3399,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 104,
"path": "/.emacs.d.old/themes/solarized-emacs/README.md",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "# Solarized for Emacs\n\nSolarized for Emacs is an Emacs port of the [Solarized theme for vim](http://ethanschoonover.com/solarized),\ndeveloped by Ethan Schoonover.\n\nSolarized for Emacs is tested only under Emacs 24, but should be\nworking under Emacs 23 as well. The theme is implemented in terms of\ncustomizations and `deftheme` and does not require the\n`color-theme-package`.\n\n# Installation\n\n\nSolarized for Emacs is available for installation via the\n[MELPA](http://melpa.milkbox.net) and\n[Marmalade](http://marmalade-repo.org/) `package.el`\nrepositories. Assuming you've set one of the them up (I recommend\nMELPA) you can install solarized like this:\n\n`M-x package-install solarized-theme`\n\nAfterwards - business as usual, just load one of the theme variants with `M-x\nload-theme`.\n\n(If you want to install manually that procedure is briefly documentet in the\nFAQ at the end of this document.)\n\n# Customisations\n\n## Theme specific settings\nIf you don't like low-contrast modeline or fringe, you can `customize` them\neither by doing `M-x customize-group solarized` or setting the values using\nelisp code:\n\n\n```lisp\n;; make the fringe stand out from the background\n(setq solarized-distinct-fringe-background t)\n\n;; make the modeline high contrast\n(setq solarized-high-contrast-mode-line t)\n\n;; Use less bolding\n(setq solarized-use-less-bold t)\n\n;; Use more italics\n(setq solarized-use-more-italic t)\n\n;; Use less colors for indicators such as git:gutter, flycheck and similar.\n(setq solarized-emphasize-indicators nil)\n\n```\nNote that these need to be set **before** `load-theme` is invoked for Solarized.\n\n## Underline position setting for X\n\nIf you are using emacs under X you might like the following setting which puts\nthe underline below the\n[font bottomline instead of the baseline](https://publib.boulder.ibm.com/infocenter/pseries/v5r3/topic/com.ibm.aix.graPHIGS/doc/phigstrf/figures/afma5rbd.jpg).\n\nIhmo it enhances the general readability and also it fits well with the default\n`solarized-high-contrast-mode-line` setting which uses an slightly emphazised\nunderline for the modeline to create one horisontal window border in the same\nmanner as the vertical border.\n\n```lisp\n(setq x-underline-at-descent-line t)\n```\n\n# Bugs & Improvements\n\nPlease, report any problems that you find on the projects integrated\nissue tracker. If you've added some improvements and you want them\nincluded upstream don't hesitate to send me a patch or even better - a\nGitHub pull request.\n\n# FAQ\n\n## Stand-alone manual installation\n\nSave the following files in a folder that's on your Emacs' `load-path`:\n\n* [dash.el](https://raw.githubusercontent.com/magnars/dash.el/master/dash.el) - [dash](https://github.com/magnars/dash.el), a modern list library for Emacs\n* [solarized.el](https://raw.githubusercontent.com/bbatsov/solarized-emacs/master/solarized.el) - the solarized theme\n\nSave the following files into `~/.emacs.d/themes`:\n\n* [solarized-light-theme.el](https://raw.githubusercontent.com/bbatsov/solarized-emacs/master/solarized-light-theme.el) \n* [solarized-dark-theme.el](https://raw.githubusercontent.com/bbatsov/solarized-emacs/master/solarized-dark-theme.el)\n\nAdd this your `.emacs.d`:\n\n```lisp\n(add-to-list 'custom-theme-load-path \"~/.emacs.d/themes\")\n```\n\nNow you can load the theme with the interactive function `load-theme`.\n\n\n# Contributors\n\n- [Thomas Frössman](http://t.jossystem.se)\n\n(Add yourself to the list)\n"
},
{
"alpha_fraction": 0.6542324423789978,
"alphanum_fraction": 0.6714490652084351,
"avg_line_length": 22.16666603088379,
"blob_id": "fcdf09df71c9838ed3dfab989ed45543faa75d26",
"content_id": "5f287e68d58430b4e9b227ee1fc83141985da3bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 697,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 30,
"path": "/scripts/t-jsdev",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nSESSION='jsdev'\n\nif [ -z \"$1\" ]\nthen\n WORKINGDIR=\"$PWD\"\nelse\n WORKINGDIR=$1\nfi\n\n# if the session is already running, just attach to it.\ntmux has-session -t $SESSION\nif [ $? -eq 0 ]; then\n echo \"Session $SESSION already exists. Attaching.\"\n tmux -2 attach -t $SESSION\n exit 0;\nfi\n\ntmux start-server\ntmux new-session -d -s $SESSION -n shell\ntmux new-window -t $SESSION:1 -n vim\ntmux new-window -t $SESSION:2 -n repl\n\ntmux send-keys -t $SESSION:0 \"cd $WORKINGDIR; clear\" C-m\ntmux send-keys -t $SESSION:1 \"cd $WORKINGDIR; clear; vs\" C-m\ntmux send-keys -t $SESSION:2 \"cd $WORKINGDIR; clear; js -i\" C-m\n\ntmux select-window -t $SESSION:0\ntmux -2 attach-session -d -t $SESSION\n\n\n"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.6451612710952759,
"avg_line_length": 11.399999618530273,
"blob_id": "1bf830798310b56cf5b5bf08a7086f7a18b60bb7",
"content_id": "14d1bc620745051adc690ac9d309e01264deeb5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 5,
"path": "/scripts/xfce-startup.sh",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\ndeluge &\nt-init &\nredshift &\nxmodmap ~/.xmodmaprc &\n"
},
{
"alpha_fraction": 0.4767441749572754,
"alphanum_fraction": 0.5,
"avg_line_length": 16,
"blob_id": "29aa9b4dce12d65761d7583f110cf8ff93537405",
"content_id": "2b5b8e70799298d9e973ba370e1c9827531de203",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 86,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 5,
"path": "/scripts/getVolume",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\namixer get Master | \\\n grep 'Front Left\\:' | \\\n awk '{print $7 $5}'\n\n"
},
{
"alpha_fraction": 0.6280992031097412,
"alphanum_fraction": 0.6487603187561035,
"avg_line_length": 16.285715103149414,
"blob_id": "443616ebacd8d4cb6d2a58543133ce162610facc",
"content_id": "95038a860a463f9d5d1c15d942aea8a2abfceaed",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 242,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 14,
"path": "/build/matcher/Makefile",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "EXENAME=matcher\nPREFIX=/home/jcullen\nBINDIR=$(PREFIX)/bin\n\n.PHONY: all\nall: $(EXENAME)\n\n$(EXENAME): main.c matcher.c\n\t$(CC) $(CFLAGS) -O3 -Wall $^ -o $@\n\n.PHONY: install\ninstall: $(EXENAME)\n\tinstall -d $(BINDIR)\n\tinstall -m 0755 $< $(BINDIR)\n"
},
{
"alpha_fraction": 0.5730011463165283,
"alphanum_fraction": 0.5840092897415161,
"avg_line_length": 26.396825790405273,
"blob_id": "7473185b449e05759011360f9e5fad3baaec76cd",
"content_id": "b7b52811ebc6ebf33fc388be7d6bc791ff2b490c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1726,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 63,
"path": "/macrandomizer.py",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nimport sys\nimport random\nimport platform\n\nhexdigits = [c for c in \"0123456789ABCDEF\"]\n\nclass CmdFailedException(Exception):\n pass\n\ndef random_mac_address():\n random_digits = [random.choice(hexdigits) for _ in xrange(12)]\n return \"%s%s:%s%s:%s%s:%s%s:%s%s:%s%s\" % tuple(random_digits)\n\ndef run_cmd(cmd):\n print cmd\n res = os.system(cmd)\n if res == 0:\n return res\n else:\n raise CmdFailedException(cmd)\n\ndef change_mac(system = None, interface = None, mac = None):\n system = system or platform.system()\n if mac is None:\n mac = random_mac_address()\n\n if system == 'Darwin':\n interface = interface or 'en0'\n cmd_strs = [\"sudo ifconfig %s ether %s\" % (interface, mac)]\n elif system == 'Linux':\n interface = interface or 'wlp2s0'\n cmd_strs = [\n \"sudo ifconfig %s down\" % (interface,),\n \"sudo ifconfig %s hw ether %s\" % (interface, mac),\n \"sudo ifconfig %s up\" % (interface,),\n ]\n else:\n raise Exception(\"Unknown system: %s\" % (system,))\n\n try:\n map(run_cmd, cmd_strs)\n except CmdFailedException as e:\n print >>sys.stderr, \"Command:\\n\\\"%s\\\" failed.. not continuing\" % (e.args[0])\n sys.exit(1)\n\n\nif __name__ == '__main__':\n try:\n new_mac = sys.argv[1]\n except IndexError:\n new_mac = random_mac_address()\n print \"No mac address specified\"\n print \"Randomly generated new mac address: %s\" % (new_mac,)\n\n print \"Attempting to set mac address...\"\n try:\n change_mac(mac = new_mac)\n except Exception:\n print \"Something went wrong.. go ask Justin for help :D\"\n else:\n print \"Success!\"\n"
},
{
"alpha_fraction": 0.3963133692741394,
"alphanum_fraction": 0.7511520981788635,
"avg_line_length": 71.33333587646484,
"blob_id": "bb535dfc6a75ff3553c234aa72f4b8cac34629e4",
"content_id": "dfc1106c1a202b1ef4ef7cd25e5ac1f06743c39e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 3,
"path": "/scripts/dual_monitor_setup.sh",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "xrandr --newmode \"1920x1200_60.00\" 193.25 1920 2056 2256 2592 1200 1203 1209 1245 -hsync +vsync\nxrandr --addmode VGA1 1920x1200_60.00\nxrandr --output LVDS1 --auto --output VGA1 --mode 1920x1200_60.00 --right-of LVDS1\n"
},
{
"alpha_fraction": 0.7054794430732727,
"alphanum_fraction": 0.7130898237228394,
"avg_line_length": 27.565217971801758,
"blob_id": "d6ce657b2990c5aa1472faf5156d67adc12f8c78",
"content_id": "684a15c37464f2392a9c3fbebf308c5e277bd13d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1314,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 46,
"path": "/.emacs.d.old/themes/jazz-theme/README.md",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "# Jazz theme for Emacs 24\n\nJazz is a warm dark color theme for Emacs 24 inspired by [zenburn](https://github.com/bbatsov/zenburn-emacs) color theme.\nThe theme uses Emacs 24 theming support and is tested only with GUI version of Emacs.\n\n# Installation\n\n## Manual\n\nDownload `jazz-theme.el` to the directory `~/.emacs.d/themes/` and add this to your `.emacs` (or `.emacs.d/init.el` or whatever init file you use):\n\n```lisp\n(add-to-list 'custom-theme-load-path \"~/.emacs.d/themes/\")\n```\n\nNow you can load the theme like this:\n\n`M-x load-theme RET jazz`\n\n## el-get\n\nThe Jazz theme can also be installed with [el-get](https://github.com/dimitri/el-get) with the following recipe:\n\n```lisp\n(:name jazz-theme\n :description \"A warm dark color theme for Emacs 24\"\n :type github\n :pkgname \"donderom/jazz-theme\"\n :minimum-emacs-version 24\n :prepare (add-to-list 'custom-theme-load-path default-directory))\n```\n\nTo load it automatically on Emacs startup add this to your init file:\n\n```lisp\n(load-theme 'jazz t)\n```\n\n# Screenshot\n\n\n\n# Contribution\n\nYou are more than welcome to send a pull request for any improvement you think is worth adding to the theme\n(we're colour-nazi after all, aren't we? :)).\n"
},
{
"alpha_fraction": 0.48534080386161804,
"alphanum_fraction": 0.49841269850730896,
"avg_line_length": 28.744443893432617,
"blob_id": "134a84c9c9fbff85b2beebce07fc44634a3988c9",
"content_id": "84722ce6e6ea8e25dea85d971c874eb1ff329960",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5355,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 180,
"path": "/build/matcher/matcher.c",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "/*\n * Most of this is borrowed from Command-T:\n * https://github.com/wincent/Command-T/blob/4b2da2fb/ruby/command-t/match.c\n * The rest is also under standard 2-clause BSD license, 2012 Burke Libbey\n */\n\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n// use a struct to make passing params during recursion easier\ntypedef struct\n{\n char *str_p; // pointer to string to be searched\n long str_len; // length of same\n char *abbrev_p; // pointer to search string (abbreviation)\n long abbrev_len; // length of same\n double max_score_per_char;\n int dot_file; // boolean: true if str is a dot-file\n int always_show_dot_files; // boolean\n int never_show_dot_files; // boolean\n} matchinfo_t;\n\ndouble recursive_match(matchinfo_t *m, // sharable meta-data\n long str_idx, // where in the path string to start\n long abbrev_idx, // where in the search string to start\n long last_idx, // location of last matched character\n double score) // cumulative score so far\n{\n double seen_score = 0; // remember best score seen via recursion\n int dot_file_match = 0; // true if abbrev matches a dot-file\n int dot_search = 0; // true if searching for a dot\n\n long i, j;\n\n for (i = abbrev_idx; i < m->abbrev_len; i++) {\n char c = m->abbrev_p[i];\n if (c == '.')\n dot_search = 1;\n int found = 0;\n for (j = str_idx; j < m->str_len; j++, str_idx++) {\n char d = m->str_p[j];\n if (d == '.') {\n if (j == 0 || m->str_p[j - 1] == '/') {\n m->dot_file = 1; // this is a dot-file\n if (dot_search) // and we are searching for a dot\n dot_file_match = 1; // so this must be a match\n }\n }\n else if (d >= 'A' && d <= 'Z')\n d += 'a' - 'A'; // add 32 to downcase\n if (c == d) {\n found = 1;\n dot_search = 0;\n\n // calculate score\n double score_for_char = m->max_score_per_char;\n long distance = j - last_idx;\n if (distance > 1) {\n double factor = 1.0;\n char last = m->str_p[j - 1];\n char curr = m->str_p[j]; // case matters, so get again\n if (last == '/')\n factor = 0.9;\n else if (last == '-' ||\n last == '_' ||\n last == ' ' ||\n (last >= '0' && last <= '9'))\n factor = 0.8;\n else if (last >= 'a' && last <= 'z' && curr >= 'A' && curr <= 'Z')\n factor = 0.8;\n else if (last == '.')\n factor = 0.7;\n else\n // if no \"special\" chars behind char, factor diminishes\n // as distance from last matched char increases\n factor = (1.0 / distance) * 0.75;\n score_for_char *= factor;\n }\n\n if (++j < m->str_len) {\n // bump cursor one char to the right and\n // use recursion to try and find a better match\n double sub_score = recursive_match(m, j, i, last_idx, score);\n if (sub_score > seen_score)\n seen_score = sub_score;\n }\n\n score += score_for_char;\n last_idx = str_idx++;\n break;\n }\n }\n if (!found)\n return 0.0;\n }\n if (m->dot_file) {\n if (m->never_show_dot_files || (!dot_file_match && !m->always_show_dot_files))\n return 0.0;\n }\n return (score > seen_score) ? 
score : seen_score;\n}\n\ndouble score(char *abbrev, // user input string to search for\n char *str, // proposed potential match to calculate score against\n int show_dotfiles) // bool\n{\n long i;\n\n matchinfo_t m;\n m.str_p = str;\n m.str_len = strlen(str);\n m.abbrev_p = abbrev;\n m.abbrev_len = strlen(abbrev);\n m.max_score_per_char = (1.0 / m.str_len + 1.0 / m.abbrev_len) / 2;\n m.dot_file = 0;\n m.always_show_dot_files = show_dotfiles == 1;\n m.never_show_dot_files = show_dotfiles == 0;\n\n // calculate score\n double score = 1.0;\n if (m.abbrev_len == 0) { // special case for zero-length search string\n // filter out dot files\n if (!m.always_show_dot_files) {\n for (i = 0; i < m.str_len; i++) {\n char c = m.str_p[i];\n if (c == '.' && (i == 0 || m.str_p[i - 1] == '/')) {\n score = 0.0;\n break;\n }\n }\n }\n } else { // normal case\n score = recursive_match(&m, 0, 0, 0, 0.0);\n }\n\n return score;\n}\n\ntypedef struct\n{\n char *ptr;\n double score;\n} item_t;\n\nint compare_items(const void *a, const void *b)\n{\n item_t *at = (item_t *)a;\n item_t *bt = (item_t *)b;\n if (at->score > bt->score)\n return -1;\n else if (at->score < bt->score)\n return 1;\n else\n return 0;\n}\n\nvoid score_list(char *abbrev,\n char **strs,\n int num_strs,\n int show_dotfiles,\n int limit)\n{\n long i;\n\n item_t items[num_strs];\n\n for (i = 0; i < num_strs; i++) {\n items[i].ptr = strs[i];\n items[i].score = score(abbrev, strs[i], show_dotfiles);\n }\n\n qsort(items, num_strs, sizeof(item_t), compare_items);\n\n if (num_strs < limit) limit = num_strs;\n for (i = 0; i < limit; i++) {\n printf(\"%s\", items[i].ptr);\n }\n\n}\n\n"
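The scorer above rewards query characters that appear in order in the candidate path and damps each character's contribution as the gap since the last match grows. A minimal Python sketch of that subsequence-with-gap-penalty idea; it is a simplified greedy version for illustration only, not the exact recursive algorithm or its special-character factors.

```python
# Sketch: greedy subsequence match with a gap penalty, echoing the C scorer's
# per-character budget and its (1/distance) * 0.75 damping for long gaps.
def fuzzy_score(query, candidate):
    query, candidate = query.lower(), candidate.lower()
    if not candidate:
        return 0.0
    if not query:
        return 1.0
    per_char = (1.0 / len(candidate) + 1.0 / len(query)) / 2
    score, last, pos = 0.0, -1, 0
    for c in query:
        pos = candidate.find(c, pos)
        if pos < 0:
            return 0.0                     # not a subsequence -> no match
        gap = pos - last
        score += per_char * (1.0 if gap <= 1 else 0.75 / gap)
        last, pos = pos, pos + 1
    return score

# Paths whose characters sit close together score higher:
print(fuzzy_score("odr", "app/models/order.rb") > fuzzy_score("odr", "Godzilla"))
```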
},
{
"alpha_fraction": 0.6791443824768066,
"alphanum_fraction": 0.6960784196853638,
"avg_line_length": 29.29729652404785,
"blob_id": "9bdfcc637d44efe4af790259c8a266cbdb5f7202",
"content_id": "3c10311157e369532d5e1f9df5bd252d6cc3228e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1122,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 37,
"path": "/scripts/t-railsdev",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nSESSION='railsdev'\n\n# if the session is already running, just attach to it.\ntmux has-session -t $SESSION\nif [ $? -eq 0 ]; then\n echo \"Session $SESSION already exists. Attaching.\"\n tmux -2 attach -t $SESSION\n exit 0;\nfi\n\nif [ -z \"$1\" ]\nthen\n echo \"Error: Must specify a valid rails application directory\"\n exit 0;\nelse\n WORKINGDIR=$1\nfi\n\ntmux start-server\ntmux new-session -d -s $SESSION -n shell\ntmux new-window -t $SESSION:1 -n vim\ntmux new-window -t $SESSION:2 -n console\ntmux new-window -t $SESSION:3 -n tests\ntmux new-window -t $SESSION:4 -n log\ntmux new-window -t $SESSION:5 -n server\n\ntmux send-keys -t $SESSION:0 \"cd $WORKINGDIR; clear\" C-m\ntmux send-keys -t $SESSION:1 \"cd $WORKINGDIR; clear; vs\" C-m\ntmux send-keys -t $SESSION:2 \"cd $WORKINGDIR; clear; rails console\" C-m\ntmux send-keys -t $SESSION:3 \"cd $WORKINGDIR; clear; bundle exec rspec spec/\" C-m\ntmux send-keys -t $SESSION:4 \"cd $WORKINGDIR; clear; tail -f log/development.log\" C-m\ntmux send-keys -t $SESSION:5 \"cd $WORKINGDIR; clear; rails server\" C-m\n\ntmux select-window -t $SESSION:0\ntmux -2 attach-session -d -t $SESSION\n\n"
},
{
"alpha_fraction": 0.7090908885002136,
"alphanum_fraction": 0.7757575511932373,
"avg_line_length": 81.5,
"blob_id": "78f4003a320b9f644df9c583ef7f959fb8db325a",
"content_id": "22cd8800850d4dfd1733f52d0f98f66602937b11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 2,
"path": "/scripts/nix-pyenv",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nnix-shell -p python27Full -p python27Packages.jedi -p python27Packages.ipython -p python27Packages.ipdb -p python27Packages.pip --command ''$1''\n"
},
{
"alpha_fraction": 0.6619047522544861,
"alphanum_fraction": 0.6797618865966797,
"avg_line_length": 21.648649215698242,
"blob_id": "9feecfc40c997b9a08e183c2e361e8b36fb357db",
"content_id": "d56c50064608a163a0dfd5dcdc9a7924a17891d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 37,
"path": "/scripts/t-pydev",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nSESSION='pydev'\n\nif [ -z \"$1\" ]\nthen\n WORKINGDIR=\"$PWD\"\nelse\n WORKINGDIR=$1\nfi\n\nif [ -z \"$2\" ]\nthen\n PYENVIRONMENT=\"\"\nelse\n PYENVIRONMENT=$2\nfi\n\n# if the session is already running, just attach to it.\ntmux has-session -t $SESSION\nif [ $? -eq 0 ]; then\n echo \"Session $SESSION already exists. Attaching.\"\n tmux -2 attach -t $SESSION\n exit 0;\nfi\n\ntmux start-server\ntmux new-session -d -s $SESSION -n shell\ntmux new-window -t $SESSION:1 -n vim\ntmux new-window -t $SESSION:2 -n repl\n\ntmux send-keys -t $SESSION:0 \"cd $WORKINGDIR; workon $PYENVIRONMENT; clear\" C-m\ntmux send-keys -t $SESSION:1 \"cd $WORKINGDIR; workon $PYENVIRONMENT; clear; vs\" C-m\ntmux send-keys -t $SESSION:2 \"cd $WORKINGDIR; workon $PYENVIRONMENT; clear; ipython2\" C-m\n\ntmux select-window -t $SESSION:0\ntmux -2 attach-session -d -t $SESSION\n\n\n"
},
{
"alpha_fraction": 0.705974817276001,
"alphanum_fraction": 0.705974817276001,
"avg_line_length": 26.65217399597168,
"blob_id": "518d9be1df593ecc57d2ddf97785ccbb4de1b5a8",
"content_id": "0000c3b7961a0005d749ebb92465069dfadee545",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 636,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 23,
"path": "/scripts/install-conf.sh",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "set -e\n\ngit clone --bare https://github.com/phenguin/dotfiles.git $HOME/.cfg\n\nfunction conf {\n git --git-dir=$HOME/.cfg/ --work-tree=$HOME $@\n}\n\nconf checkout\nconf config status.showUntrackedFiles no\n\necho \"Fetching submodules..\"\ngit submodule update --init --recursive\n\necho \"Trying to build matcher..\"\nmake -C build/matcher || make -C build/matcher install || true\n\necho \"Installing vim plugins via Vundle..\"\nvim -c 'BundleInstall' -c 'qa'\n\necho \"DONE.\"\necho \"Now add: alias conf='git --git-dir=$HOME/.cfg/ --work-tree=$HOME' to bashrc or something.\"\necho \"Also don't forget to link /etc/nixos/* to ~/.conf-nix if you are on nixos.\"\n"
},
{
"alpha_fraction": 0.6724599003791809,
"alphanum_fraction": 0.6885026693344116,
"avg_line_length": 27.69230842590332,
"blob_id": "0fba969bc524cf9f1830070475399886a111c215",
"content_id": "c866a94ecdce5c7ec67717ae473dc6263431f5ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 748,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 26,
"path": "/scripts/t-init",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nSESSION='main'\nWORKINGDIR=\"$HOME\"\n\n# if the session is already running, just attach to it.\ntmux has-session -t $SESSION\nif [ $? -eq 0 ]; then\n echo \"Session $SESSION already exists. Attaching.\"\n tmux -2 attach -t $SESSION\n exit 0;\nfi\n\ntmux start-server\ntmux new-session -d -s $SESSION -n shell\ntmux new-window -t $SESSION:1 -n vim\ntmux new-window -t $SESSION:2 -n irc\ntmux new-window -t $SESSION:3 -n mail\n\ntmux send-keys -t $SESSION:0 \"cd $WORKINGDIR; clear\" C-m\ntmux send-keys -t $SESSION:1 \"cd $WORKINGDIR; clear; vs\" C-m\ntmux send-keys -t $SESSION:2 \"cd $WORKINGDIR; clear; irssi\" C-m\ntmux send-keys -t $SESSION:3 \"cd $WORKINGDIR; clear; mutt\" C-m\n\ntmux select-window -t $SESSION:0\n# tmux -2 attach-session -d -t $SESSION\n\n\n"
},
{
"alpha_fraction": 0.689835250377655,
"alphanum_fraction": 0.7018883228302002,
"avg_line_length": 31.324674606323242,
"blob_id": "9ab3b0a5725be83328dfeb8b6b8ab7f37b6b52a5",
"content_id": "19e18cc74c9b04aa765d2b50f69719b382ceb319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2489,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 77,
"path": "/.bash/config",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nexport NODE_PATH='/usr/local/lib/jsctags/':$NODE_PATH\nexport LANG=en_US.UTF-8\n\nexport PATH=\"$HOME/build/sbt/bin\":\"$HOME/code/pagezero/bin\":\"$HOME/scripts\":\"$HOME/.cabal/bin\":\"$HOME/.gem/ruby/1.9.1/bin\":$PATH\n\nexport SCALA_HOME=\"$HOME/build/scala-2.10.2/\"\nexport PATH=\"$SCALA_HOME/bin\":$PATH\nexport PATH=\"$HOME/bin\":$PATH\nexport PATH=\"$HOME/code/darkdynasty/bin\":$PATH\nexport PATH=\"$HOME/code/jetpack/bin\":$PATH\nexport PYTHONPATH=\"$HOME/code/darkdynasty/src\":\"$HOME/code/jetpack/src\":$PYTHONPATH\n# Important for emacsclient.. makes it start the server if it is not already running.\nexport ALTERNATE_EDITOR=\"\"\n\n# Python virtual environment stuff\n\n# Virualenvwrapper setup\nexport WORKON_HOME=~/.virtualenvs/\n# source /etc/bash_completion.d/virtualenvwrapper\n\n# ~/.bashrc: executed by bash(1) for non-login shells.\n# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)\n# for examples\n\n # If not running interactively, do not do anything, otherwise startup\n # tmux\n[[ $- != *i* ]] && return\n[[ $TERM != \"screen-256color\" ]] # && tmuxgo # && exit\n\n# fan control function - fan 0 to turn off fan\nfunction fan() {\n echo level $@ | sudo tee /proc/acpi/ibm/fan\n}\n\n# don't put duplicate lines in the history. See bash(1) for more options\n# ... or force ignoredups and ignorespace\nHISTCONTROL=ignoredups:ignorespace\n\n# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)\nHISTSIZE=1000000\nHISTFILESIZE=2000000\n\n# make less more friendly for non-text input files, see lesspipe(1)\n[ -x /usr/bin/lesspipe ] && eval \"$(SHELL=/bin/sh lesspipe)\"\n\n# set variable identifying the chroot you work in (used in the prompt below)\nif [ -z \"$debian_chroot\" ] && [ -r /etc/debian_chroot ]; then\n debian_chroot=$(cat /etc/debian_chroot)\nfi\n\n# enable color support of ls and also add handy aliases\nif [ -x /usr/bin/dircolors ]; then\n test -r ~/.dircolors && eval \"$(dircolors -b ~/.dircolors)\" || eval \"$(dircolors -b)\"\n alias ls='ls --color=auto'\n #alias dir='dir --color=auto'\n #alias vdir='vdir --color=auto'\n\n alias grep='grep --color=auto'\n alias fgrep='fgrep --color=auto'\n alias egrep='egrep --color=auto'\nfi\n\n# For RVM function\n[[ -s \"$HOME/.rvm/scripts/rvm\" ]] && source \"$HOME/.rvm/scripts/rvm\"\n\n# For tmuxinator\n[[ -s $HOME/.tmuxinator/scripts/tmuxinator ]] && source $HOME/.tmuxinator/scripts/tmuxinator\n\n# For gpg-agent\n. $HOME/scripts/gpg-agent.sh\nGPG_TTY=$(tty)\nexport GPG_TTY\n\n# NIXOS\n# source ~/.nix-profile/etc/profile.d/nix.sh\n"
},
{
"alpha_fraction": 0.42105263471603394,
"alphanum_fraction": 0.42105263471603394,
"avg_line_length": 29.399999618530273,
"blob_id": "6096d1c0ba4dc217edad6dc9973ac33cf93346ce",
"content_id": "0c7dae3bc27923e3e6f67d8f98c9c33aa65748e9",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 152,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 5,
"path": "/build/matcher/matcher.h",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "void score_list(char *abbrev,\n char **strs,\n int num_strs,\n int show_dotfiles,\n int limit);\n"
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.696864128112793,
"avg_line_length": 15.882352828979492,
"blob_id": "fee6fdc00a7f833809006c7856643dcb5e2e799a",
"content_id": "197acca8189f6f02edbd3acfc10e0eecd5fb2d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/scripts/visual_python_profile",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nFILENAME=$1\nTMPFILENAME=\"output.pstats\"\nPYTHONBIN=\"$(which python)\"\n\nif [ -z \"$2\" ]\nthen\n OUTFILE=\"output.png\"\nelse\n OUTFILE=$2\nfi\n\n$PYTHONBIN -m cProfile -o $TMPFILENAME $FILENAME\ngprof2dot.py -f pstats $TMPFILENAME | dot -Tpng -o $OUTFILE\nrm $TMPFILENAME\nfeh $OUTFILE\n"
},
{
"alpha_fraction": 0.6279069781303406,
"alphanum_fraction": 0.6312292218208313,
"avg_line_length": 14.050000190734863,
"blob_id": "c1e4ab1a5f3fcb1d4ebb56d7015ed49fb571286f",
"content_id": "30585ff6a3cece3fe2df78d6093feaf88bf3ff4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 301,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 20,
"path": "/scripts/vim-bundle-update.sh",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\ncd ~/.vim/bundle\n\nfor directory in `ls ~/.vim/bundle`\ndo\n\techo \"Checking for $directory update...\"\n\tif [ -d \"$directory/.git\" ]\n \tthen\n\t\tcd $directory\n\t\tgit pull\n git submodule update\n\t\tcd ..\n\telse\n\t\techo \"$directory not a git plugin. Skipping update.\"\n\tfi\n\techo \"\\n\"\ndone\n\nexit 0\n"
},
{
"alpha_fraction": 0.6600000262260437,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 49,
"blob_id": "873223378a560d0fdc897f758b0510b7e6416a93",
"content_id": "4d740be96607bfea5a55e068846b7f3e7e992d72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/scripts/mirror_monitor_setup.sh",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "xrandr --output LVDS2 --auto --output VGA2 --auto\n"
},
{
"alpha_fraction": 0.7035813927650452,
"alphanum_fraction": 0.7137414216995239,
"avg_line_length": 25.782312393188477,
"blob_id": "035e582614129d122c472d358f7df20a343adcda",
"content_id": "c4fc04c46ae689f96b9c05664106768efa59c46f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3937,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 147,
"path": "/.zshrc",
"repo_name": "phenguin/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/zsh\n\n# Lines configured by zsh-newuser-install\nHISTFILE=~/.zhistory\nHISTSIZE=1000000\nSAVEHIST=1000000\nsetopt appendhistory extendedglob notify\nbindkey -e\n# End of lines configured by zsh-newuser-install\n# The following lines were added by compinstall\nzstyle :compinstall filename '/home/jcullen/.zshrc'\n\nautoload -Uz compinit\ncompinit\n# End of lines added by compinstall\n#\n# Stuff from zgen example config\n# load zgen\nsource \"${HOME}/scripts/zgen.zsh\"\n\n\n# check if there's no init script\nif ! zgen saved; then\n echo \"Creating a zgen save\"\n\n zgen oh-my-zsh\n\n # plugins\n zgen oh-my-zsh plugins/git\n zgen oh-my-zsh plugins/extract\n zgen oh-my-zsh plugins/sudo\n zgen oh-my-zsh plugins/fancy-ctrl-z\n zgen oh-my-zsh plugins/cabal\n zgen oh-my-zsh plugins/mosh\n zgen oh-my-zsh plugins/pass\n zgen oh-my-zsh plugins/colored-man-pages\n zgen oh-my-zsh plugins/history-substring-search\n zgen load zsh-users/zsh-syntax-highlighting\n\n zgen load spwhitt/nix-zsh-completions\n\n # bulk load\n zgen loadall <<EOPLUGINS\n zsh-users/zsh-history-substring-search\nEOPLUGINS\n # ^ can't indent this EOPLUGINS\n\n # completions\n zgen load zsh-users/zsh-completions src\n\n # theme\n zgen oh-my-zsh themes/geoffgarside\n\n # save all to init script\n zgen save\nfi\n\n# Other customizations\n\neval \"$(fasd --init auto)\"\n\nexport _Z_CMD='j'\nexport PYTHONSTARTUP=~/.pythonrc\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib\nexport PATH=$PATH:~/bin:/var/lib/gems/1.8/bin\n\n# Customize to your needs...\n\n\n# -x4 = Set tab stops to 4 spaces.\n# -F = Don't open with less if entire file fits on screen.\n# -R = Output \"raw\" control characters. (colors)\n# -s = Squeeze multiple blank lines.\n# -X = Ignore ignore termcap initialization. With xterms it keeps\n# the last page of the document on the screen. (see\n# http://www.shallowsky.com/linux/noaltscreen.html)\nalias less='less -x4RFsX'\n\nsource $HOME/.bashrc\n\n# export _Z_CMD='j'\nexport PYTHONSTARTUP=~/.pythonrc\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib\nexport PATH=$PATH:~/bin:/var/lib/gems/1.8/bin\n\nexport PAGER=\"less\"\nexport LESS=\"-R\"\n# nicer highlighting\nif [ -f \"/usr/share/source-highlight/src-hilite-lesspipe.sh\" ]; then\n # ubuntu 12.10: sudo apt-get install source-highlight\n export LESSOPEN=\"| /usr/share/source-highlight/src-hilite-lesspipe.sh %s\"\nelif [ -f \"/usr/bin/src-hilite-lesspipe.sh\" ]; then\n # fedora 18: sudo yum install source-highlight\n export LESSOPEN=\"| /usr/bin/src-hilite-lesspipe.sh %s\"\nfi\n\n# Force a reload of completion system if nothing matched; this fixes installing\n# a program and then trying to tab-complete its name\n_force_rehash() {\n (( CURRENT == 1 )) && rehash\n return 1 # Because we didn't really complete anything\n}\n\n# configuration for \"z\" script\n. 
~/scripts/z.sh\nfunction precmd () {\n _z --add \"$(pwd -P)\"\n}\n\n# Binding to edit current line in editor\nautoload edit-command-line\nzle -N edit-command-line\nbindkey '^Xe' edit-command-line\n#\n# Meta-u to chdir to the parent directory\nbindkey -s '\\eu' '^Ucd ..; ls^M'\n\n# If AUTO_PUSHD is set, Meta-p pops the dir stack\nbindkey -s '\\ep' '^Upopd >/dev/null; dirs -v^M'\n\n## Bindings for history-substring-search plugin\n# bind UP and DOWN arrow keys\nzmodload zsh/terminfo\nbindkey \"$terminfo[kcuu1]\" history-substring-search-up\nbindkey \"$terminfo[kcud1]\" history-substring-search-down\n\n# bind UP and DOWN arrow keys (compatibility fallback\n# for Ubuntu 12.04, Fedora 21, and MacOSX 10.9 users)\nbindkey '^[[A' history-substring-search-up\nbindkey '^[[B' history-substring-search-down\n\n# bind P and N for EMACS mode\nbindkey -M emacs '^P' history-substring-search-up\nbindkey -M emacs '^N' history-substring-search-down\n\n# bind k and j for VI mode\nbindkey -M vicmd 'k' history-substring-search-up\nbindkey -M vicmd 'j' history-substring-search-down\n\n# Directory change shortcuts\nc () {\n cd $HOME/code/$@\n}\n\nh () {\n cd $HOME/$@\n}\n"
}
] | 27 |
deepangit/myapp | https://github.com/deepangit/myapp | 0c926fc9c37943bed92ce4062e1c2c4b6903936a | 5524edd130918ddbd23adef66df15b09a055a83d | 02c44592e7467cdf42199c853a5f92498d91556e | refs/heads/master | 2022-03-09T02:47:26.271789 | 2019-10-03T10:56:26 | 2019-10-03T10:56:26 | 212,546,143 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6639455556869507,
"alphanum_fraction": 0.6639455556869507,
"avg_line_length": 26.185184478759766,
"blob_id": "993dbfe0503894df6bc9ea386051fa416d787062",
"content_id": "a28f75cd89c64e17f8b1e451e3457ab021695cc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 735,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 27,
"path": "/myapp/setup.py",
"repo_name": "deepangit/myapp",
"src_encoding": "UTF-8",
"text": "\nfrom setuptools import setup, find_packages\nfrom helloworld.core.version import get_version\n\nVERSION = get_version()\n\nf = open('README.md', 'r')\nLONG_DESCRIPTION = f.read()\nf.close()\n\nsetup(\n name='helloworld',\n version=VERSION,\n description='Hello World App using cememt',\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Deepan Chakrvarthy',\n author_email='[email protected]',\n url='https://github.com/deepangit/swdp',\n license='unlicensed',\n packages=find_packages(exclude=['ez_setup', 'tests*']),\n package_data={'helloworld': ['templates/*']},\n include_package_data=True,\n entry_points=\"\"\"\n [console_scripts]\n helloworld = helloworld.main:main\n \"\"\",\n)\n"
}
] | 1 |
zehgota/Tarrasque | https://github.com/zehgota/Tarrasque | b2cbf298aa12c0c2270a7e7fa9cc8bb70257a2a5 | 1172917f5e2e1b21c32180eeee35ca7de11d6100 | b1957d4d56b2673c7cffc632eb91d49bdafe8909 | refs/heads/master | 2021-01-22T00:29:45.399761 | 2014-01-04T15:45:41 | 2014-01-04T15:45:41 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.624078631401062,
"alphanum_fraction": 0.6412776708602905,
"avg_line_length": 20.473684310913086,
"blob_id": "7d122d8b73ece2ece90b1f00e94df17a9d367f5f",
"content_id": "049d71f6f0e88531733d51e8ea3ab45b56d6f1c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 407,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 19,
"path": "/tarrasque/utils.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "MAX_COORD_INTEGER = 16384\n\n\ndef cell_to_coords(cell_x, cell_y, offset_x, offset_y, cellbits):\n cellwidth = 1 << cellbits\n x = ((cell_x * cellwidth) - MAX_COORD_INTEGER) + offset_x\n y = ((cell_y * cellwidth) - MAX_COORD_INTEGER) + offset_y\n return (x, y)\n\n\ndef none_or_nonzero(val):\n if val == 0:\n return\n else:\n return val\n\n\ndef map_foreach(map):\n return lambda lst: [map[item] for item in lst]"
},
{
"alpha_fraction": 0.5440414547920227,
"alphanum_fraction": 0.5613126158714294,
"avg_line_length": 26.619047164916992,
"blob_id": "e6938aca86b8b1e2bb2ad2830efa1864c9a7dd11",
"content_id": "278028bdfaed148b82884da8487c8c935bfd4d0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 21,
"path": "/tarrasque/buildings/tower.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .building import *\n\n@register_entity(\"DT_DOTA_BaseNPC_Tower\")\nclass Tower(Building):\n \"\"\"\n Inherits from :class:`Building`.\n\n Represents a Tower in the game.\n \"\"\"\n\n lane = Property(\"DT_BaseEntity\", \"m_iName\")\\\n .apply(FuncTrans(lambda n: re.findall('(?<=tower)([1-4])\\_([a-z]*)', n)[0][1] if n else None))\n \"\"\"\n Lane of the tower. ``\"bot\"``, ``\"mid\"`` or ``\"top\"``.\n \"\"\"\n\n tier = Property(\"DT_BaseEntity\", \"m_iName\")\\\n .apply(FuncTrans(lambda n: re.findall('(?<=tower)([1-4])\\_([a-z]*)', n)[0][0] if n else None))\n \"\"\"\n Tier of the tower, 1-4.\n \"\"\""
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 19,
"blob_id": "2a3c4982c5435eb1a93fb2fea581ab5b9458e0b6",
"content_id": "b13ff2331e216d57ae401d3f9ff4cc904b902d42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 5,
"path": "/tarrasque/creeps/__init__.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .manager import *\n\nfrom .lanecreep import *\nfrom .neutralcreep import *\nfrom .courier import *\n"
},
{
"alpha_fraction": 0.6879134178161621,
"alphanum_fraction": 0.6993385553359985,
"avg_line_length": 40.57500076293945,
"blob_id": "2b43c6130858fb785ca40b7f931de81e553c34a4",
"content_id": "79fdf9cc14bf20110df0b1d10652970253213d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1663,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 40,
"path": "/docs/source/game_end_guide.rst",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "Analysing game end states\n=========================\n\nOne of the most common ways to get information about a game is to look\nat the state of the game when the ancient has died. Finding that time\ncan be a fairly annoying process, but Tarrasque makes it quite\neasy. This example moves to the final tick of the game and then prints\nout statistics for the players::\n\n import tarrasque\n\n replay = tarrasque.StreamBinding.from_file(\"demo.dem\", start_tick=\"postgame\")\n\n for player in replay.players:\n print \"{} - Gold: {} - KDA: {}/{}/{}\".format(player.name,\n player.earned_gold, player.kills, player.deaths. player.assists)\n\nThe instruction to move to the end of the replay is in the\n``start_tick`` argument to ``StreamBinding.from_file``. By saying we\nwant to start at the ``\"postgame\"`` tick, we instruct Tarrasque to 1)\nlocate the tick where the ancient was destroyed, and 2) move to it.\n\nOne thing to note is that while you may want to use the\n:attr:`GameInfo.game_time` attribute to calculate the GPM of a hero,\nyou should first subtract 90 (1 * 60 + 30) from that value, as while\nthe Dota2 ingame clock counts from the time the creeps spawn, the\nreplay attribute starts 1 minute 30 seconds earlier. To calculate GPM,\nyou might use something like this::\n\n import tarrasque\n\n replay = tarrasque.StreamBinding.from_file(\"demo.dem\",\n start_tick=\"postgame\")\n\n for player in replay.players:\n gpm = player.earned_gold * 60 / (replay.info.game_time - 90)\n print \"{} - GPM: {}\".format(player.name, gpm)\n\nNote also that we multiply by 60, as :attr:`GameInfo.game_time` is in\nseconds, not minutes.\n"
},
{
"alpha_fraction": 0.6613667011260986,
"alphanum_fraction": 0.6667794585227966,
"avg_line_length": 31.844444274902344,
"blob_id": "f2352df31081b985b3a41972a95c2c9ce3701489",
"content_id": "9e3bad8bc1bdaa05b84d54d179010c7e62309b6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2956,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 90,
"path": "/README.md",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "Tarrasque\n=========\n\n[](https://travis-ci.org/skadistats/Tarrasque)\n\nA mapping from entities in Dota 2 replays to Python\nobjects. i.e. replays for humans.\n\nDocumentation\n-------------\n\nDocumentation is hosted by ReadTheDocs.org, and can be found\n[here](https://tarrasque.readthedocs.org/)\n\nExample usage\n-------------\n\nFor more examples, see the examples directory\n\n### Hero and Player manipulation\n\nVery basic example, but should show the power of Tarrasque\n\n from tarrasque import *\n\n replay = StreamBinding.from_file(\"./demo/PL.dem\", start_tick=\"game\")\n for player in replay.players:\n print \"{} is playing {}\".format(player.name, player.hero.name)\n\n### Generating a gold graph\n\n from tarrasque import *\n\n # Create a StreamBinding object; this object allows us to create\n # \"views\" onto the replay data. Using the from_file, we pass it\n # the name of the replay file, and the \"tick\" we want to start\n # at. However, instead of giving a precise tick, we pass \"game\",\n # which tells the StreamBinding to start from the tick where the\n # game time hits 0\n replay = StreamBinding.from_file(\"./demo/PL.dem\", start_tick=\"game\")\n\n for player in replay.players:\n print \"{} is playing {}\".format(player.name, player.hero.name)\n\n # Example output for the replay ./demo/PL.dem\n # Savlon is playing Phantom Lancer\n # once is playing Necrolyte\n # arrow6 is playing Nyx_ Assassin\n # niv3k is playing Tusk\n # Gyozmo is playing Slark\n # xportugeex28 is playing Batrider\n # andreissoares is playing Bounty Hunter\n # williamkork is playing Bristleback\n # Nenette1987 is playing Nevermore\n # Ben_Laden is playing Ogre_ Magi\n\n # As the objects (such as player, player.hero) are just views over\n # the data, when you change the tick, the data they report will\n # change. So graphing things is just a case of remembering the\n # values\n\n # If you have matplotlib installed, this will graph a hero's\n # current gold\n\n hero = replay.players[0].hero\n print \"Graphing for {}, played by {}\".format(hero.name,\n hero.player.name)\n\n gold_data = []\n tick_data = []\n\n # Start at \"game\" which is the time the game clock hits 0, stop at\n # \"postgame\", which is when the ancient is destroyed. Step of 30\n # so we only sample data once a second\n for tick in replay.iter_ticks(start=\"game\", end=\"postgame\", step=30):\n\n # Players have gold, not heroes. This deals with people\n # swapping heroes and stuff.\n gold_data.append(hero.player.total_gold)\n tick_data.append(tick)\n\n # Get our plotting library\n import matplotlib.pyplot as plt\n # And plot the gold against the ticks\n plt.plot(tick_data, gold_data)\n\n # And then show it\n plt.show()\n # Or save it\n plt.savefig(\"./output.png\")\n"
},
{
"alpha_fraction": 0.7028021216392517,
"alphanum_fraction": 0.7070478200912476,
"avg_line_length": 42.61728286743164,
"blob_id": "05b49d835d729597f05eb6b47fea329a2fe4f2ea",
"content_id": "900add4edfe9d2fea5a9ee2870f405ad638b95ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 3533,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 81,
"path": "/docs/source/intro.rst",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "An Introduction to Tarrasque\n============================\n\nTarrasque is a library, build around Skadi_, to allow the easy and\nstraightforward analysis of Dota 2 replays. While Skadi provides only\nthe raw data, Tarrasque allows you to deal in objects and relationships.\nA comparison will show this best.\n\n.. _Skadi: https://github.com/onethirtyfive/skadi\n\nThis code uses Skadi to print out the names of the players in the replay, along\nwith the name of the hero they are playing\n\n::\n\n import io\n from skadi.engine import world as w\n from skadi.replay import demo as rd\n\n demo = rd.construct(\"./demo.dem\")\n for tick, string_tables, world in demo.stream(tick=5000):\n ehandle, player_resource = world.find_by_dt(player_resource_dt)\n\n for i in range(31):\n player_name_key = (\"DT_DOTA_PlayerResource\", \"m_iszPlayerNames.%40s\" % i)\n player_name = player_resource[player_name_key]\n if not player_name:\n break\n hero_ehandle_key = (\"DT_DOTA_PlayerResource\", \"m_hSelectedHero\")\n hero_ehandle = player_resource[hero_ehandle_key]\n hero_dt = world.recv_tables[world.classes[hero_ehandle]].dt\n hero_name = hero_dt.replace(\"DT_DOTA_Unit_Hero_\", \"\").replace(\"_\", \" \")\n print hero_name\n break\n\nUsing Tarrasque, this could be written as\n\n::\n\n import tarrasque\n\n replay = tarrasque.StreamBinding.from_file(\"demo.dem\")\n for player in replay.players:\n print player.name\n print player.hero.name\n\nThe code speaks for itself. Tarrasque makes it simple, easy and even fun to\nanalyse Dota 2 replays.\n\nTarrasque concepts for people who know what an ehandle is\n---------------------------------------------------------\n\nTarrasque is a mapper between Dota2 entities (DT classes) and Python classes.\nEvery Tarrasque class that represents an entity has a\n:attr:`~DotaEntity.dt_key` property that specifies the DT class that it\nrepresents, and once instantiated, every Tarrasque class has a\n:attr:`~DotaEntity.ehandle` property that is used to get information from\nthe world. The current world can be accessed via :attr:`~DotaEntity.world`,\nand the results of ``world.find(self.ehandle)`` via\n:attr:`~DotaEntity.properties`. All this and more is documented on\n:class:`DotaEntity`.\n\nTarrasque concepts for people who don't know what an ehandle is\n---------------------------------------------------------------\n\nThink of Tarrasque as an ORM for Dota2, except the models are already\nmaintained, and you don't have to worry about the database. You don't have\nto mess about writing code to deal with the (disgusting) stuff that Dota2 does\nin its replays, as Tarrasque exposes the data to you in a manner that follows\nPython conventions; you'll get a ``None`` object instead of -1, and the string\n``\"radiant\"`` instead of the integer 2 (where appropriate. Tarrasque understands\nthat values have special meanings only in specific contexts). This allows you to\njust use the data, and not need to worry about the stuff underneath.\n\nThe one major difference between a database ORM and Tarrasque is that while\nmost ORM models are statefull (that is, when the database changes, the model\nstays the same until reloaded), Tarrasque models contain no state, other than\nthat which is needed to uniquely identify the instance (and now you know what an\nehandle is). This means that you never have to do ``hero.update(tick_number)``\nor similar; all that is handled automatically via the :class:`StreamBinding`/\n:class:`DotaEntity` abstraction.\n"
},
{
"alpha_fraction": 0.730434775352478,
"alphanum_fraction": 0.730434775352478,
"avg_line_length": 18.16666603088379,
"blob_id": "53073c6811b5b190a1c9f98aa20c77b4e2a96b09",
"content_id": "cc781f4023f0c120c995156376427d89475c982f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 6,
"path": "/tarrasque/buildings/__init__.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .manager import *\n\nfrom .ancient import *\nfrom .barrack import *\nfrom .building import *\nfrom .tower import *\n"
},
{
"alpha_fraction": 0.6567410826683044,
"alphanum_fraction": 0.6567410826683044,
"avg_line_length": 15.119999885559082,
"blob_id": "ae3dc28bd31836672362e274c15c4d7b15e324b4",
"content_id": "f5ded88bae71c987f42017af0dccf098f0c6f086",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1209,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 75,
"path": "/docs/source/api.rst",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "API\n===\n\nStream Binding\n--------------\n\n.. automodule:: tarrasque.binding\n :members: StreamBinding\n\nCreep Manager\n-------------\n\n.. automodule:: tarrasque.creeps.manager\n :members: CreepManager\n\nDota Entity\n-----------\n\n.. automodule:: tarrasque.entity\n :members:\n\nPlayer\n------\n\n.. automodule:: tarrasque.player\n :members:\n\nGame Info\n---------\n\n.. automodule:: tarrasque.gameinfo\n :members:\n\nAbility\n-------\n\n.. automodule:: tarrasque.ability\n :members:\n\nBase NPC\n--------\n\n.. automodule:: tarrasque.basenpc\n :members:\n\nHero\n----\n\nWhile each hero has a distinct class, not all have classes that are defined in\nsource code. This is because the :class:`Hero` class registers itself as a\nwildcard on the DT regexp ``\"DT_DOTA_Unit_Hero_*\"``, and then dynamically\ngenerates hero classes from the ehandle. The generated classes simply inherit\nfrom the :class:`Hero` and have different values for :attr:`~Hero.dt_key` and\n:attr:`~Hero.name`.\n\n.. automodule:: tarrasque.hero\n :members:\n\nGame Events\n-----------\n\n.. automodule:: tarrasque.gameevents\n :members:\n\nCombat Log\n----------\n\n.. automodule:: tarrasque.combatlog\n :members:\n \nItem\n----------\n\n.. automodule:: tarrasque.item\n :members:\n"
},
{
"alpha_fraction": 0.5793358087539673,
"alphanum_fraction": 0.6143911480903625,
"avg_line_length": 20.27450942993164,
"blob_id": "2aa2dcc6e93e44037d1c689f42d27819799f131e",
"content_id": "03d46e2b30e6f001d70c245c2d88bd5d27640c07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 51,
"path": "/tests/utils.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "def eq_(val1, val2, message=None):\n if type(val1) == type(val2) == float:\n if abs(val1 - val2) < 0.001:\n return\n else:\n if val1 == val2:\n return\n\n if message is None:\n message = \"{} != {}\".format(val1, val2)\n raise AssertionError(message)\n\ndef neq_(val1, val2, message=None):\n if val1 != val2:\n return\n\n if message is None:\n message = \"{} == {}\".format(val1, val2)\n raise AssertionError(message)\n\ndef gt_(val1, val2, message=None):\n if val1 > val2:\n return\n\n if message is None:\n message = \"{} <= {}\".format(val1, val2)\n raise AssertionError(message)\n\ndef gteq_(val1, val2, message=None):\n if val1 >= val2:\n return\n\n if message is None:\n message = \"{} < {}\".format(val1, val2)\n raise AssertionError(message)\n\ndef lt_(val1, val2, message=None):\n if val1 < val2:\n return\n\n if message is None:\n message = \"{} >= {}\".format(val1, val2)\n raise AssertionError(message)\n\ndef in_(val, lst, message=None):\n if val in lst:\n return\n\n if message is None:\n message = \"{} not in {}\".format(val, lst)\n raise AssertionError(message)"
},
{
"alpha_fraction": 0.6589359045028687,
"alphanum_fraction": 0.6616643667221069,
"avg_line_length": 32.318180084228516,
"blob_id": "000314a4f4d8194d521070e4c8024e3ee3c91504",
"content_id": "b33db369c5f521a413f7b2d651409923a5b242ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 733,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 22,
"path": "/tarrasque/creeps/neutralcreep.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from ..properties import *\nfrom .lanecreep import LaneCreep\nfrom ..entity import register_entity\nimport re\n\n\n@register_entity('DT_DOTA_BaseNPC_Creep_Neutral')\nclass NeutralCreep(LaneCreep):\n \"\"\"\n A class for neutral creeps, does not seem to include Roshan (needs\n confirmation)\n \"\"\"\n\n # The regexp cleans up the model cache to give a half readable name\n name = Property(\"DT_BaseEntity\", \"m_nModelIndex\")\\\n .apply(StringTableTrans(\"modelprecache\"))\\\n .apply(FuncTrans(lambda n: n[0]))\\\n .apply(FuncTrans(lambda n: re.findall('(?<=/)[a-z\\_]+(?=\\.mdl)', n)[0]))\n \"\"\"\n A name for the creep. While this name is understandable, it's not something\n you would want to print to a user.\n \"\"\"\n"
},
{
"alpha_fraction": 0.6631439924240112,
"alphanum_fraction": 0.6644650101661682,
"avg_line_length": 22.65625,
"blob_id": "d42caba85c2d3ed1c830a334e185d22f264217f3",
"content_id": "d686f58a3abcfd1bbf4ead05373b3190eeb6d4ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 757,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 32,
"path": "/tarrasque/heroes/lonedruid.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from ..hero import Hero\nfrom ..properties import *\nfrom ..entity import *\nfrom ..ability import BaseAbility\n\n@register_entity('DT_DOTA_Unit_Hero_LoneDruid')\nclass LoneDruid(Hero):\n \"\"\"\n A specialized class for the hero Lone Druid\n \"\"\"\n\n name = 'Lone Druid'\n\n @property\n def bear(self):\n return self.abilities[0].bear\n@register_entity('DT_DOTA_Ability_LoneDruid_SpiritBear')\nclass SummonBear(BaseAbility):\n \"\"\"\n A specialized class for Lone Druid's summon bear ability\n \"\"\"\n\n bear = Property('DT_DOTA_Ability_LoneDruid_SpiritBear', 'm_hBear')\\\n .apply(EntityTrans())\n\n@register_entity('DT_DOTA_Unit_SpiritBear')\nclass SpiritBear(Hero):\n \"\"\"\n A class for LD's spirit bear\n \"\"\"\n\n name = 'Spirit Bear'\n"
},
{
"alpha_fraction": 0.6272189617156982,
"alphanum_fraction": 0.639053225517273,
"avg_line_length": 24.399999618530273,
"blob_id": "ac62a30babe18f7526418cb719154fc2a5ac9324",
"content_id": "d63d3f163ecf8a258a962d6202a1d09290b6bbb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 20,
"path": "/tarrasque/creeps/lanecreep.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from ..properties import *\nfrom ..basenpc import BaseNPC\nfrom ..entity import register_entity\n\n@register_entity(\"DT_DOTA_BaseNPC_Creep_Lane\")\nclass LaneCreep(BaseNPC):\n \"\"\"\n A lane creep (ranged or melee).\n \"\"\"\n\n health_percentage = Property('DT_DOTA_BaseNPC_Creep_Lane',\n 'm_iHealthPercentage')\\\n .apply(FuncTrans(lambda h: h / 1.27))\n\n @property\n def health(self):\n \"\"\"\n The creep's max health.\n \"\"\"\n return self.health_percentage * self.max_health / 100"
},
{
"alpha_fraction": 0.6390847563743591,
"alphanum_fraction": 0.640808641910553,
"avg_line_length": 24.52400016784668,
"blob_id": "a729abee0800aba73f4d85ecc229c67d73a81a37",
"content_id": "e612b65559b0ec91a959775be08e7b73169a8b11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6381,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 250,
"path": "/tarrasque/properties.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .consts import *\nfrom .utils import *\n\nclass BaseProperty(object):\n exposed = True\n _cache = {}\n _tick = None\n\n def get_value(self, entity):\n raise NotImplementedError()\n\n def __get__(self, entity, objtype=None):\n if not self.exposed:\n return self\n\n # Check if we're chaining the thing\n if issubclass(type(entity), BaseProperty) or entity is None:\n return self\n # Otherwise run the property\n if hasattr(entity, \"tick\") and hasattr(entity, \"ehandle\"):\n # If we have hit the next tick, clear the cache\n if entity.tick != self._tick:\n self._tick = entity.tick\n self._cache = {}\n\n # Try and get the item out of the cache\n try:\n return self._cache[entity.ehandle]\n except KeyError:\n pass\n\n # Or calculate it, and add it to the cache\n value = self.get_value(entity)\n self._cache[entity.ehandle] = value\n return value\n\n # Always account for the poor users who don't have an ehandle to\n # go home to\n return self.get_value(entity)\n\n def apply(self, chained):\n chained.set_chained(self)\n self.exposed = False\n return chained\n\n def map(self, chained):\n self.exposed = False\n\n map_prop = self\n class MapProperty(ArrayProperty):\n def __init__(self):\n return\n\n def get_value(self, entity):\n vals = map_prop.get_value(entity)\n if vals is None:\n return\n\n output = []\n for value in vals:\n class PseudoProp(object):\n def get_value(self, entity):\n return value\n chained.chained = PseudoProp()\n output.append(chained.get_value(entity))\n return output\n return MapProperty()\n\n def filter(self, filter_func):\n self.exposed = False\n\n array_prop = self\n class FilterProperty(ArrayProperty):\n def __init__(self):\n return\n\n def get_value(self, entity):\n output = []\n vals = array_prop.get_value(entity)\n if vals is None:\n return\n\n for value in vals:\n if filter_func(value):\n output.append(value)\n return output\n return FilterProperty()\n\nclass ProviderProperty(BaseProperty):\n def used_by(self, chainer):\n chainer.set_chained(self)\n self.exposed = True\n return chainer\n\nclass LocalProperty(ProviderProperty):\n def get_value(self, entity):\n return entity.properties\n\nclass RemoteProperty(ProviderProperty):\n def __init__(self, dt_name):\n self.dt = dt_name\n\n def get_value(self, entity):\n world = entity.world\n _, properties = world.find_by_dt(self.dt)\n return properties\n\nclass ExtractorProperty(BaseProperty):\n chained = LocalProperty()\n\n def set_chained(self, chained):\n self.chained = chained\n\n def apply(self, chained):\n chained.set_chained(self)\n self.exposed = False\n return chained\n\nclass ValueProperty(ExtractorProperty):\n def __init__(self, dt_class, dt_prop=None):\n if not dt_prop:\n self.key = dt_class\n else:\n self.key = (dt_class, dt_prop)\n\n def get_value(self, entity):\n props = self.chained.get_value(entity)\n if props is None:\n return None\n\n return props.get(self.key, None)\n\n\nProperty = ValueProperty\n\nclass ModifierProperty(ExtractorProperty):\n def __init__(self, key):\n self.key = key\n\n def get_value(self, modifier):\n props = self.chained.get_value(modifier)\n if props is None:\n return\n\n return props.get(self.key, None)\n\nclass ArrayProperty(ExtractorProperty):\n def __init__(self, dt_class, dt_prop, array_length=32):\n self.array_length = array_length\n self.key = (dt_class, dt_prop)\n\n def get_value(self, entity):\n props = self.chained.get_value(entity)\n output = []\n for i in range(self.array_length):\n index_key = \"%04d\" % i\n key = (self.key[0], self.key[1] + \".\" + index_key)\n 
#key = (self.key[1], index_key)\n output.append(props[key])\n return output\n\nclass IndexedProperty(ExtractorProperty):\n def __init__(self, dt_class, dt_prop, index_val=\"index\"):\n self.index_val = index_val\n self.key = (dt_class, dt_prop)\n\n def get_value(self, entity):\n props = self.chained.get_value(entity)\n if props is None:\n return\n\n index = getattr(entity, self.index_val)\n if index is None:\n return\n\n return props[(self.key[0], self.key[1] + \".\" + \"%04d\" % index)]\n\nclass PositionProperty(ExtractorProperty):\n def __init__(self, property_class, cellbits_class=\"DT_BaseEntity\"):\n self.prop = property_class\n self.cellbits_class = cellbits_class\n\n def get_value(self, entity):\n prop = self.chained.get_value(entity)\n if prop is None:\n return\n\n cell_x = prop[(self.prop, \"m_cellX\")]\n cell_y = prop[(self.prop, \"m_cellY\")]\n offset_x, offset_y = prop[(self.prop, \"m_vecOrigin\")]\n cellbits = prop[(self.cellbits_class, \"m_cellbits\")]\n\n return cell_to_coords(cell_x, cell_y, offset_x, offset_y, cellbits)\n\nclass TransformerProperty(BaseProperty):\n chained = None\n\n def set_chained(self, chained):\n self.chained = chained\n\nclass MapTrans(TransformerProperty):\n def __init__(self, value_map):\n self.value_map = value_map\n\n def get_value(self, entity):\n raw = self.chained.get_value(entity)\n if raw is None:\n return\n\n return self.value_map[raw]\n\nclass FuncTrans(TransformerProperty):\n def __init__(self, value_func):\n self.value_func = value_func\n\n def get_value(self, entity):\n raw = self.chained.get_value(entity)\n if raw is None:\n return\n\n return self.value_func(raw)\n\nclass EntityTrans(TransformerProperty):\n def get_value(self, entity):\n from . import entity as e\n\n # Hopefully an ehandle\n ehandle = self.chained.get_value(entity)\n if ehandle == NEGATIVE or ehandle == None:\n return\n\n return e.create_entity(ehandle, entity.stream_binding)\n\nclass StringTableTrans(TransformerProperty):\n def __init__(self, table_key, index_var=\"by_index\"):\n self.key = table_key\n self.index_var = index_var\n\n def get_value(self, entity):\n val = self.chained.get_value(entity)\n if val is None:\n return\n\n table = entity.stream_binding.string_tables[self.key]\n if self.index_var == \"by_index\":\n return table.by_index.get(val, None)\n elif self.index_var == \"by_name\":\n return table.by_name.get(val, None)\n else:\n raise KeyError(\"Unknown index variable: {}\".format(s))\n"
},
{
"alpha_fraction": 0.6718907952308655,
"alphanum_fraction": 0.6729019284248352,
"avg_line_length": 27.39712905883789,
"blob_id": "caaf90840dde91348909aaf5a0c3c017ffaf57fa",
"content_id": "e4a79df372b7852ff7062d8c892520f6af6d084e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5934,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 209,
"path": "/tarrasque/entity.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import inspect\nfrom importlib import import_module\nimport sys\nimport re\n\nfrom .consts import *\nfrom .properties import *\n\nglobal ENTITY_CLASSES\nENTITY_CLASSES = {}\nglobal ENTITY_WILDCARDS\nENTITY_WILDCARDS = []\n\ndef register_entity(dt_name):\n \"\"\"\n Register a class that Tarrasque will use to represent dota entities with\n the given DT key. This class decorator automatically sets the\n :attr:``~DotaEntity.dt_key`` attribute.\n \"\"\"\n def inner(cls):\n ENTITY_CLASSES[dt_name] = cls\n cls.dt_key = dt_name\n return cls\n return inner\n\ndef register_entity_wildcard(regexp):\n \"\"\"\n Similar to :obj:`register_entity`, will register a class, but instead of\n specifying a specific DT, use a regular expression to specify a range of\n DTs. For example, :class:`Hero` uses this to supply a model for all\n heroes, i.e.::\n\n from tarrasque.entity import *\n\n @register_entity_wildcard(\"DT_DOTA_Unit_Hero_(.*)\")\n class Hero(DotaEntity):\n def __new__(cls, *args, **kwargs):\n # Use __new__ to dynamically generate individual hero classes\n # See tarrasque/hero.py for actual implementation\n return cls(*args, **kwargs)\n\n A wildcard registration will not override a specific DT registration via\n :obj:`register_entity`.\n \"\"\"\n def inner(cls):\n ENTITY_WILDCARDS.append((re.compile(regexp), cls))\n return cls\n return inner\n\ndef find_entity_class(dt_name):\n \"\"\"\n Returns the class that should be used to represent the ehandle with the given\n dt name.\n \"\"\"\n if dt_name in ENTITY_CLASSES:\n return ENTITY_CLASSES[dt_name]\n for regexp, cls in ENTITY_WILDCARDS:\n if regexp.match(dt_name):\n return cls\n return DotaEntity\n\ndef create_entity(ehandle, stream_binding):\n \"\"\"\n Finds the correct class for the ehandle and initialises it.\n \"\"\"\n dt = stream_binding.world.fetch_recv_table(ehandle).dt\n cls = find_entity_class(dt)\n return cls(ehandle=ehandle, stream_binding=stream_binding)\n\n@register_entity(\"DT_BaseEntity\")\nclass DotaEntity(object):\n \"\"\"\n A base class for all Tarrasque entity classes.\n\n If you plan to manually initialise this class or any class inheriting from\n it (and I strongly recommend against it), pass initialisation arguments by\n name.\n \"\"\"\n\n def __init__(self, stream_binding, ehandle):\n self._stream_binding = stream_binding\n self._ehandle = ehandle\n\n team = Property(\"DT_BaseEntity\", \"m_iTeamNum\")\\\n .apply(MapTrans(TEAM_VALUES))\n \"\"\"\n The team that the entity is on. Options are\n\n * ``\"radiant\"``\n * ``\"dire\"``\n \"\"\"\n\n name = Property(\"DT_BaseEntity\", \"m_iName\")\\\n .apply(FuncTrans(lambda n: n if n else None))\n \"\"\"\n The name of an entity. This will either be equal to the\n :attr:`DotaEntity.raw_name` or be overridden to be a name an end user might\n be more familiar with. For example, if :attr:`~DotaEntity.raw_name` is\n ``\"dt_dota_nevermore\"``, this value might be set to ``\"Nevermore\"`` or\n ``\"Shadow Field\"``.\n \"\"\"\n\n raw_name = name\n \"\"\"\n The raw name of the entity. Not very useful on its own.\n \"\"\"\n\n owner = Property(\"DT_BaseEntity\", \"m_hOwnerEntity\")\\\n .apply(EntityTrans())\n \"\"\"\n The \"owner\" of the entity. For example, a :class:``BaseAbility`` the hero\n that has that ability as its owner.\n \"\"\"\n\n @property\n def ehandle(self):\n \"\"\"\n The ehandle of the entity. 
Used to identify the entity across ticks.\n \"\"\"\n return self._ehandle\n\n @property\n def stream_binding(self):\n \"\"\"\n The :class:`StreamBinding` object that the entity is bound to. The\n source of all information in a Tarrasque entity class.\n \"\"\"\n return self._stream_binding\n\n @property\n def world(self):\n \"\"\"\n The world object for the current tick. Accessed via\n :attr:``stream_binding``.\n \"\"\"\n return self.stream_binding.world\n\n @property\n def tick(self):\n \"\"\"\n The current tick number.\n \"\"\"\n return self.stream_binding.tick\n\n @property\n def properties(self):\n \"\"\"\n Return the data associated with the handle for the current tick.\n \"\"\"\n return self.world.find(self.ehandle)\n\n @property\n def exists(self):\n \"\"\"\n True if the ehandle exists in the current tick's world. Examples of\n this not being true are when a :class:`Hero` entity that represents an\n illusion is killed, or at the start of a game when not all heroes have\n been chosen.\n \"\"\"\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True\n\n @property\n def modifiers(self):\n \"\"\"\n A list of the entitiy's modifiers. While this does not make sense on some\n entities, as modifiers can be associated with any entity, this is\n implemented here.\n \"\"\"\n from .modifier import Modifier\n mhandles = self.stream_binding.modifiers.by_parent.get(self.ehandle, [])\n\n modifiers = []\n for mhandle in mhandles:\n modifier = Modifier(parent=self, mhandle=mhandle,\n stream_binding=self.stream_binding)\n modifiers.append(modifier)\n return modifiers\n\n @classmethod\n def get_all(cls, binding):\n \"\"\"\n This method uses the class's :attr:`dt_key` attribute to find all\n instances of the class in the stream binding's current tick, and then\n initialise them and return them as a list.\n\n While this method seems easy enough to use, prefer other methods where\n possible. For example, using this function to find all\n :class:`Player` instances will return 11 or more players, instead of\n the usual 10, where as :attr:`StreamBinding.players` returns the\n standard (and correct) 10.\n \"\"\"\n output = []\n for ehandle, _ in binding.world.find_all_by_dt(cls.dt_key).items():\n output.append(cls(ehandle=ehandle, stream_binding=binding))\n return output\n\n def __eq__(self, other):\n if hasattr(other, \"ehandle\"):\n return other.ehandle == self.ehandle\n\n return False\n\n def __hash__(self):\n return hash(self.ehandle)"
},
{
"alpha_fraction": 0.7446808218955994,
"alphanum_fraction": 0.7446808218955994,
"avg_line_length": 22.5,
"blob_id": "538c8f52e14cf21695a29ffc7515466a00c67f89",
"content_id": "1bff3edfe118c482107967a25f7273ed27d6fa63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/tarrasque/heroes/__init__.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .lonedruid import *\nfrom .visage import *\n"
},
{
"alpha_fraction": 0.6341303586959839,
"alphanum_fraction": 0.6375150084495544,
"avg_line_length": 28.545162200927734,
"blob_id": "ce42ef50f54e0eae9c987bd73780528a2ffc31d3",
"content_id": "6807dc59384136be141dceced7dd8a1b39b5e63b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9159,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 310,
"path": "/tarrasque/binding.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import collections\n\nclass StreamBinding(object):\n \"\"\"\n The StreamBinding class is Tarrasque's metaphor for the replay. Every\n Tarrasque entity class has a reference to an instance of this\n class, and when the tick of the instance changes, the data returned by\n those classes changes. This makes it easy to handle complex object graphs\n without explicitly needing to pass the Skadi demo object around.\n\n .. note:: Where methods on this class take absolute tick values (i.e. the\n ``start`` and ``end`` arguments to :meth:`iter_ticks`), special string\n arguments may be passed. These are:\n\n * ``\"start\"`` - The start of the replay\n * ``\"draft\"`` - The start of the draft\n * ``\"pregame\"`` - The end of the draft phase\n * ``\"game\"`` - The time when the game clock hits 0\n * ``\"postgame\"`` - The time the ancient is destroyed\n * ``\"end\"`` - The last tick in the replay\n\n These values will not be 100% accurate, but should be good +-50 ticks\n \"\"\"\n\n @property\n def user_messages(self):\n \"\"\"\n The user messages for the current tick.\n \"\"\"\n return self._user_messages\n\n @property\n def game_events(self):\n \"\"\"\n The game events in the current tick.\n \"\"\"\n from .gameevents import create_game_event\n\n events = []\n for data in self._game_events:\n events.append(create_game_event(stream_binding=self, data=data))\n return events\n\n # Just another layer of indirection\n # These are properties for autodoc reasons mostly\n @property\n def world(self):\n \"\"\"\n The Skadi wold object for the current tick.\n \"\"\"\n return self._stream.world\n\n @property\n def tick(self):\n \"\"\"\n The current tick.\n \"\"\"\n return self._stream.tick\n\n @property\n def demo(self):\n \"\"\"\n The Skadi demo object that the binding is reading from.\n \"\"\"\n return self._demo\n\n @property\n def modifiers(self):\n \"\"\"\n The Skadi modifiers object for the tick.\n \"\"\"\n return self._stream.modifiers\n\n @property\n def string_tables(self):\n \"\"\"\n The string_table provided by Skadi.\n \"\"\"\n return self._stream.string_tables\n\n @property\n def prologue(self):\n \"\"\"\n The prologue of the replay.\n \"\"\"\n return self._stream.prologue\n\n def __init__(self, demo, start_tick=None, start_time=None):\n self._demo = demo\n self._user_messages = []\n self._game_events = []\n\n self._state_change_ticks = {\n \"start\": 0,\n \"end\": self.demo.file_info.playback_ticks - 2\n }\n\n self.go_to_tick(self.demo.file_info.playback_ticks - 2)\n\n self._state_change_times = {\n \"draft\": self.info.draft_start_time,\n \"pregame\": self.info.pregame_start_time,\n \"game\": self.info.game_start_time,\n \"postgame\": self.info.game_end_time\n }\n\n # We're already here!\n if start_tick == \"end\":\n pass\n elif start_tick is not None:\n self.go_to_tick(start_tick)\n elif start_time is not None:\n self.go_to_time(start_time)\n else:\n self.go_to_state_change(\"game\")\n\n def iter_ticks(self, start=None, end=None, step=1):\n \"\"\"\n A generator that iterates through the demo's ticks and updates the\n :class:`StreamBinding` to that tick. Yields the current tick.\n\n The start parameter defines the tick to iterate from, and if not set, the\n current tick will be used instead.\n\n The end parameter defines the point to stop iterating; if not set,\n the iteration will continue until the end of the replay.\n\n The step parameter is the number of ticks to consume before yielding\n the tick; the default of one means that every tick will be yielded. 
Do\n not assume that the step is precise; the gap between two ticks will\n always be larger than the step, but usually not equal to it.\n \"\"\"\n if start is not None:\n self.go_to_tick(start)\n\n if end is None:\n end = self._state_change_ticks[\"end\"]\n\n self._user_messages = []\n self._game_events = []\n\n last_tick = self.tick - step - 1\n for _ in self._stream:\n if isinstance(end, str):\n if self.info.game_state == end:\n break\n elif self.tick >= end:\n break\n\n self._user_messages.extend(self._stream.user_messages)\n self._game_events.extend(self._stream.game_events)\n\n if self.tick - last_tick < step:\n continue\n else:\n last_tick = self.tick\n\n yield self.tick\n\n self._user_messages = []\n self._game_events = []\n\n def iter_full_ticks(self, start=None, end=None):\n \"\"\"\n A generator that iterates through the demo's 'full ticks'; sync points\n that occur once a minute. Should be _much_ faster than\n :method:`iter_ticks`.\n\n The ``start`` argument may take the same range of values as the ``start``\n argument of :method:`iter_ticks`. The first full tick yielded will be the\n next full tick after the position obtained via `self.go_to_tick(start)`.\n The end tick may either be a tick value or a game state. The last full\n tick yielded will be the first full tick after the tick value/game state\n change.\n \"\"\"\n if start is not None:\n self.go_to_tick(start)\n\n for _ in self._stream.iterfullticks():\n if end:\n if isinstance(end, basestring):\n if self.info.game_state == end:\n break\n else:\n if self.tick > end:\n break\n self._user_messages = []\n self._game_events = []\n yield self.tick\n\n def go_to_tick(self, tick):\n \"\"\"\n Moves to the given tick, or the nearest tick after it. Returns the tick\n moved to.\n \"\"\"\n if isinstance(tick, str):\n return self.go_to_state_change(tick)\n if tick > self.demo.file_info.playback_ticks or tick < 0:\n raise IndexError(\"Tick {} out of range\".format(tick))\n self._stream = self.demo.stream(tick=tick)\n self._user_messages = (self._stream.user_messages or [])[:]\n self._game_events = (self._stream.game_events or [])[:]\n\n return self.tick\n\n def go_to_time(self, time):\n \"\"\"\n Moves to the tick with the given game time. Could potentially overshoot,\n but not by too much. Will not undershoot.\n\n Returns the tick it has moved to.\n \"\"\"\n # Go for 31 tps as would rather exit FP loop earlier than\n # sooner. 1.5 for same reason\n FP_REGION = 1800 * 1.5 / 31\n\n # If the time we're going to is behind us, recreate the stream\n if time < self.info.game_time:\n self._stream = self.demo.stream(tick=0)\n\n # If the time is more than 1.5 full packets ahead of us, iter full ticks\n if time > self.info.game_time + FP_REGION:\n for _ in self._stream.iterfullticks():\n # If this full tick is within 1.5 full packets of the target, stop\n if self.info.game_time + FP_REGION > time and\\\n self.info.game_time < time:\n break\n else:\n # If we didn't stop via break, then it was EOF, so raise\n raise IndexError(\"Time {} out of range\".format(time))\n\n for _ in self._stream:\n if self.info.game_time > time:\n break\n else:\n raise IndexError(\"Time {} out of range\".format(time))\n\n self._user_messages = self._stream.user_messages[:]\n self._game_events = self._stream.game_events[:]\n return self.tick\n\n def go_to_state_change(self, state):\n \"\"\"\n Moves to the time when the :attr:`GameInfo.game_state` changed to the given\n state. 
Valid values are equal to the possible values of\n :att:`~GameInfo.game_state`, along with ``\"start\"`` and ``\"end\"`` which\n signify the first and last tick in the replay, respectively.\n\n Returns the tick moved to.\n \"\"\"\n if state in self._state_change_ticks:\n return self.go_to_tick(self._state_change_ticks[state])\n elif state in self._state_change_times:\n return self.go_to_time(self._state_change_times[state])\n else:\n raise ValueError(\"Unsupported state {}\".format(repr(state)))\n\n def __iter__(self):\n return self.iter_ticks()\n\n @property\n def players(self):\n \"\"\"\n A list of :class:`Player` objects, one for each player in the game.\n This excludes spectators and other non-hero-controlling players.\n \"\"\"\n from . import Player\n\n return sorted([p for p in Player.get_all(self) if \n p.index != None and p.team != \"spectator\"],\n key=lambda p:p.index)\n\n @property\n def info(self):\n \"\"\"\n The :class:`GameInfo` object for the replay.\n \"\"\"\n from .gameinfo import GameInfo\n info = GameInfo.get_all(self)\n assert len(info) == 1\n return info[0]\n\n @property\n def creeps(self):\n \"\"\"\n The :class:`CreepManager` object for the replay.\n \"\"\"\n from .creeps import CreepManager\n\n return CreepManager(self)\n\n @property\n def buildings(self):\n \"\"\"\n The :class:`BuildingManager` object for the replay.\n \"\"\"\n from .buildings import BuildingManager\n return BuildingManager(self)\n\t\n @staticmethod\n def from_file(filename, *args, **kwargs):\n \"\"\"\n Loads the demo from the filename, and then initialises the\n :class:`StreamBinding` with it, along with any other passed arguments.\n \"\"\"\n import skadi.demo\n\n demo = skadi.demo.construct(filename)\n\n return StreamBinding(demo, *args, **kwargs)\n"
},
{
"alpha_fraction": 0.64952152967453,
"alphanum_fraction": 0.64952152967453,
"avg_line_length": 21.594594955444336,
"blob_id": "09bf2778f7e19ebd33771c3335a82cc2eb161ef8",
"content_id": "b9d474ed1e7728de39dcce795f119ac1a9b71798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 37,
"path": "/tarrasque/creeps/manager.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "class CreepManager(object):\n \"\"\"\n A general object that allows the user to access the creeps in the game.\n \"\"\"\n\n def __init__(self, stream_binding):\n self.stream_binding = stream_binding\n\n @property\n def lane(self):\n \"\"\"\n Returns all the living lane creeps on the map.\n \"\"\"\n from .lanecreep import LaneCreep\n\n return [lc for lc in LaneCreep.get_all(self.stream_binding)\n if lc.is_alive]\n\n @property\n def neutrals(self):\n \"\"\"\n Returns all the living neutral creeps on the map.\n \"\"\"\n from .neutralcreep import NeutralCreep\n\n return [nc for nc in NeutralCreep.get_all(self.stream_binding)\n if nc.is_alive]\n\n @property\n def couriers(self):\n \"\"\"\n Returns all couriers on the map\n \"\"\"\n\n from .courier import Courier\n\n return Courier.get_all(self.stream_binding)\n"
},
{
"alpha_fraction": 0.551948070526123,
"alphanum_fraction": 0.5584415793418884,
"avg_line_length": 29.850000381469727,
"blob_id": "c76ec2e849e836ba2e4b2af425b85d62e6955b57",
"content_id": "d8cdb5bac83bcaf3ec32c92e6c40504a4efa2a69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 616,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 20,
"path": "/tarrasque/buildings/barrack.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .building import *\n\n@register_entity(\"DT_DOTA_BaseNPC_Barracks\")\nclass Barrack(Building):\n \"\"\"\n Inherits from :class:`Building`.\n\n Represents a Barrack in the game.\n \"\"\"\n lane = Property(\"DT_BaseEntity\", \"m_iName\")\\\n .apply(FuncTrans(lambda n: re.findall('(?<=rax\\_)([a-z]*)\\_([a-z]*)', n)[0][1] if n else None))\n \"\"\"\n Lane of the barracks. ``\"bot\"``, ``\"mid\"`` or ``\"top\"``.\n \"\"\"\n\n type = Property(\"DT_BaseEntity\", \"m_iName\")\\\n .apply(FuncTrans(lambda n: re.findall('(?<=rax\\_)([a-z]*)\\_([a-z]*)', n)[0][0] if n else None))\n \"\"\"\n Type of the barracks, ``\"melee\"`` or ``\"range\"``.\n \"\"\""
},
{
"alpha_fraction": 0.6927374005317688,
"alphanum_fraction": 0.6927374005317688,
"avg_line_length": 18.88888931274414,
"blob_id": "f347cb12c7090c78254af739c34823bebf4a79a8",
"content_id": "c7b0fd86a2dfe7f18efb136daafbb5d8a687df7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 9,
"path": "/tarrasque/buildings/building.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from ..basenpc import *\n\n@register_entity(\"DT_DOTA_BaseNPC_Building\")\nclass Building(BaseNPC):\n \"\"\"\n Inherits from :class:`BaseNPC`.\n\n Represents a building in the game.\n \"\"\"\n"
},
{
"alpha_fraction": 0.7719298005104065,
"alphanum_fraction": 0.7719298005104065,
"avg_line_length": 56,
"blob_id": "109a9edea897c0ef5f73e20c7bc0436d00c59693",
"content_id": "8bd272f2050b9d08d284ffa4ec16fc5f32fe6846",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "-e git+https://github.com/skadistats/skadi.git#egg=skadi\n"
},
{
"alpha_fraction": 0.6591816544532776,
"alphanum_fraction": 0.6591816544532776,
"avg_line_length": 22.302326202392578,
"blob_id": "458bb7b3af3a7e0952a79eb23195e0309e7bc10e",
"content_id": "ff03f1ab07887a20d9e90385e7aba91857c1f322",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2004,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 86,
"path": "/tarrasque/modifier.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .properties import *\n\nclass Modifier(object):\n \"\"\"\n Represents a modifier on an entity. Modifiers are sustained affects on\n entities such as buffs and debufs. For example, the invulnerability aura\n provided by the fountain is exposed as a Modifier.\n \"\"\"\n\n def __init__(self, parent, mhandle, stream_binding):\n self.parent = parent\n \"\"\"\n The :class:`DotaEntity` that has this modifier.\n \"\"\"\n\n self.mhandle = mhandle\n \"\"\"\n The \"mhandle\" of the Modifier, used to track it across ticks.\n \"\"\"\n\n self.stream_binding = stream_binding\n\n @property\n def world(self):\n return self.stream_binding.world\n\n @property\n def properties(self):\n modifiers = self.stream_binding.modifiers\n return modifiers.by_parent[self.parent.ehandle][self.mhandle]\n\n @property\n def exists(self):\n modifiers = self.stream_binding.modifiers\n return self.mhandle in modifier.by_parent.get(self.parent.ehandle, {})\n \n @property\n def expire_time(self):\n duration = self.duration\n creation_time = self.created\n \n expire_time = creation_time + duration\n return expire_time\n\n name = ModifierProperty(\"name\")\n \"\"\"\n The name of the modifier.\n \"\"\"\n\n caster = ModifierProperty(\"caster\")\\\n .apply(EntityTrans())\n \"\"\"\n The caster of the modifier.\n \"\"\"\n\n aura = ModifierProperty(\"aura\")\n \"\"\"\n Boolean determining if the modifier is placed by an aura or not.\n \"\"\"\n\n ability_level = ModifierProperty(\"ability_level\")\n \"\"\"\n The level of the ability that caused this modifier.\n \"\"\"\n\n ability = ModifierProperty(\"ability\")\\\n .apply(EntityTrans())\n \"\"\"\n The ability that caused this modifier.\n \"\"\"\n\n created = ModifierProperty(\"creation_time\")\n \"\"\"\n The game time that the modifier was applied.\n \"\"\"\n \n duration = ModifierProperty(\"duration\")\n \"\"\"\n The duration of the modifier.\n \"\"\"\n\n def __repr__(self):\n if self.name:\n return \"Modifier('{}')\".format(self.name)\n else:\n return super(Modifier, self).__repr__()\n"
},
{
"alpha_fraction": 0.6283556818962097,
"alphanum_fraction": 0.6602349281311035,
"avg_line_length": 26.744186401367188,
"blob_id": "366932855dc8647fcda7208b5663afb1af11caef",
"content_id": "7f3669bbf88c2208677f6ba24f3938d2ca049e69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1192,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 43,
"path": "/examples/distance.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import itertools, math\nfrom tarrasque import *\n\nreplay = StreamBinding.from_file(\"./demo/PL.dem\", 10000)\n\ngraphing_team = \"radiant\"\n\nheroes = [player.hero for player in replay.players if\n player.team == graphing_team]\nprint \"Graphing for team {}, players: {}\".format(graphing_team,\n \", \".join(hero.player.name for hero in heroes))\n\ndistance_data = []\ntick_data = []\n\n# Step 300 ticks (10 secs) at a time in an effort to speed things up\nfor tick in replay.iter_ticks(start=10000, step=300):\n # The info object contains the game state, among other things. It\n # goes from \"loading\" -> \"draft\" -> \"pregame\" -> \"game\" -> \"postgame\"\n if replay.info.game_state == \"postgame\":\n break\n\n total_distance = 0\n n = 0\n for hero1, hero2 in itertools.combinations(heroes, 2):\n if not hero1.is_alive or not hero2.is_alive:\n continue\n\n x1, y1 = hero1.position\n x2, y2 = hero2.position\n distance = math.sqrt(abs((x1 - x2) ** 2 + (y1 - y2) ** 2))\n\n total_distance += distance\n n += 1\n\n distance_data.append(distance/n)\n tick_data.append(tick)\n\nimport matplotlib.pyplot as plt\n\nplt.plot(tick_data, distance_data)\nplt.show()\nplt.savefig(\"./output.png\")"
},
{
"alpha_fraction": 0.6540638208389282,
"alphanum_fraction": 0.6547430157661438,
"avg_line_length": 28.851350784301758,
"blob_id": "62810ed3bd045efaf87ce076fe2c16037a959929",
"content_id": "ffd322e0f7cdacd60750c2e32619b4990548a028",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4417,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 148,
"path": "/tarrasque/hero.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import re\n\nfrom .binding import *\nfrom .entity import *\nfrom .properties import *\nfrom .basenpc import *\n\n@register_entity(\"DT_DOTA_BaseNPC_Hero\")\n@register_entity_wildcard(\"DT_DOTA_Unit_Hero_*\")\nclass Hero(BaseNPC):\n \"\"\"\n While all hero classes inherit from this class, it is unlikely that this class\n will ever need to be instantiated.\n \"\"\"\n def __new__(cls, *args, **kwargs):\n if cls != Hero:\n return object.__new__(cls, *args, **kwargs)\n\n ehandle = kwargs.get(\"ehandle\")\n stream_binding = kwargs.get(\"stream_binding\")\n\n world = stream_binding.world\n dt = world.recv_tables[world.classes[ehandle]].dt\n\n cls_name = dt.replace(\"DT_DOTA_Unit_Hero_\", \"\").replace(\" \", \"\")\n cls = type(str(cls_name), (Hero,), {})\n register_entity(dt)(cls)\n\n instance = object.__new__(cls, *args, **kwargs)\n cls.__init__(instance, *args, **kwargs)\n if not instance.name:\n split_name = [s.replace(\"_\", \"\") for s in\n re.split(\"([A-Z][^A-Z]*)\", cls_name) if s]\n cls.name = \" \".join(split_name)\n return instance\n\n name = None\n \"\"\"\n The name of the hero. For the base :class:`Hero` class, this is ``None``,\n but it is set when a subclass is created in the __new__ method.\n \"\"\"\n\n xp = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_iCurrentXP\")\n \"\"\"\n The hero's experience.\n \"\"\"\n\n respawn_time = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flRespawnTime\")\n \"\"\"\n Appears to be the absolute time that the hero respawns. See\n :attr:`~GameInfo.game_time` for the current time of the tick to compare.\n\n TODO: Check this on IRC\n \"\"\"\n\n ability_points = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_iAbilityPoints\")\n \"\"\"\n Seems to be the number of ability points the player can assign.\n \"\"\"\n\n natural_strength = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flStrength\")\n \"\"\"\n The hero's strength from levels.\n \"\"\"\n\n natural_agility = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flAgility\")\n \"\"\"\n The hero's agility from levels.\n \"\"\"\n\n natural_intelligence = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flIntellect\")\n \"\"\"\n The hero's intelligence from levels.\n \"\"\"\n\n strength = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flStrengthTotal\")\n \"\"\"\n The hero's strength (from levels, items, and the attribute bonus).\n \"\"\"\n\n agility = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flAgilityTotal\")\n \"\"\"\n The hero's agility (from levels, items, and the attribute bonus).\n \"\"\"\n\n intelligence = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flIntellectTotal\")\n \"\"\"\n The hero's intelligence (from levels, items, and the attribute bonus).\n \"\"\"\n\n recent_damage = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_iRecentDamage\")\n \"\"\"\n The damage taken by the hero recently. The exact time period that classifies\n as \"recently\" is around 2/3 seconds.\n\n TODO: Find exact value\n \"\"\"\n\n spawned_at = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_flSpawnedAt\")\n \"\"\"\n The time (in :attr:`~GameInfo.game_time` units) the hero spawned at.\n\n TODO: Check this in game.\n \"\"\"\n\n replicating_hero = Property(\n \"DT_DOTA_BaseNPC_Hero\", \"m_hReplicatingOtherHeroModel\"\n ).apply(EntityTrans())\n \"\"\"\n The :class:`Hero` the current hero is \"replicating\" [#f1]_. If the instance\n is not an illusion (which use the :class:`Hero` class also), this will be\n ``None``. 
There is no guarantee that that this hero will exist (see\n :attr:`DotaEntity.exists`) if the hero is someone like Phantom Lancer, who\n may have an illusion which creates other illusions, and then dies.\n However, this is still a useful property for tracking illusion creation\n chains\n \"\"\"\n\n _player_id = Property(\"DT_DOTA_BaseNPC_Hero\", \"m_iPlayerID\")\n\n @property\n def player(self):\n \"\"\"\n The player that is playing the hero.\n \"\"\"\n for player in self.stream_binding.players:\n if player.index == self._player_id:\n return player\n return None\n\n @staticmethod\n def get_all_heroes(stream_binding):\n \"\"\"\n Overrides DotaEntity.get_all in order to return all heroes with the prefix\n ```\"DT_DOTA_Unit_Hero\"```, as there is never any results for\n ```\"DT_DOTA_BaseNPC_Hero\"```, and it also wouldn't be of any use to devs.\n \"\"\"\n from . import entity\n\n heroes = []\n for ehandle, _ in stream_binding.world.find_all_by_dt(\n \"DT_DOTA_Unit_Hero_*\").iteritems():\n hero = entity.create_entity(ehandle, stream_binding)\n # Avoid illusions\n if hero.replicating_hero:\n continue\n heroes.append(hero)\n return heroes"
},
{
"alpha_fraction": 0.696811318397522,
"alphanum_fraction": 0.696811318397522,
"avg_line_length": 24.1842098236084,
"blob_id": "57ab6ac280e63ec5f363651be942ca2d5bef6c1c",
"content_id": "97504824ada1ea09a460afb5f6e0abc665c5a792",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1913,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 76,
"path": "/tarrasque/gameevents.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import functools\nimport re\n\nfrom skadi.engine import game_event\n\nfrom .properties import *\n\nglobal EVENT_CLASSES\nEVENT_CLASSES = {}\nglobal EVENT_WILDCARDS\nEVENT_WILDCARDS = []\n\ndef register_event(event_name):\n \"\"\"\n Register a class as the handler for a given event.\n \"\"\"\n def inner(event_class):\n EVENT_CLASSES[event_name] = event_class\n return event_class\n return inner\n\ndef register_event_wildcard(event_pattern):\n \"\"\"\n Same as :func:`register_event` but uses a regex pattern to match, instead of\n a static game event name.\n \"\"\"\n def inner(event_class):\n EVENT_WILDCARD.append((re.compile(event_wildcard), event_class))\n return event_class\n return inner\n\ndef find_game_event_class(event_name):\n \"\"\"\n Given the name of an event, finds the class that should be used to represent\n it.\n \"\"\"\n if event_name in EVENT_CLASSES:\n return EVENT_CLASSES[event_name]\n\n for regexp, cls in EVENT_WILDCARDS:\n if regexp.match(event_name):\n return cls\n\n return GameEvent\n\ndef create_game_event(stream_binding, data):\n \"\"\"\n Creates a new GameEvent object from a stream binding and the un-humanized game\n event data.\n \"\"\"\n event_list = stream_binding.prologue.game_event_list\n name, properties = game_event.humanize(data, event_list)\n\n cls = find_game_event_class(name)\n\n return cls(stream_binding=stream_binding, name=name, properties=properties)\n\nclass GameEvent(object):\n \"\"\"\n Base class for all game events. Handles humanise and related things.\n \"\"\"\n\n def __init__(self, stream_binding, name, properties):\n # Note that game events can't really be tracked across ticks, so\n # we just pass the data\n\n self.name = name\n \"\"\"\n The name of the GameEvent. i.e. ``\"dota_combatlog\"``, ``\"dota_chase_hero\"``.\n \"\"\"\n self.properties = properties\n\n self.stream_binding = stream_binding\n\n def __repr__(self):\n return \"{}({})\".format(self.name, self.properties)"
},
{
"alpha_fraction": 0.6589806079864502,
"alphanum_fraction": 0.6699029207229614,
"avg_line_length": 27.413793563842773,
"blob_id": "b547ffd447c5674c758103f8fc7e02a5c0b4cf6e",
"content_id": "55444075554e28930bf73dc42e8c531b4c040a2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 824,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 29,
"path": "/tarrasque/creeps/courier.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from ..properties import *\nfrom ..basenpc import BaseNPC\nfrom ..entity import register_entity\n\n@register_entity(\"DT_DOTA_Unit_Courier\")\nclass Courier(BaseNPC):\n \"\"\"\n A courier\n \"\"\"\n\n respawn_time = Property('DT_DOTA_Unit_Courier', 'm_flRespawnTime')\n \"\"\"\n Returns a float of the courier's time until respawn.\n Returns 0.0 if courier is alive\n \"\"\"\n \n\n is_flying = Property('DT_DOTA_Unit_Courier', 'm_bFlyingCourier')\n \"\"\"\n Returns 0 if the courier is walking, 1 if it's flying\n \"\"\"\n\n\n unusual_effect = Property('DT_DOTA_Unit_Courier', 'm_iUnusualParticleSystem')\n \"\"\"\n Something about unusual couriers. -1 if not unusual.\n 96 seems to be the ethereal flames effect, although it's possible that\n 96 just refers to a general unusual particle thing. Not sure at all\n \"\"\"\n"
},
{
"alpha_fraction": 0.6119987964630127,
"alphanum_fraction": 0.6126017570495605,
"avg_line_length": 27.59482765197754,
"blob_id": "c61522e30945e337e278e3d393a6b2617d896d1e",
"content_id": "7781b78d4bb42d0a58e56b632cefc475a6dd8b95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3317,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 116,
"path": "/tarrasque/item.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .entity import *\nfrom .properties import *\nfrom .consts import *\n\n@register_entity(\"DT_DOTA_Item\")\n@register_entity_wildcard(\"DT_DOTA_Item_*\")\nclass Item(DotaEntity):\n \"\"\"\n Item class\n \"\"\"\n\n \n off_cooldown_time = Property('DT_DOTABaseAbility', 'm_fCooldown')\n \"\"\"\n The time when the item will come off cooldown\n \"\"\"\n \n @property\n def is_on_cooldown(self):\n current_time = self.stream_binding.info.game_time\n return current_time <= self.off_cooldown_time\n \n\n cooldown_length = Property('DT_DOTABaseAbility', 'm_flCooldownLength')\n \"\"\"\n These are all the same as the functions in the ability class,\n I'm lazy, go read them, they are fairly self-explanatory :D\n \"\"\"\n\n mana_cost = Property('DT_DOTABaseAbility', 'm_iManaCost')\n\n cast_range = Property('DT_DOTABaseAbility', 'm_iCastRange')\n\n purchase_time = Property('DT_DOTA_Item', 'm_flPurchaseTime')\n \"\"\"\n The time when the item was purchased\n \"\"\"\n \n droppable = Property('DT_DOTA_Item', 'm_bDroppable')\n \"\"\"\n Presumably if the item is droppable (ex: not Aegis)\n \"\"\"\n\n initial_charges = Property('DT_DOTA_Item', 'm_iInitialCharges')\n \"\"\"\n Presumably charges when item is bought (ex: 8 for diffusal)\n \"\"\"\n \n sharability = Property('DT_DOTA_Item', 'm_iSharability')\n \"\"\"\n Presumably whether the item can be shared (ex: Tango, RoH)\n \"\"\"\n \n current_charges = Property('DT_DOTA_Item', 'm_iCurrentCharges')\n \"\"\"\n Presumably the item's current charges (ex: 7 for Diffusal if used once)\n \"\"\"\n \n requires_charges = Property('DT_DOTA_Item', 'm_bRequiresCharges')\n \"\"\"\n Presumably whether the item needs charges to work (ex: Diffusal)\n \"\"\"\n\n sellable = Property('DT_DOTA_Item', 'm_bSellable')\n \"\"\"\n Presumably whether the item can be sold or not (ex: Not BKB)\n \"\"\"\n \n stackable = Property('DT_DOTA_Item', 'm_bStackable')\n \"\"\"\n Presumably whether the item can be stacked (ex: Wards)\n \"\"\"\n \n disassemblable = Property('DT_DOTA_Item', 'm_bDisassemblable')\n \"\"\"\n Presumably whether you can disassemble the item (ex: Arcane Boots)\n \"\"\"\n \n killable = Property('DT_DOTA_Item', 'm_bKillable')\n \"\"\"\n Presumably whether the item can be denied (ex: not Gem)\n \"\"\"\n \n permanent = Property('DT_DOTA_Item', 'm_bPermanent')\n \"\"\"\n Seems to be if the item will disappear when it runs out of stacks\n (i.e consumable. Ex: Tango, not Diffusal)\n \"\"\"\n \n alertable = Property('DT_DOTA_Item', 'm_bAlertable')\n \"\"\"\n Presumably whether you can right-click 'Alert allies' with it\n (ex: Smoke, Arcane Boots, 'Gather for Arcane Boots here!')\n \"\"\"\n\n purchasable = Property('DT_DOTA_Item', 'm_bPurchasable')\n \"\"\"\n Presumably whether you can buy the item or not (ex: not Aegis)\n \"\"\"\n \n recipe = Property('DT_DOTA_Item', 'm_bRecipe')\n \"\"\"\n Presumably whether the item is a recipe or not (ex: any Recipe)\n \"\"\"\n\n purchaser = Property('DT_DOTA_Item', 'm_hPurchaser')\\\n .apply(EntityTrans())\n \"\"\"\n The hero object of the purchaser of the item\n \"\"\"\n \n def __repr__(self):\n if self.name:\n return \"Item('{}')\".format(self.name)\n else:\n return super(Item, self).__repr__()\n"
},
{
"alpha_fraction": 0.6376462578773499,
"alphanum_fraction": 0.6682725548744202,
"avg_line_length": 24.946428298950195,
"blob_id": "7ab9b7ac7db48fc1b45a219c15fae0719bfc2e9e",
"content_id": "598c1da6c2c1e34bfa9b0416638eb97d1df0a76e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2906,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 112,
"path": "/tests/test_hero.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import unittest\n\nimport tarrasque\n\nfrom .utils import *\n\nclass HeroTestCase(unittest.TestCase):\n REPLAY_FILE = \"./demo/PL.dem\"\n\n @classmethod\n def setUpClass(cls):\n cls.replay = tarrasque.StreamBinding.from_file(cls.REPLAY_FILE,\n start_tick=10000)\n cls.heroes = [p.hero for p in cls.replay.players]\n cls.player = cls.replay.players[4]\n cls.hero = cls.player.hero\n\n def test_heroes_name(self):\n for hero in self.heroes:\n neq_(hero.name, None)\n\n def test_heroes_xp(self):\n for hero in self.heroes:\n gt_(hero.xp, 0)\n\n def test_heroes_respawn_time(self):\n for hero in self.heroes:\n neq_(hero.respawn_time, None)\n\n def test_heroes_ability_points(self):\n for hero in self.heroes:\n neq_(hero.ability_points, None)\n\n def test_heroes_natural_strength(self):\n for hero in self.heroes:\n gt_(hero.natural_strength, 0)\n\n def test_heroes_natural_agility(self):\n for hero in self.heroes:\n gt_(hero.natural_agility, 0)\n\n def test_heroes_natural_intelligence(self):\n for hero in self.heroes:\n gt_(hero.natural_intelligence, 0)\n\n def test_heroes_strength(self):\n for hero in self.heroes:\n gteq_(hero.strength, hero.natural_strength)\n\n def test_heroes_agility(self):\n for hero in self.heroes:\n gteq_(hero.agility, hero.natural_agility)\n\n def test_heroes_intelligence(self):\n for hero in self.heroes:\n gteq_(hero.intelligence, hero.natural_intelligence)\n\n def test_heroes_recent_damage(self):\n for hero in self.heroes:\n gteq_(hero.recent_damage, 0)\n\n def test_heroes_spawned_at(self):\n for hero in self.heroes:\n gteq_(hero.spawned_at, 1 * 60 + 30)\n\n def test_heroes_replicating_hero(self):\n for hero in self.heroes:\n eq_(hero.replicating_hero, None)\n\n def test_heroes_player(self):\n for player in self.replay.players:\n eq_(player.hero.player.name, player.name)\n eq_(player.hero.player, player)\n\n def test_name(self):\n eq_(self.hero.name, \"Slark\")\n\n def test_xp(self):\n eq_(self.hero.xp, 206)\n\n def test_respawn_time(self):\n eq_(self.hero.respawn_time, 0.0)\n\n def test_ability_points(self):\n eq_(self.hero.ability_points, 0)\n\n def test_natural_strength(self):\n eq_(self.hero.natural_strength, 22.7999992371)\n\n def test_natural_agility(self):\n eq_(self.hero.natural_agility, 22.5)\n\n def test_natural_intelligence(self):\n eq_(self.hero.natural_intelligence, 17.8999996185)\n\n def test_strength(self):\n eq_(self.hero.strength, 25.7999992371)\n\n def test_agility(self):\n eq_(self.hero.agility, 25.5)\n\n def test_intelligence(self):\n eq_(self.hero.intelligence, 20.8999996185)\n\n def test_recent_damage(self):\n eq_(self.hero.recent_damage, 0)\n\n def test_spawned_at(self):\n eq_(self.hero.spawned_at, 181.876556396)\n\n def test_replicating_hero(self):\n eq_(self.hero.replicating_hero, None)\n"
},
{
"alpha_fraction": 0.686956524848938,
"alphanum_fraction": 0.6927536129951477,
"avg_line_length": 16.25,
"blob_id": "66d91ad71ef71e00d18cff571f04f900a179a925",
"content_id": "4cdee95f74c6d5ce25a4625cb12a5338d93f46c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 345,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 20,
"path": "/tarrasque/__init__.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .binding import *\nfrom .entity import *\nfrom .modifier import *\n\nfrom .player import *\nfrom .hero import *\nfrom .gameinfo import *\nfrom .ability import *\nfrom .gameevents import *\nfrom .combatlog import *\nfrom .item import *\n\nfrom .buildings import *\n\nfrom .creeps import *\n\nfrom .heroes import *\n\n__version__ = \"0.1\"\n__author__ = \"Laurie Clark-Michalek\"\n"
},
{
"alpha_fraction": 0.6362530589103699,
"alphanum_fraction": 0.6532846689224243,
"avg_line_length": 27.859649658203125,
"blob_id": "927df1735a326951fb7ae1d645aefc51a3cc4336",
"content_id": "74ca10455f753895bccddffe53b11894966107ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1644,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 57,
"path": "/tests/test_binding.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import unittest\n\nimport tarrasque\n\nfrom .utils import *\n\nclass StreamBindingMovementTestCase(unittest.TestCase):\n REPLAY_FILE = \"./demo/PL.dem\"\n\n @classmethod\n def setUpClass(cls):\n cls.replay = tarrasque.StreamBinding.from_file(cls.REPLAY_FILE)\n\n def test_go_to_time(self):\n self.replay.go_to_time(3 * 60 + 50)\n eq_(int(self.replay.info.game_time), 3 * 60 + 50)\n\n def test_go_to_tick(self):\n self.replay.go_to_tick(2000)\n gt_(self.replay.tick, 1999)\n lt_(self.replay.tick, 2003)\n\n def test_go_to_end(self):\n self.replay.go_to_tick(\"end\")\n ticks_left = self.replay.demo.file_info.playback_ticks - self.replay.tick\n lt_(abs(ticks_left), 3)\n lt_(-1, ticks_left)\n\n def test_go_to_state_change(self):\n states = [\"pregame\", \"game\", \"postgame\"]\n\n for state_name in states:\n self.replay.go_to_tick(state_name)\n change_tick = self.replay.tick\n eq_(self.replay.info.game_state, state_name)\n while self.replay.tick == change_tick:\n self.replay.go_to_tick(self.replay.tick - 10)\n\n neq_(self.replay.info.game_state, state_name)\n\nclass StreamBindingConstantTestCase(unittest.TestCase):\n REPLAY_FILE = \"./demo/PL.dem\"\n\n def setUp(self):\n self.replay = tarrasque.StreamBinding.from_file(self.REPLAY_FILE,\n start_tick=\"game\")\n\n def test_number_of_players(self):\n eq_(len(self.replay.players), 10)\n\n def test_no_spectators_in_players(self):\n for player in self.replay.players:\n in_(player.team, [\"radiant\", \"dire\"])\n\n def test_player_heroes(self):\n for player in self.replay.players:\n neq_(player.hero, None)"
},
{
"alpha_fraction": 0.6627078652381897,
"alphanum_fraction": 0.6627078652381897,
"avg_line_length": 25.3125,
"blob_id": "2c2f109f99eaa56d99b2b66fce40b753bed41223",
"content_id": "99ece3cc5096ff99abd0b6a8d0caa044efbbe02e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 32,
"path": "/tarrasque/buildings/manager.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "class BuildingManager(object):\n \"\"\"\n A general object that allows the user to access the creeps in the game.\n \"\"\"\n\n def __init__(self, stream_binding):\n self.stream_binding = stream_binding\n\n @property\n def towers(self):\n \"\"\"\n A list of :class:`Tower` objects, one for each tower in the replay.\n This excludes dead towers.\n \"\"\"\n from .tower import Tower\n return Tower.get_all(self.stream_binding)\n\t\n @property\n def barracks(self):\n \"\"\"\n A list of :class:`Barrack` objects, one for each barrack in the replay.\n \"\"\"\n from .barrack import Barrack\n return Barrack.get_all(self.stream_binding)\n\t\n @property\n def ancients(self):\n \"\"\"\n A list of :class:`Ancient` objects, one for each ancient in the replay.\n \"\"\"\n from .ancient import Ancient\n return Ancient.get_all(self.stream_binding)\n"
},
{
"alpha_fraction": 0.6266447305679321,
"alphanum_fraction": 0.6266447305679321,
"avg_line_length": 20.714284896850586,
"blob_id": "48e56aa153cfae7b91daa81e7f28ea508cba75b8",
"content_id": "b49ddf22c755075b367a619432ac02ea1f96b133",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 28,
"path": "/tarrasque/heroes/visage.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from ..hero import Hero\nfrom ..properties import *\nfrom ..entity import *\nfrom ..basenpc import BaseNPC\n\n@register_entity('DT_DOTA_Unit_Hero_Visage')\nclass Visage(Hero):\n \"\"\"\n A specialized class for the hero Visage\n \"\"\"\n\n name = 'Visage'\n \n @property\n def familiars(self):\n \"\"\"\n Gets all familiars on the map.\n Seems to only work on living familiars\n \"\"\"\n return Familiar.get_all(self.stream_binding)\n \n@register_entity('DT_DOTA_Unit_VisageFamiliar')\nclass Familiar(BaseNPC):\n \"\"\"\n A class for visage familiars\n \"\"\"\n\n name = 'Familiar'\n"
},
{
"alpha_fraction": 0.6645172834396362,
"alphanum_fraction": 0.6659501791000366,
"avg_line_length": 30.36516761779785,
"blob_id": "1a34a5cca5b2170eed8cba3b364864664056adf9",
"content_id": "adac9055f8eac2087235841d84097ac50094f381",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5583,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 178,
"path": "/tarrasque/player.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .entity import *\nfrom .consts import *\nfrom .properties import *\n\n@register_entity(\"DT_DOTAPlayer\")\nclass Player(DotaEntity):\n \"\"\"\n Inherits from :class:`DotaEntity`.\n\n Represents a player in the game. This can be a player who is controlling a\n hero, or a \"player\" that is spectating.\n \"\"\"\n\n index = Property(\"DT_DOTAPlayer\", \"m_iPlayerID\")\\\n .apply(FuncTrans(lambda i: None if i == -1 else i))\n \"\"\"\n The index of the player in the game. i.e. 0 is the first player on the\n radiant team, 9 is the last on the dire\n\n This is ``None`` for the undefined player, which should be ignored.\n \"\"\"\n\n hero = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_hSelectedHero\"))\\\n .apply(EntityTrans())\n \"\"\"\n The :class:`Hero` that the player is playing in the tick. May be ``None``\n if the player has yet to choose a hero. May change when the\n :attr:`~GameInfo.game_state` is ``\"pre_game\"``, due to players swapping\n their heroes.\n \"\"\"\n\n @property\n def reliable_gold(self):\n \"\"\"\n The player's reliable gold.\n \"\"\"\n team = self.team.capitalize()\n try:\n prop = RemoteProperty(\"DT_DOTA_Data{}\".format(team))\\\n .used_by(IndexedProperty(\"DT_DOTA_DataNonSpectator\", \"m_iReliableGold\")) \n return prop.get_value(self)\n except (KeyError, IndexError):\n pass\n\n try:\n prop = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iReliableGold\"))\n return prop.get_value(self)\n except (KeyError, IndexError):\n pass\n\n dt_class = \"DT_{}Data\".format(team)\n dt_prop = \"m_iReliableGold{}.{:04d}\".format(team, self.index)\n prop = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(Property(dt_class, dt_prop))\n return prop.get_value(self)\n\n @property\n def unreliable_gold(self):\n \"\"\"\n The player's unreliable gold.\n \"\"\"\n team = self.team.capitalize()\n try:\n prop = RemoteProperty(\"DT_DOTA_Data{}\".format(team))\\\n .used_by(IndexedProperty(\"DT_DOTA_DataNonSpectator\", \"m_iUnreliableGold\")) \n return prop.get_value(self)\n except (KeyError, IndexError):\n pass\n\n try:\n prop = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iUnreliableGold\"))\n return prop.get_value(self)\n except KeyError:\n pass\n\n dt_class = \"DT_{}Data\".format(team)\n dt_prop = \"m_iUnreliableGold{}.{:04d}\".format(team, self.index)\n prop = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(Property(dt_class, dt_prop))\n return prop.get_value(self)\n\n earned_gold = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_EndScoreAndSpectatorStats\", \"m_iTotalEarnedGold\"))\n \"\"\"\n The total earned gold by the user. This is not net worth; it should be used to\n calculate gpm and stuff.\n \"\"\"\n\n name = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iszPlayerNames\"))\n \"\"\"\n The Steam name of the player, at the time of the game being played.\n \"\"\"\n\n steam_id = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iPlayerSteamIDs\"))\n \"\"\"\n The Steam ID of the player.\n \"\"\"\n\n team = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iPlayerTeams\"))\\\n .apply(MapTrans(TEAM_VALUES))\n \"\"\"\n The player's team. 
Possible values are\n\n * ``\"radiant\"``\n * ``\"dire\"``\n * ``\"spectator\"``\n \"\"\"\n\n last_hits = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iLastHitCount\"))\n \"\"\"\n The number of last hits on creeps that the player has.\n \"\"\"\n\n denies = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iDenyCount\"))\n \"\"\"\n The number of denies on creeps that the player has.\n \"\"\"\n\n kills = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iKills\"))\n \"\"\"\n The number of times the player has killed an enemy hero.\n \"\"\"\n\n deaths = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iDeaths\"))\n \"\"\"\n The number of times the player has died.\n \"\"\"\n\n assists = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iAssists\"))\n \"\"\"\n The number of assists the player has.\n \"\"\"\n\n streak = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iStreak\"))\n \"\"\"\n The current kill-streak the player is on\n \"\"\"\n\n buyback_cooldown_time = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\",\n \"m_flBuybackCooldownTime\"))\n \"\"\"\n The game time that the buyback will come off cooldown. If this is 0, the\n player has not bought back.\n \"\"\"\n\n last_buyback_time = RemoteProperty(\"DT_DOTA_PlayerResource\")\\\n .used_by(IndexedProperty(\"DT_DOTA_PlayerResource\", \"m_iLastBuybackTime\"))\n \"\"\"\n The :attr:`~GameInfo.game_time` that the player bought back.\n \"\"\"\n\n @property\n def has_buyback(self):\n \"\"\"\n Can the player buyback (regardless of their being alive or dead).\n \"\"\"\n current_time = self.stream_binding.info.game_time\n return current_time >= self.buyback_cooldown_time\n\n @property\n def total_gold(self):\n \"\"\"\n The sum of the player's reliable and unreliable gold.\n \"\"\"\n return self.reliable_gold + self.unreliable_gold\n"
},
{
"alpha_fraction": 0.6931818127632141,
"alphanum_fraction": 0.6931818127632141,
"avg_line_length": 18.55555534362793,
"blob_id": "8acb7539a0d477c21315fdeb4a8984e1e6d86578",
"content_id": "643e6b9121303cd9687476ce5b1d9a3468951d75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 176,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 9,
"path": "/tarrasque/buildings/ancient.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .building import *\n\n@register_entity(\"DT_DOTA_BaseNPC_Fort\")\nclass Ancient(Building):\n \"\"\"\n Inherits from :class:`Building`.\n\n Represents an Ancient in the game.\n \"\"\"\n"
},
{
"alpha_fraction": 0.6644260883331299,
"alphanum_fraction": 0.6664943099021912,
"avg_line_length": 27.441177368164062,
"blob_id": "a6cab6f22afc946fd3d3490a30bd8864ae409bb1",
"content_id": "d961acf7a6ba44a38afada82540a41cc11290fd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1934,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 68,
"path": "/tarrasque/combatlog.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from . import gameevents\nfrom .properties import *\nfrom .consts import *\n\[email protected]_event(\"dota_combatlog\")\nclass CombatLogMessage(gameevents.GameEvent):\n \"\"\"\n A message in the combat log.\n \"\"\"\n\n type = Property(\"type\").apply(MapTrans(COMBAT_LOG_TYPES))\n \"\"\"\n The type of event this message signifies. Options are:\n\n * ``\"damage\"`` - One entity is damaging another\n * ``\"heal\"`` - One entity is healing another\n * ``\"modifier added\"`` - A modifier is being added to an entity\n * ``\"modifier removed\"`` - A modifier is being removed from an entity\n * ``\"death\"`` - An entity has died.\n \"\"\"\n\n target_name = Property(\"targetname\")\\\n .apply(StringTableTrans(\"CombatLogNames\"))\\\n .apply(FuncTrans(lambda n: n[0]))\n \"\"\"\n The name of the entity that was targeted in the event. Note that this is not\n the dt name or \"pretty\" name, this is the :attr:`DotaEntity.raw_name`. So for\n a message where Shadow Field is being attacked, this would be\n ``\"dt_dota_nevermore\"``.\n \"\"\"\n\n source_name = Property(\"sourcename\")\\\n .apply(StringTableTrans(\"CombatLogNames\"))\\\n .apply(FuncTrans(lambda n: n[0]))\n \"\"\"\n The name of the source of the event.\n \"\"\"\n\n attacker_name = Property(\"attackername\")\\\n .apply(StringTableTrans(\"CombatLogNames\"))\\\n .apply(FuncTrans(lambda n: n[0]))\n \"\"\"\n The name of the attacker in the event.\n \"\"\"\n\n value = Property(\"value\")\n \"\"\"\n The value of the event. Can have various different meanings depending on the\n :attr:`type`.\n \"\"\"\n\n inflictorname = Property(\"inflictorname\")\\\n .apply(StringTableTrans(\"CombatLogNames\"))\\\n .apply(FuncTrans(lambda n: n[0]))\n \"\"\"\n The name of the \"inflictor\" (wtf is that?). Used to id modifiers.\n \"\"\"\n\n health = Property(\"health\")\n \"\"\"\n The health of the unit being attacked, for 'heal' and 'damage' events.\n \"\"\"\n\n\n timestamp = Property(\"timestamp\")\n \"\"\"\n The timestamp this combat log message corresponds to.\n \"\"\"\n"
},
{
"alpha_fraction": 0.5987577438354492,
"alphanum_fraction": 0.6074534058570862,
"avg_line_length": 30,
"blob_id": "facfe53c61d9672efbbeb31d18987902e7a5f1dc",
"content_id": "ee7f0ca9ae44ab89e0dd84cf142f3c54ced0476f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 26,
"path": "/setup.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\nsetup(\n name='Tarrasque',\n version='0.1',\n description='A Dota 2 replay view library.',\n long_description=open('README.md').read(),\n author='Laurie Clark-Michalek',\n author_email='[email protected]',\n zip_safe=True,\n url='https://github.com/bluepeppers/Tarrasque',\n license='MIT',\n packages=find_packages(),\n keywords='dota replay',\n install_requires=[\n 'skadi'\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n ])"
},
{
"alpha_fraction": 0.6488203406333923,
"alphanum_fraction": 0.6524500846862793,
"avg_line_length": 30.855491638183594,
"blob_id": "ca9cdf9217b7e6debd2df769a5c98d6b71ac0cee",
"content_id": "7eb0def347b0f53d28a248a490646e594f05123c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5510,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 173,
"path": "/tarrasque/gameinfo.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .entity import *\nfrom .consts import *\nfrom .properties import *\nfrom .utils import *\n\n@register_entity(\"DT_DOTAGamerulesProxy\")\nclass GameInfo(DotaEntity):\n \"\"\"\n Inherits from :class:`DotaEntity`\n\n The GameInfo contains the macro state of the game; the stage of the game\n that the tick is in, whether the tick is in day or night, the length of\n the game, etc etc.\n \"\"\"\n\n game_time = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_fGameTime\")\n \"\"\"\n The time in seconds of the current tick.\n \"\"\"\n\n load_time = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_flGameLoadTime\")\\\n .apply(FuncTrans(none_or_nonzero))\n \"\"\"\n The time that the game_state changed to ``loading``.\n \"\"\"\n\n draft_start_time = Property(\"DT_DOTAGamerulesProxy\",\n \"DT_DOTAGamerules.m_flHeroPickStateTransitionTime\")\\\n .apply(FuncTrans(none_or_nonzero))\n \"\"\"\n The time that the game_state changed to ``draft``.\n \"\"\"\n\n pregame_start_time = Property(\"DT_DOTAGamerulesProxy\",\n \"DT_DOTAGamerules.m_flPreGameStartTime\")\\\n .apply(FuncTrans(none_or_nonzero))\n \"\"\"\n The time that the game_state changed to ``pregame``.\n \"\"\"\n\n game_start_time = Property(\"DT_DOTAGamerulesProxy\",\n \"DT_DOTAGamerules.m_flGameStartTime\")\\\n .apply(FuncTrans(none_or_nonzero))\n \"\"\"\n The time that the game_state changed to ``game``.\n \"\"\"\n\n game_end_time = Property(\"DT_DOTAGamerulesProxy\",\n \"DT_DOTAGamerules.m_flGameEndTime\")\\\n .apply(FuncTrans(none_or_nonzero))\n \"\"\"\n The time that the game_state changed to ``postgame``.\n \"\"\"\n\n match_id = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_unMatchID\")\n \"\"\"\n The unique match id, used by the Steam API and stuff (i.e. DotaBUff and\n friends).\n \"\"\"\n\n game_state = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_nGameState\")\\\n .apply(MapTrans(GAME_STATE_VALUES))\n \"\"\"\n The state of the game. Potential values are:\n\n * ``\"loading\"`` - Players are loading into the game\n * ``\"draft\"`` - The draft state has begun\n * ``\"strategy\"`` - Unknown\n * ``\"pregame\"`` - The game has started but creeps have not been\n spawned\n * ``\"game\"`` - The main game, between the first creep spawn and the\n ancient being destroyed\n * ``\"postgame\"`` - After the ancient has been destroyed\n * ``\"disconnect\"`` - Unknown\n \"\"\"\n\n game_mode = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_iGameMode\")\\\n .apply(MapTrans(GAME_MODE_VALUES))\n \"\"\"\n The mode of the dota game. Possible values are:\n\n * ``\"none\"``\n * ``\"all pick\"``\n * ``\"captain's mode\"``\n * ``\"random draft\"``\n * ``\"single draft\"``\n * ``\"all random\"``\n * ``\"intro\"``\n * ``\"diretide\"``\n * ``\"reverse captain's mode\"``\n * ``\"greeviling\"``\n * ``\"tutorial\"``\n * ``\"mid only\"``\n * ``\"least played\"``\n * ``\"new player pool\"``\n * ``\"compendium matchmaking\"``\n \"\"\"\n\n starting_team = Property(\"DT_DOTAGamerulesProxy\",\n \"DT_DOTAGamerules.m_iStartingTeam\")\\\n .apply(MapTrans(TEAM_VALUES))\n \"\"\"\n The team that begins the draft.\n \"\"\"\n\n pausing_team = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_iPauseTeam\")\\\n .apply(MapTrans(TEAM_VALUES))\n \"\"\"\n The team that is currently pausing. 
Will be ``None`` if the game is not\n paused, otherwise either ``\"radiant\"`` or ``\"dire\"``.\n \"\"\"\n\n active_team = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_iActiveTeam\")\\\n .apply(MapTrans(TEAM_VALUES))\n \"\"\"\n The team that is currently banning/picking.\n \"\"\"\n\n pick_state = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_iHeroPickState\")\\\n .apply(MapTrans(PICK_VALUES))\n \"\"\"\n The current pick/ban that is happening. ``None`` if no pick or ban is\n happening. If the :attr:`game_mode` is not ``\"captain's mode\"``, the\n possible values are:\n\n * ``\"all pick\"``\n * ``\"single draft\"``\n * ``\"random draft\"``\n * ``\"all random\"``\n\n Otherwise, the current pick and ban is returned in a tuple of the type of\n draft action and the index. For example, if the current tick was during\n the 5th ban of a captains mode game, the value of :attr:`pick_state` would\n be ``(\"ban\", 5)``. :attr:`active_team` could then be used to work out who\n is banning. Alternatively, if it was the 2nd pick of the game, it would be\n ``(\"pick\", 2)``.\n \"\"\"\n\n extra_time = ArrayProperty(\"DT_DOTAGamerules\", \"m_fExtraTimeRemaining\", array_length=2)\n \"\"\"\n Extra time left for both teams. Index 0 is radiant, index 1 is dire\n \"\"\"\n\n captain_ids = ArrayProperty(\"DT_DOTAGamerules\", \"m_iCaptainPlayerIDs\", array_length=2)\n \"\"\"\n IDs of the picking players (captains)\n \"\"\"\n\n banned_heroes = ArrayProperty(\"DT_DOTAGamerules\", \"m_BannedHeroes\", array_length=10)\\\n .apply(FuncTrans(map_foreach(HERO_VALUES)))\n\n \"\"\"\n List of currently banned heroes. 0-4 are radiant picks, 5-9 dire. Bans that have not yet been done have value None.\n \"\"\"\n\n selected_heroes = ArrayProperty(\"DT_DOTAGamerules\", \"m_SelectedHeroes\", array_length=10)\\\n .apply(FuncTrans(map_foreach(HERO_VALUES)))\n \"\"\"\n List of currently picked heroes. 0-4 are radiant picks, 5-9 dire. Picks that have not yet been done have value None.\n \"\"\"\n\n game_winner = Property(\"DT_DOTAGamerulesProxy\", \"DT_DOTAGamerules.m_nGameWinner\")\\\n .apply(MapTrans(WINNER_VALUES))\n \"\"\"\n The winner of the game.\n \"\"\"\n\n @property\n def replay_length(self):\n \"\"\"\n The length in seconds of the replay.\n \"\"\"\n return self.stream_binding.demo.file_info.playback_time"
},
{
"alpha_fraction": 0.39354342222213745,
"alphanum_fraction": 0.4788624048233032,
"avg_line_length": 17.95145606994629,
"blob_id": "5401ab591896cbeae5c1b7317bd8b9d2c66dc0fb",
"content_id": "ed0367e75b2c54b3370f09029684d2d79c6294f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3903,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 206,
"path": "/tarrasque/consts.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "# This value is used to report None\nNEGATIVE = 2 ** 21 - 1 # 2097151\n\n# Map between integers and teams\nTEAM_VALUES = {\n 2: \"radiant\",\n 3: \"dire\",\n 5: \"spectator\",\n 0: None,\n 1: None,\n}\n\nWINNER_VALUES = {\n 2: \"radiant\",\n 3: \"dire\",\n 5: None\n}\n\nLIFE_STATE_VALUES = {\n 0: \"alive\",\n 1: \"dying\",\n 2: \"dead\",\n 3: \"respawnable\",\n 4: \"discardbody\"\n}\n\nGAME_STATE_VALUES = {\n 1: \"loading\",\n 2: \"draft\",\n 3: \"strategy time\",\n 4: \"pregame\",\n 5: \"game\",\n 6: \"postgame\",\n 7: \"disconnect\"\n}\n\nGAME_MODE_VALUES = {\n 0: \"none\",\n 1: \"all pick\",\n 2: \"captain's mode\",\n 3: \"random draft\",\n 4: \"single draft\",\n 5: \"all random\",\n 6: \"intro\",\n 7: \"diretide\",\n 8: \"reverse captain's mode\",\n 9: \"greeviling\",\n 10: \"tutorial\",\n 11: \"mid only\",\n 12: \"least played\",\n 13: \"new player pool\",\n 14: \"compendium matchmaking\"\n}\n\ndef generate_pick_values():\n b = \"ban\"\n p = \"pick\"\n return {\n 1: \"all pick\",\n 2: \"single draft\",\n 4: \"random draft\",\n 27: \"all random\",\n\n # CM values\n 6: (b, 1),\n 7: (b, 2),\n 8: (b, 3),\n 9: (b, 4),\n 16: (p, 1),\n 17: (p, 2),\n 18: (p, 3),\n 19: (p, 4),\n 10: (b, 5),\n 11: (b, 6),\n 12: (b, 7),\n 13: (b, 8),\n 20: (p, 5),\n 21: (p, 6),\n 22: (p, 7),\n 23: (p, 8),\n 14: (b, 9),\n 15: (b, 10),\n 24: (p, 9),\n 25: (p, 10),\n 26: \"complete\"\n }\n\nPICK_VALUES = generate_pick_values()\n\nCOMBAT_LOG_TYPES = {\n 0: \"damage\",\n 1: \"heal\",\n 2: \"modifier added\",\n 3: \"modifier removed\",\n 4: \"death\"\n}\n\nHERO_VALUES = {\n 0: None,\n 1: \"Antimage\",\n 2: \"Axe\",\n 3: \"Bane\",\n 4: \"Bloodseeker\",\n 5: \"Crystal Maiden\",\n 6: \"Drow Ranger\",\n 7: \"Earthshaker\",\n 8: \"Juggernaut\",\n 9: \"Mirana\",\n 10: \"Morphling\",\n 11: \"Nevermore\",\n 12: \"Phantom Lancer\",\n 13: \"Puck\",\n 14: \"Pudge\",\n 15: \"Razor\",\n 16: \"Sand King\",\n 17: \"Storm Spirit\",\n 18: \"Sven\",\n 19: \"Tiny\",\n 20: \"Vengeful Spirit\",\n 21: \"Windrunner\",\n 22: \"Zuus\",\n 23: \"Kunkka\",\n 25: \"Lina\",\n 26: \"Lion\",\n 27: \"Shadow Shaman\",\n 28: \"Slardar\",\n 29: \"Tidehunter\",\n 30: \"Witch Doctor\",\n 31: \"Lich\",\n 32: \"Riki\",\n 33: \"Enigma\",\n 34: \"Tinker\",\n 35: \"Sniper\",\n 36: \"Necrolyte\",\n 37: \"Warlock\",\n 38: \"Beastmaster\",\n 39: \"Queen of Pain\",\n 40: \"Venomancer\",\n 41: \"Faceless Void\",\n 42: \"Skeleton King\",\n 43: \"Death Prophet\",\n 44: \"Phantom Assassin\",\n 45: \"Pugna\",\n 46: \"Templar Assassin\",\n 47: \"Viper\",\n 48: \"Luna\",\n 49: \"Dragon Knight\",\n 50: \"Dazzle\",\n 51: \"Rattletrap\",\n 52: \"Leshrac\",\n 53: \"Furion\",\n 54: \"Life Stealer\",\n 55: \"Dark Seer\",\n 56: \"Clinkz\",\n 57: \"Omniknight\",\n 58: \"Enchantress\",\n 59: \"Huskar\",\n 60: \"Night Stalker\",\n 61: \"Broodmother\",\n 62: \"Bounty Hunter\",\n 63: \"Weaver\",\n 64: \"Jakiro\",\n 65: \"Batrider\",\n 66: \"Chen\",\n 67: \"Spectre\",\n 68: \"Ancient Apparition\",\n 69: \"Doom Bringer\",\n 70: \"Ursa\",\n 71: \"Spirit Breaker\",\n 72: \"Gyrocopter\",\n 73: \"Alchemist\",\n 74: \"Invoker\",\n 75: \"Silencer\",\n 76: \"Obsidian Destroyer\",\n 77: \"Lycan\",\n 78: \"Brewmaster\",\n 79: \"Shadow Demon\",\n 80: \"Lone Druid\",\n 81: \"Chaos Knight\",\n 82: \"Meepo\",\n 83: \"Treant\",\n 84: \"Ogre Magi\",\n 85: \"Undying\",\n 86: \"Rubick\",\n 87: \"Disruptor\",\n 88: \"Nyx Assassin\",\n 89: \"Naga Siren\",\n 90: \"Keeper of the Light\",\n 91: \"Wisp\",\n 92: \"Visage\",\n 93: \"Slark\",\n 94: \"Medusa\",\n 95: \"Troll Warlord\",\n 96: 
\"Centaur\",\n 97: \"Magnataur\",\n 98: \"Shredder\",\n 99: \"Bristleback\",\n 100: \"Tusk\",\n 101: \"Skywrath Mage\",\n 102: \"Abaddon\",\n 103: \"Elder Titan\",\n 104: \"Legion Commander\",\n 106: \"Ember Spirit\",\n 107: \"Earth Spirit\",\n 108: \"Abyssal Underlord\",\n}"
},
{
"alpha_fraction": 0.6494269967079163,
"alphanum_fraction": 0.664707601070404,
"avg_line_length": 24.593984603881836,
"blob_id": "01a06dc1bc40232849688ac3dda5c13423512ec0",
"content_id": "4c4bbd84246829fa74e1a6a64b1301a599007952",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3403,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 133,
"path": "/tests/test_player.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "import unittest\n\nimport tarrasque\n\nfrom .utils import *\n\nclass PlayerTestCase(unittest.TestCase):\n REPLAY_FILE = \"./demo/PL.dem\"\n\n @classmethod\n def setUpClass(cls):\n cls.replay = tarrasque.StreamBinding.from_file(cls.REPLAY_FILE,\n start_tick=10000)\n cls.player = cls.replay.players[4]\n\n def test_players_have_names(self):\n for player in self.replay.players:\n neq_(player.name, None)\n\n def test_player_have_hero(self):\n for player in self.replay.players:\n neq_(player.hero, None)\n\n def test_players_reliable_gold(self):\n for player in self.replay.players:\n gt_(player.reliable_gold, -1)\n\n def test_players_unreliable_gold(self):\n for player in self.replay.players:\n gt_(player.unreliable_gold, -1)\n\n def test_players_earned_gold(self):\n for player in self.replay.players:\n gt_(player.earned_gold, -1)\n\n def test_players_steam_id(self):\n for player in self.replay.players:\n neq_(player.steam_id, None)\n\n def test_players_team(self):\n for player in self.replay.players:\n in_(player.team, [\"radiant\", \"dire\"])\n\n def test_players_last_hits(self):\n for player in self.replay.players:\n gt_(player.last_hits, -1)\n\n def test_players_denies(self):\n for player in self.replay.players:\n gt_(player.denies, -1)\n\n def test_players_kills(self):\n for player in self.replay.players:\n gt_(player.kills, -1)\n\n def test_players_deaths(self):\n for player in self.replay.players:\n gt_(player.deaths, -1)\n\n def test_players_assists(self):\n for player in self.replay.players:\n gt_(player.assists, -1)\n\n def test_players_streak(self):\n for player in self.replay.players:\n gt_(player.streak, -1)\n\n def test_players_buyback_cooldown_time(self):\n for player in self.replay.players:\n gt_(player.buyback_cooldown_time, -1)\n\n def test_players_last_buyback_time(self):\n for player in self.replay.players:\n gt_(player.last_buyback_time, -1)\n\n def test_players_has_buyback(self):\n for player in self.replay.players:\n eq_(player.has_buyback, True)\n\n def test_players_total_gold(self):\n for player in self.replay.players:\n eq_(player.total_gold, player.unreliable_gold + player.reliable_gold)\n\n def test_have_name(self):\n eq_(self.player.name, \"Gyozmo\")\n\n def test_have_hero(self):\n neq_(self.player.hero, None)\n\n def test_reliable_gold(self):\n eq_(self.player.reliable_gold, 0)\n\n def test_unreliable_gold(self):\n eq_(self.player.unreliable_gold, 154)\n\n def test_earned_gold(self):\n eq_(self.player.earned_gold, 150)\n\n def test_steam_id(self):\n eq_(self.player.steam_id, 2865091096608769)\n\n def test_team(self):\n eq_(self.player.team, \"radiant\")\n\n def test_last_hits(self):\n eq_(self.player.last_hits, 1)\n\n def test_denies(self):\n eq_(self.player.denies, 0)\n\n def test_kills(self):\n eq_(self.player.kills, 0)\n\n def test_deaths(self):\n eq_(self.player.deaths, 0)\n\n def test_assists(self):\n eq_(self.player.assists, 0)\n\n def test_streak(self):\n eq_(self.player.streak, 0)\n\n def test_buyback_cooldown_time(self):\n eq_(self.player.buyback_cooldown_time, 0.0)\n\n def test_last_buyback_time(self):\n eq_(self.player.last_buyback_time, 0)\n\n def test_has_buyback(self):\n eq_(self.player.has_buyback, True)\n\n def test_total_gold(self):\n eq_(self.player.total_gold, 154)"
},
{
"alpha_fraction": 0.6274976134300232,
"alphanum_fraction": 0.6303520202636719,
"avg_line_length": 23.729412078857422,
"blob_id": "c0418b05eb354d7a8a3f71d9da3ca8d68be41674",
"content_id": "685113ae389ac9657ad9dfe008d1940c8a0c1716",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2102,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 85,
"path": "/tarrasque/basenpc.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .entity import *\nfrom .properties import *\nfrom .consts import *\n\n@register_entity(\"DT_DOTA_BaseNPC\")\nclass BaseNPC(DotaEntity):\n \"\"\"\n A base class for all NPCs, even ones controllable by players.\n \"\"\"\n\n position = PositionProperty(\"DT_DOTA_BaseNPC\")\n \"\"\"\n The (x, y) position of the NPC in Dota2 map coordinates\n \"\"\"\n\n life_state = Property(\"DT_DOTA_BaseNPC\", \"m_lifeState\")\\\n .apply(MapTrans(LIFE_STATE_VALUES))\n \"\"\"\n The state of the NPC's life (unsurprisingly). Possible values are:\n\n * ``\"alive\"`` - The hero is alive\n * ``\"dying\"`` - The hero is in their death animation\n * ``\"dead\"`` - The hero is dead\n * ``\"respawnable\"`` - The hero can be respawned\n * ``\"discardbody\"`` - The hero's body can be discarded\n\n ``\"respawnable\"`` and ``\"discardbody\"`` shouldn't occur in a Dota2 replay\n \"\"\"\n\n level = Property(\"DT_DOTA_BaseNPC\", \"m_iCurrentLevel\")\n \"\"\"\n The NPC's level. See :attr:`Hero.ability_points` for unspent level up\n ability points.\n \"\"\"\n\n health = Property(\"DT_DOTA_BaseNPC\", \"m_iHealth\")\n \"\"\"\n The NPC's current HP.\n \"\"\"\n\n max_health = Property(\"DT_DOTA_BaseNPC\", \"m_iMaxHealth\")\n \"\"\"\n The NPC's maximum HP.\n \"\"\"\n\n health_regen = Property(\"DT_DOTA_BaseNPC\", \"m_flHealthThinkRegen\")\n \"\"\"\n The NPC's health regen per second.\n \"\"\"\n\n mana = Property(\"DT_DOTA_BaseNPC\", \"m_flMana\")\n \"\"\"\n The NPC's current mana.\n \"\"\"\n\n max_mana = Property(\"DT_DOTA_BaseNPC\", \"m_flMaxMana\")\n \"\"\"\n The NPC's maximum mana.\n \"\"\"\n\n mana_regen = Property(\"DT_DOTA_BaseNPC\", \"m_flManaThinkRegen\")\n \"\"\"\n The NPC's mana regen per second.\n \"\"\"\n\n abilities = ArrayProperty(\"DT_DOTA_BaseNPC\", \"m_hAbilities\", array_length=16)\\\n .filter(lambda h: h != NEGATIVE)\\\n .map(EntityTrans())\n \"\"\"\n A list of the NPC's abilities.\n \"\"\"\n \n inventory = ArrayProperty('DT_DOTA_UnitInventory', 'm_hItems', array_length = 14)\\\n .filter(lambda h: h != NEGATIVE)\\\n .map(EntityTrans())\n \"\"\"\n A list of the NPC's items.\n \"\"\"\n \n @property\n def is_alive(self):\n \"\"\"\n A boolean to test if the NPC is alive or not.\n \"\"\"\n return self.life_state == \"alive\"\n"
},
{
"alpha_fraction": 0.6242170929908752,
"alphanum_fraction": 0.6268267035484314,
"avg_line_length": 28.9375,
"blob_id": "976b88a719587d0c540b74c65d515718c189b074",
"content_id": "33838a31818b7a91b589fe0abffc57fa5033eb7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1916,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 64,
"path": "/tarrasque/ability.py",
"repo_name": "zehgota/Tarrasque",
"src_encoding": "UTF-8",
"text": "from .entity import *\nfrom .properties import *\nfrom .consts import *\n\n@register_entity(\"DT_DOTABaseAbility\")\n@register_entity_wildcard(\"DT_DOTA_Ability_*\")\n@register_entity_wildcard(\"DT_DOTA_Unit_Ability_*\")\n@register_entity_wildcard(\"DT_Ability_*\")\nclass BaseAbility(DotaEntity):\n \"\"\"\n Base class for all abilities. Currently does not delegate to other classes,\n but can do so.\n \"\"\"\n\n level = Property(\"DT_DOTABaseAbility\", \"m_iLevel\")\n \"\"\"\n The number of times the ability has been leveled up.\n \"\"\"\n\n off_cooldown_time = Property(\"DT_DOTABaseAbility\", \"m_fCooldown\")\n \"\"\"\n The time the ability comes off cooldown. Note that this does not reset\n once that time has been passed.\n \"\"\"\n\n @property\n def is_on_cooldown(self):\n \"\"\"\n Uses :attr:`off_cooldown_time` and :attr:`GameInfo.game_time` to\n calculate if the ability is on cooldown or not.\n \"\"\"\n current_time = self.stream_binding.info.game_time\n return current_time <= self.off_cooldown_time\n\n cooldown_length = Property(\"DT_DOTABaseAbility\", \"m_flCooldownLength\")\n \"\"\"\n How long the goes on cooldown for every time it is cast.\n \"\"\"\n\n mana_cost = Property(\"DT_DOTABaseAbility\", \"m_iManaCost\")\n \"\"\"\n The mana cost of the spell\n \"\"\"\n\n cast_range = Property(\"DT_DOTABaseAbility\", \"m_iCastRange\")\n \"\"\"\n The distance from the hero's position that this spell can be cast/targeted\n at.\n \"\"\"\n\n @property\n def is_ultimate(self):\n \"\"\"\n Use's the abilities position in :attr:`Hero.abilities` to figure out if\n this is the ultimate ability.\n\n TODO: Check this is reliable\n \"\"\"\n hero = self.owner\n index = -1\n for i, ability in enumerate(hero.abilities):\n if ability == self:\n index = i\n return index == len(hero.abilities) - 2 # -1 for 0, -1 for stats\n"
}
] | 40 |
CodeHunta/User-Data-Validation | https://github.com/CodeHunta/User-Data-Validation | 0b871c91213c48ba04c4be328a4cdad38293ff69 | 5ef6aed5870d34df07a6c84af558aea67ee498a9 | ada569729ec942c912664965ab2c341ba94dd91c | refs/heads/master | 2022-04-17T00:18:24.876973 | 2020-04-10T07:54:03 | 2020-04-10T07:54:03 | 254,554,191 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 23,
"blob_id": "b3480cf2562c056ca7a372bb8eea27a5613ec784",
"content_id": "b55bdb1c88cc88b4f756616d0491683a9db437dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 1,
"path": "/README.md",
"repo_name": "CodeHunta/User-Data-Validation",
"src_encoding": "UTF-8",
"text": "## User Data Validation\n"
},
{
"alpha_fraction": 0.6844444274902344,
"alphanum_fraction": 0.6918518543243408,
"avg_line_length": 27.08333396911621,
"blob_id": "fc2f06c6d3c98ed09307c2943e727c032df60697",
"content_id": "f9758e9033fb33bd6c15aad9fba89288e7ee30f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 675,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 24,
"path": "/userData.py",
"repo_name": "CodeHunta/User-Data-Validation",
"src_encoding": "UTF-8",
"text": "import random\nimport string\n\nuserData = []\ncontainer = []\n\n\nfirstName = input(\"Enter your first name: \")\nlastName = input(\"Enter your last name: \")\nemailAdd = input(\"Enter your email address: \")\n#userData = {'firstname':firstName, 'lastname':lastName, 'email':emailAdd, 'password':userPwd}\n\ndef main():\n\n def randomString(stringLength = 5):\n\n letters = string.ascii_lowercase\n return \" \".join(random.sample(letters,stringLength))\n \n userPwd = (firstName[0:2] + lastName[-2: ] + randomString(5))\n \n userData = {'firstname':firstName, 'lastname':lastName, 'email':emailAdd, 'password':userPwd}\n\n print(\"\\n\\nThis is your autogenerated password: \", userPwd)\n\n"
}
] | 2 |
lertsoft/osmnx | https://github.com/lertsoft/osmnx | 657a6c33dadeceb86cf2adbe39974812c45fb8c4 | 78d162da8295e2c1c36d10eed7942d697d5ae855 | a48c9a2073d62013a0880a678349e2a43d122cae | refs/heads/master | 2022-09-02T22:07:58.706682 | 2022-06-16T21:15:54 | 2022-06-16T21:15:54 | 219,793,827 | 0 | 0 | MIT | 2019-11-05T16:38:34 | 2019-11-05T16:38:37 | 2019-11-04T12:55:33 | null | [
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.557692289352417,
"avg_line_length": 16.33333396911621,
"blob_id": "9112fd8da2e9edd25e7db1468cf1cc3fb2c629e5",
"content_id": "adf026484f76740325c725fab7327fd91ecbcd09",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 52,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 3,
"path": "/osmnx/_version.py",
"repo_name": "lertsoft/osmnx",
"src_encoding": "UTF-8",
"text": "\"\"\"OSMnx package version.\"\"\"\n\n__version__ = \"1.2.1\"\n"
},
{
"alpha_fraction": 0.5079365372657776,
"alphanum_fraction": 0.6904761791229248,
"avg_line_length": 13,
"blob_id": "683d95388a0cfa417e01ad0f602c545f8652dc4f",
"content_id": "c71f4aafdd3b94f5a388cfcfac2bf0641018f606",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 126,
"license_type": "permissive",
"max_line_length": 17,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "lertsoft/osmnx",
"src_encoding": "UTF-8",
"text": "geopandas>=0.10\nmatplotlib>=3.5\nnetworkx>=2.8\nnumpy>=1.21\npandas>=1.4\npyproj>=3.3\nrequests>=2.27\nRtree>=1.0\nShapely>=1.8,<2.0\n"
},
{
"alpha_fraction": 0.7120953798294067,
"alphanum_fraction": 0.7120953798294067,
"avg_line_length": 33.52941131591797,
"blob_id": "5b77e0cd6998210122424db31c3af862b3337ed4",
"content_id": "899a906faf6a25ad46376430aae77a03334471ca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 587,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 17,
"path": "/environments/linux/create-environment.sh",
"repo_name": "lertsoft/osmnx",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nset -e\neval \"$(conda shell.bash hook)\"\nconda deactivate\nmamba env remove -n ox --yes\nmamba clean --all --yes --quiet\nmamba create -c conda-forge --strict-channel-priority -n ox --file \"../docker/requirements.txt\" --yes\neval \"$(conda shell.bash hook)\"\nconda activate ox\npip uninstall osmnx --yes\npip install -e ../../.\npython -m ipykernel install --sys-prefix --name ox --display-name \"Python (ox)\"\nmamba clean --all --yes --quiet\nmamba env export -n ox > environment.yml\nmamba list\njupyter kernelspec list\nipython -c \"import osmnx; print('OSMnx version', osmnx.__version__)\"\n"
}
] | 3 |
my-hello-world/Classic-Convolutional-Models | https://github.com/my-hello-world/Classic-Convolutional-Models | fe4e37f40fc3f45ed3133f180859b27f6d01c1f4 | 8443f4d96b30530305d3b26fbdc0f24a856b6259 | 56997a069b8e414f0a6b62b45033f5bfb946678c | refs/heads/master | 2020-04-01T20:55:04.184873 | 2018-08-01T09:33:19 | 2018-08-01T09:33:19 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5234495997428894,
"alphanum_fraction": 0.5490310192108154,
"avg_line_length": 30.913043975830078,
"blob_id": "bcd0edfe54ec80ea41773466363b7eeaf99c53df",
"content_id": "ddb32deb86cf24d363d17422b3feb51b414da521",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5160,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 161,
"path": "/ResNet34/ResNet34.py",
"repo_name": "my-hello-world/Classic-Convolutional-Models",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 31 15:09:31 2018\n\n@author: nouman\n\"\"\"\n\nimport torch \nimport torch.nn as nn\nfrom data_loader import *\nimport gc\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n#loading train, validation and test data\n\ntrain_loader, valid_loader = get_train_valid_loader(data_dir = './data',\n batch_size = 64,\n augment = False,\n random_seed = 1)\n\n\ntest_loader = get_test_loader(data_dir = './data',\n batch_size = 64)\n\n\n#hyperparameters\nlearning_rate = 0.01\nnum_epochs = 10\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride = 1, downsample = None):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size = 3, stride = stride, padding = 1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU())\n self.conv2 = nn.Sequential(\n nn.Conv2d(out_channels, out_channels, kernel_size = 3, stride = 1, padding = 1),\n nn.BatchNorm2d(out_channels))\n self.downsample = downsample\n self.relu = nn.ReLU()\n self.out_channels = out_channels\n \n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.conv2(out)\n if self.downsample:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes = 10):\n super(ResNet, self).__init__()\n self.inplanes = 64\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size = 7, stride = 2, padding = 3),\n nn.BatchNorm2d(64),\n nn.ReLU())\n self.maxpool = nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1)\n self.layer0 = self._make_layer(block, 64, layers[0], stride = 1)\n self.layer1 = self._make_layer(block, 128, layers[1], stride = 2)\n self.layer2 = self._make_layer(block, 256, layers[2], stride = 2)\n self.layer3 = self._make_layer(block, 512, layers[3], stride = 2)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512, num_classes)\n \n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes:\n \n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes, kernel_size=1, stride=stride),\n nn.BatchNorm2d(planes),\n )\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n \n \n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n x = self.layer0(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\nmodel = ResNet(ResidualBlock, [3, 4, 6, 3]).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay = 0.0001, momentum = 0.9)\n\n# For updating learning rate\ndef update_lr(optimizer, lr): \n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n# Train the model\ntotal_step = len(train_loader)\ncurr_lr = learning_rate\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n \n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n gc.collect()\n if (i+1) % 100 == 
0:\n print (\"Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}\"\n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n del images, labels, outputs, loss\n\n # Decay learning rate\n if (epoch+1) % 20 == 0:\n curr_lr /= 3\n update_lr(optimizer, curr_lr)\n\n# Test the model\nmodel.eval()\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))\n del images, labels, outputs\n\n# Save the model checkpoint\ntorch.save(model.state_dict(), 'resnet32.ckpt')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5076030492782593,
"alphanum_fraction": 0.5436174273490906,
"avg_line_length": 30.81528663635254,
"blob_id": "c93bad1cf516e697265335d08baf1f104e831ebb",
"content_id": "fbe0cdaaf8680287df2047f5b228aca6f0ea6cb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4998,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 157,
"path": "/AlexNet/main.py",
"repo_name": "my-hello-world/Classic-Convolutional-Models",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: nouman\n\"\"\"\n\nfrom data_loader import *\nimport torch\nimport torch.nn as nn\nimport gc\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Hyper-parameters \n\nnum_classes = 10\nnum_epochs = 20\nbatch_size = 64\nlearning_rate = 0.005\n\n# CIFAR10 dataset \ntrain_loader, valid_loader = get_train_valid_loader(data_dir = './data',\n batch_size = batch_size,\n augment = False,\n random_seed = 1)\n\ntest_loader = get_test_loader(data_dir = './data',\n batch_size = batch_size)\n\nclass ConvNet(nn.Module):\n def __init__(self, num_classes=10):\n super(ConvNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),\n nn.BatchNorm2d(96),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 3, stride = 2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 3, stride = 2))\n self.layer3 = nn.Sequential(\n nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(384),\n nn.ReLU())\n self.layer4 = nn.Sequential(\n nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(384),\n nn.ReLU())\n self.layer5 = nn.Sequential(\n nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 3, stride = 2))\n self.fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(9216, 4096),\n nn.ReLU())\n self.fc1 = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU())\n self.fc2= nn.Sequential(\n nn.Linear(4096, num_classes))\n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n out = self.fc1(out)\n out = self.fc2(out)\n return out\n \n\nmodel = ConvNet(num_classes)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay = 0.005, momentum = 0.9) \n\n# Train the model\n\ntotal_step = len(train_loader)\n\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader): \n # Move tensors to the configured device\n images = images.to(device)\n labels = labels.to(device)\n \n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n gc.collect()\n if (i+1) % 10 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n #loss_history.append(loss)\n del images, labels, outputs, loss\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in valid_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n del images, labels, outputs\n \n print('Accuracy of the network on the {} validation images: {} %'.format(5000, 100 * correct / total)) \n\n# Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\n\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += 
labels.size(0)\n correct += (predicted == labels).sum().item()\n del images, labels, outputs\n\n print('Accuracy of the network on the {} test images: {} %'.format(10000, 100 * correct / total)) \n\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in train_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n print('Accuracy of the network on the {} train images: {} %'.format(50000, 100 * correct / total))\n\n\ntorch.save(model, 'filename.pt')\n\n\n\n"
},
{
"alpha_fraction": 0.48125699162483215,
"alphanum_fraction": 0.5297495722770691,
"avg_line_length": 33.07065200805664,
"blob_id": "6d5633001a9c85036f90986a9824b4841c03a522",
"content_id": "248243246d3970d7a63af26ab24d101fde559add",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6269,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 184,
"path": "/VGG16/main.py",
"repo_name": "my-hello-world/Classic-Convolutional-Models",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: nouman\n\"\"\"\n\n\nfrom data_loader import *\nimport torch\nimport torch.nn as nn\n\nimport gc\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Hyper-parameters \nnum_classes = 10\nnum_epochs = 5\nbatch_size = 64\nlearning_rate = 0.01\n\n# CIFAR10 dataset \ntrain_loader, valid_loader = get_train_valid_loader(data_dir = './data',\n batch_size = batch_size,\n augment = False,\n random_seed = 1)\n\ntest_loader = get_test_loader(data_dir = './data',\n batch_size = batch_size)\n\nclass ConvNet(nn.Module):\n def __init__(self, num_classes=10):\n super(ConvNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU())\n self.layer2 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(), \n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU())\n self.layer4 = nn.Sequential(\n nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer5 = nn.Sequential(\n nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU())\n self.layer6 = nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU())\n self.layer7 = nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer8 = nn.Sequential(\n nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU())\n self.layer9 = nn.Sequential(\n nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU())\n self.layer10 = nn.Sequential(\n nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer11 = nn.Sequential(\n nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU())\n self.layer12 = nn.Sequential(\n nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU())\n self.layer13 = nn.Sequential(\n nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(7*7*512, 4096),\n nn.ReLU())\n self.fc1 = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU())\n self.fc2= nn.Sequential(\n nn.Linear(4096, num_classes))\n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = self.layer6(out)\n out = self.layer7(out)\n out = self.layer8(out)\n out = self.layer9(out)\n out = self.layer10(out)\n out = self.layer11(out)\n out = self.layer12(out)\n out = self.layer13(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n out = self.fc1(out)\n out = self.fc2(out)\n return out\n\n\nmodel = ConvNet(num_classes).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay = 0.0005, momentum = 0.9) \n\n# Train the model\ntotal_step = len(train_loader)\nfor epoch in 
range(num_epochs):\n for i, (images, labels) in enumerate(train_loader): \n # Move tensors to the configured device\n images = images.to(device)\n labels = labels.to(device)\n \n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n gc.collect()\n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n del images, labels, outputs, loss\n \n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in valid_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n del images, labels, outputs\n print('Accuracy of the network on the {} test images: {} %'.format(5000, 100 * correct / total)) \n \n# Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\nwith torch.cuda.device(0):\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n del images, labels, outputs\n \n print('Accuracy of the network on the {} test images: {} %'.format(100 * correct / total)) \n \n \ntorch.save(model, 'filename1.pt')\n"
},
{
"alpha_fraction": 0.4715272784233093,
"alphanum_fraction": 0.49773648381233215,
"avg_line_length": 33.65289306640625,
"blob_id": "43917fc36126badc080eab2eb88082d29014ac0c",
"content_id": "f4318d178e3212dcbc287e44152eda09c4766903",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4197,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 121,
"path": "/LeNet5/main.py",
"repo_name": "my-hello-world/Classic-Convolutional-Models",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: nouman\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\n\nbatch_size = 64\nnum_classes = 10\nlearning_rate = 0.001\nnum_epochs = 1\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#Loading the dataset and preprocessing\ntrain_dataset = torchvision.datasets.MNIST(root = './data',\n train = True,\n transform = transforms.Compose([\n transforms.Resize((32,32)),\n transforms.ToTensor(),\n transforms.Normalize(mean = (0.1307,), std = (0.3081,))]),\n download = True)\n\n\ntest_dataset = torchvision.datasets.MNIST(root = './data',\n train = False,\n transform = transforms.Compose([\n transforms.Resize((32,32)),\n transforms.ToTensor(),\n transforms.Normalize(mean = (0.1325,), std = (0.3105,))]),)\n\n\ntrain_loader = torch.utils.data.DataLoader(dataset = train_dataset,\n batch_size = batch_size,\n shuffle = True)\n\n\ntest_loader = torch.utils.data.DataLoader(dataset = test_dataset,\n batch_size = batch_size,\n shuffle = True)\n\n#Defining the convolutional neural network\nclass ConvNeuralNet(nn.Module):\n def __init__(self, num_classes):\n super(ConvNeuralNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.fc = nn.Linear(400, 120)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(84, num_classes)\n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n out = self.relu(out)\n out = self.fc1(out)\n out = self.relu1(out)\n out = self.fc2(out)\n return out\n \n \nmodel = ConvNeuralNet( num_classes).to(device)\n\n#Defining cost and optimizer\ncost = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n\n \ntotal_step = len(train_loader)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader): \n images = images.to(device)\n labels = labels.to(device)\n #Forward pass\n outputs = model(images)\n loss = cost(outputs, labels)\n \t\n \t# Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \t\t\n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n \t\t .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n# Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\n \nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))\n\t \n\n"
}
] | 4 |
MicheleMorelli/Stats_recap | https://github.com/MicheleMorelli/Stats_recap | 61130bc016314b5d5a0af1934207f0920fde813f | 9e6009dd38ee2ac47db2fc0998dbec80b5398965 | 0114eced2d81f3097ad41cb6a137a75a15f6d220 | refs/heads/master | 2020-04-16T07:49:13.823240 | 2019-01-13T16:46:06 | 2019-01-13T16:46:06 | 165,400,671 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5772151947021484,
"alphanum_fraction": 0.5772151947021484,
"avg_line_length": 24.483871459960938,
"blob_id": "ad32eba24c051880eac2a56574cb44f8986a50b8",
"content_id": "83e601316f1e8a513ef0f9995491be6a4c4ad255",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 790,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 31,
"path": "/lib/helper.py",
"repo_name": "MicheleMorelli/Stats_recap",
"src_encoding": "UTF-8",
"text": "\"\"\"Helper methods for the Stats class\"\"\"\n\n\ndef get_numeric_series():\n \"\"\"Creates a list of numbers (float)\n Returns:\n [nums]: a list of numbers\n \"\"\"\n nums = []\n inp = get_number()\n while (inp):\n nums.append(inp)\n inp = get_number()\n return nums\n\ndef get_number():\n \"\"\"Reads a number from the standard input and returns it\n Returns:\n num (float): an float\n num (nil): if \"\" is passed via the input\n Raises:\n TypeError, ValueError: if anything other than a num or \"\" is entered \n as input\n \"\"\"\n num = input(\"Please enter a number:\")\n if not num: return \n try:\n return float(num)\n except (TypeError,ValueError) as e:\n print(\"Not a Number. Please enter a number.\")\n get_number()\n"
},
{
"alpha_fraction": 0.6687783002853394,
"alphanum_fraction": 0.6968325972557068,
"avg_line_length": 24.697673797607422,
"blob_id": "ffb9f338a4974686263f25026139a4c0ece98ec8",
"content_id": "c120e77b924117842d2e24b07fb09fb33cf649aa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1165,
"license_type": "permissive",
"max_line_length": 131,
"num_lines": 43,
"path": "/README.md",
"repo_name": "MicheleMorelli/Stats_recap",
"src_encoding": "UTF-8",
"text": "# Basic Statistical Functions #\n### by Michele Morelli ###\n.\\\n├── lib\\\n│ ├── helper.py\\\n│ ├── stats.py ---> (This is the main class!)\\\n│ └── symbols.py ---> (A small dictionary of Symbols)\\\n├── LICENCE.txt\\\n├── README.md\\\n└── test\\\n └── test.py ---> (all the pytests are here)\\\n\nThe new university term is approaching, and I found myself refreshing some statistics for the upcoming modules.\n\nI thought it would be a good idea to write down a couple of simple Python methods to help me refresh the basics (and the symbols!).\n\nJust to make sure that everything is working as it should, I used numpy's methods to double-check my functions in the pytests.\n\nGiven a numeric dataset, currently it shows the following functions:\n- Arithmetic mean\n- Median\n- Variance\n- Standard Deviation\n- Summation\n- Double Summation\n- Min\n- Max\n- Range\n- Mode\n\nTo run it:\n\n $python3 lib/stats.py [optional: put a list of number as arguments]\n\nSo for example:\n\n $python3 lib/stats.py 23 32 43 2342 342 34 234 234 2342 34 34\n\nFor the tests I used pytest:\n\n $pytest test/test.py\n\nEnjoy!\n"
},
{
"alpha_fraction": 0.49603360891342163,
"alphanum_fraction": 0.5968268513679504,
"avg_line_length": 22.043010711669922,
"blob_id": "3899291de0f2dc4060738667de35dff30ae03b3e",
"content_id": "75c99333551edc7bedbfecbce4f46740187b9f2f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2143,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 93,
"path": "/test/test.py",
"repo_name": "MicheleMorelli/Stats_recap",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nsys.path.append(os.path.join(sys.path[0],'..','lib'))\nfrom stats import *\nimport numpy as np\nimport scipy.stats\nimport pytest\n\n\"\"\"\nUsing numpy as an initial benchmark for testing\n\"\"\"\n\ndef test_mean():\n s = Stats(*range(1,10))\n assert s.func_mean() == np.mean(list(range(1,10))) \n\n\ndef test_mean2():\n s = Stats(*range(1,1010))\n assert s.func_mean() == np.mean(list(range(1,1010))) \n\ndef test_median():\n s = Stats(*range(1,10))\n assert s.func_median() == np.median(list(range(1,10))) \n\n\ndef test_median2():\n s = Stats(*range(1,1010))\n assert s.func_median() == np.median(list(range(1,1010))) \n\ndef test_variance():\n s = Stats(*range(1,1010))\n assert s.func_variance() == np.var(list(range(1,1010))) \n\n\ndef test_variance2():\n s = Stats(*range(1,100))\n assert s.func_variance() == np.var(list(range(1,100))) \n\n\ndef test_std():\n s = Stats(*range(1,1010))\n assert s.func_standard_deviation() == np.std(list(range(1,1010))) \n\n\ndef test_std2():\n s = Stats(*range(1,100))\n assert s.func_standard_deviation() == np.std(list(range(1,100))) \n\n\ndef test_large():\n s = Stats(*range(1,100))\n assert s.func_large() == max(list(range(1,100)))\n\n\ndef test_small():\n s = Stats(*range(1,100))\n assert s.func_small() == min(list(range(1,100)))\n\n\ndef test_range():\n s = Stats(*range(1,100))\n assert s.func_range() == np.ptp(list(range(1,100)))\n\n\ndef test_mode():\n a = [1,2,34,4,2,3,21,1,23,32,2,31,2,2432,234,2]\n s = Stats(*a)\n assert s.func_mode() == scipy.stats.mode(np.array(a))[0]\n\n\ndef test_summation():\n n = [1,2,34,4,2,3,21,1,23,32,2,31,2,2432,234,2] \n s = Stats(*n)\n assert s.func_summation() == sum(n)\n\n\ndef test_double_summation():\n n = [456,456,456] \n s = Stats(*n)\n assert s.func_double_summation() == 4104\n\n\ndef test_kurt():\n a = [1,2,34,4,2,3,21,1,23,32,2,31,2,2432,234,2]\n s = Stats(*a)\n assert pytest.approx(s.func_kurtosis(), .1) == scipy.stats.kurtosis(np.array(a))\n\n\ndef test_skew():\n a = [1,2,34,4,2,3,21,1,23,32,2,31,2,2432,234,2]\n s = Stats(*a)\n assert s.func_skewness() == scipy.stats.skew(np.array(a))\n"
},
{
"alpha_fraction": 0.38447320461273193,
"alphanum_fraction": 0.44731977581977844,
"avg_line_length": 27.473684310913086,
"blob_id": "c87a4a641bed7e743e447156338d047632cd8f25",
"content_id": "1ebd0ca81f80cea7a3dfc91f0478b6dae00c1c40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 19,
"path": "/lib/symbols.py",
"repo_name": "MicheleMorelli/Stats_recap",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSmall library of mathematical/ statistical symbols (as a dictionary)\n\"\"\"\n\ndef create_dictionary():\n return {\n \"variance\": \"\\u03C3\\u00B2\",\n \"standard_deviation\": \"\\u03C3\",\n \"mean\": \"\\u03BC\",\n \"median\": \"M\",\n \"mode\": \"Mo\",\n \"large\":\"max\",\n \"small\":\"min\",\n \"range\": \"[min,max]\",\n \"summation\":\"\\u2211\",\n \"double_summation\":\"\\u2211\"*2,\n \"kurtosis\":\"\\u03B2\\u2082\",\n \"skewness\":\"\\u03B3\\u2081\",\n }\n"
},
{
"alpha_fraction": 0.490525484085083,
"alphanum_fraction": 0.4954092502593994,
"avg_line_length": 26.228723526000977,
"blob_id": "0459bd1fd4f82582fae266b30bcc1abd7a3655ec",
"content_id": "e1dec18a34214bd223ace1ecb36c5f6ad5f706bb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5119,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 188,
"path": "/lib/stats.py",
"repo_name": "MicheleMorelli/Stats_recap",
"src_encoding": "UTF-8",
"text": "\"\"\"Playing with some basic stats functions to refresh them!\"\"\"\n\nimport re, sys, math\n\nimport symbols\nfrom helper import *\n\nclass Stats:\n # UTF-compatible math symbols as a reminder for myself:-)\n symbol = symbols.create_dictionary()\n \n def __init__(self,*args): \n \"\"\"Stats class constructor\n Takes a series of numeric values from the std input and puts them in\n a list n\n \"\"\"\n if (len(args)):\n self.n = args\n elif (len(sys.argv) > 1):\n self.n = [float(x) for x in sys.argv[1:]]\n else:\n self.n = get_numeric_series()\n # print(\"Stats Class initialised!\")\n\n\n def func_mean(self):\n \"\"\"Calculates the arithmetic mean of the n list of numbers\n Returns:\n mean (float): the mean value\n \"\"\"\n return sum(self.n) / len(self.n)\n \n \n def func_median(self):\n \"\"\"Calculates the median of the n list of numbers\n Returns:\n median (float): the median value of the series\n \"\"\"\n series = sorted(self.n)\n if (len(series) == 1):\n return series[0]\n elif (len(series) % 2):\n return series[(len(series) - 1) // 2]\n else:\n mid = len(series) // 2 \n return Stats(*series[mid - 1:mid + 1]).func_mean()\n\n\n def func_variance(self):\n \"\"\"Calculates the variance of the n list of numbers\n Returns:\n variance (float): the variance value\n \"\"\"\n return sum(map(lambda x: (x - self.func_mean())**2, self.n)) \\\n / len(self.n)\n \n\n def func_standard_deviation(self):\n \"\"\"Calculates the standard_deviation of the n list of numbers\n Returns:\n standard_deviation (float): the std. deviation value\n \"\"\"\n return math.sqrt(self.func_variance())\n \n \n def func_large(self):\n \"\"\"\n Returns:\n large (float): the largest value in the dataset\n \"\"\"\n return max(self.n)\n\n\n def func_small(self):\n \"\"\"\n Returns:0\n small(float): the smallest value in the dataset\n \"\"\"\n return min(self.n)\n\n\n def func_range(self):\n \"\"\"\n Returns:\n range (float): the range between min and max\n \"\"\"\n return self.func_large() - self.func_small()\n\n\n def func_mode(self):\n \"\"\"\n Returns:\n mode (float): the single elements that recurs the most in the \n dataset\n \"\"\"\n d = {}\n for i in self.n:\n d[i] = d.get(i, 0) + 1\n maxi = 0\n for i in d:\n if (d[i] > maxi):\n maxi = d[i]\n mode_lst = list(filter(lambda x: d[x] == maxi, d.keys()))\n return mode_lst[0] if len(mode_lst) == 1 else \"N/A\"\n\n \n def func_summation(self):\n \"\"\"\n Returns:\n summation (float): the summation of all the values in the dataset\n \"\"\"\n return sum((self.n))\n\n\n def func_double_summation(self):\n \"\"\"\n Returns:\n d_summation (float): the double summation of all the values in \n the dataset\n \"\"\"\n return self.func_summation() * len(self.n)\n\n \n def func_kurtosis(self):\n \"\"\"\n Returns:\n kurtosis (float): the peakedness of the dataset's distribution\n \"\"\"\n kurtosis = 0\n for i in self.n:\n partial_sum = i - self.func_mean()\n partial_sum /= self.func_standard_deviation()\n partial_sum **= 4\n kurtosis += partial_sum\n kurtosis /= len(self.n)\n kurtosis -= 3\n return kurtosis\n\n\n def func_skewness(self):\n \"\"\"\n Returns:\n skewness (float): the amount of asymmetry of distribution\n of the dataset\n \"\"\"\n skewness = 0\n for i in self.n:\n partial_sum = i - self.func_mean()\n partial_sum /= self.func_standard_deviation()\n partial_sum **= 3\n skewness += partial_sum\n skewness /= len(self.n)\n return skewness\n\n\n\n def print_values(self):\n \"\"\"\n Prints the values of all the stats functions for the numeric list n\n \"\"\"\n div = \"\\n\" 
+ (\"*\" * 60) + \"\\n\"\n print( div + \"Basic Statistical Functions for numeric series {}\"\\\n .format(self.n).upper() + div)\n for attr in dir(self):\n if (not re.match(r'^func_*', attr )): continue\n func_name = re.sub(r'^func_',\"\", attr)\n sym = Stats.symbol[func_name] \n func_name = re.sub(r'_', \" \",func_name) # for printing\n value = getattr(self, attr)()\n value = \"{:.2f}\".format(value) if isinstance(value,float)\\\n else str(value)\n\n print(\"{} (symbol: {}): {}\"\\\n .format(func_name.capitalize(), \\\n sym,\\\n value\n )\n )\n print()\n\n \ndef main():\n s = Stats()\n s.print_values()\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 5 |
s1s1ty/ConwayGameOfLife | https://github.com/s1s1ty/ConwayGameOfLife | f81df5e54988ce5df60338890fe3c98904b3aeda | 89f3014755a1e56777ee236602edc71eb89751a6 | c27c8d8fbf996722d78b6713dbd807610b8192d8 | refs/heads/master | 2021-08-23T08:00:57.399149 | 2017-12-04T07:29:56 | 2017-12-04T07:29:56 | 112,950,753 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6071428656578064,
"alphanum_fraction": 0.622710645198822,
"avg_line_length": 25.901233673095703,
"blob_id": "109a815533df2da73af5995e637085223d4ca0d5",
"content_id": "4703a8c359993209e8472381a648efb0cc90efbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2184,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 81,
"path": "/gridAPI/views.py",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom rest_framework.views import APIView\nfrom rest_framework import generics\nfrom rest_framework.response import Response\n\nfrom .models import Grid\n\nfrom .serializers import GridSerializer\nfrom .conveygame import ConveyGameLife\n\n\nclass GridList(generics.ListCreateAPIView):\n\t\"\"\"\n\tRetrieve, and Create a grid instance.\n\t\"\"\"\n\n\tqueryset = Grid.objects.all()\n\tserializer_class = GridSerializer\n\n\tdef post(self, request):\n\t\tdata = request.data\n\t\tserializer = GridSerializer(data=data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=201)\n\t\treturn Response(serializer.errors, status=400)\n\n\nclass GridDetail(APIView):\n \"\"\"\n Retrieve, update or delete a snippet instance.\n \"\"\"\n def get_object(self, pk):\n try:\n return Grid.objects.get(pk=pk)\n except Grid.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n \tgrid = self.get_object(pk)\n \t\n \tif request.GET.get('after'):\n \t\tall_age = list(request.GET.get('after'))\n \t\t\n \t\tdata_list = []\n \t\t# keep this grid default bcz need to know how grid data are provided\n \t\tre = [[1,1,1],[0,1,0],[1,0,0]]\n \t\tmod = [[1,1,1],[0,1,0],[1,0,0]]\n \t\tfor index in all_age:\n \t\t\tif index!=',':\n \t\t\t\tv=re\n\t\t \t\tobj = ConveyGameLife(v,grid.x,grid.y,mod)\n\t\t \t\tre = obj.state_check()\n\t\t \t\tmod = list(re)\n\t\t \t\tprint mod\n\t\t \t\tdata_list.append({\n\t\t \t\t\t'age': index,\n\t\t \t\t\t'grid': list(mod)\n\t\t \t\t})\n\n\t \tre_data = {\n\t \t\t'x': grid.x,\n\t \t\t'y': grid.y,\n\t \t\t'data': data_list\n\t \t}\n\t \treturn Response(re_data)\n\n serializer = GridSerializer(grid)\n return Response(serializer.data)\n\n def patch(self, request, pk):\n grid = self.get_object(pk)\n serializer = GridSerializer(grid, data=request.data, partial=True) # set partial=True to update a data partially\n if serializer.is_valid():\n serializer.save()\n return JsonReponse(code=201, data=serializer.data)\n return JsonResponse(code=400, data=\"wrong parameters\")\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6961326003074646,
"alphanum_fraction": 0.6961326003074646,
"avg_line_length": 19.22222137451172,
"blob_id": "aeb9ca743bf945bf6470f364af3deb23ea1ea046",
"content_id": "52142037efad67c18b312b995afb81f4d49a6017",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/gridAPI/serializers.py",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\n\nfrom .models import Grid\n\n\nclass GridSerializer(serializers.ModelSerializer):\n class Meta:\n model = Grid\n fields = '__all__'"
},
{
"alpha_fraction": 0.5471698045730591,
"alphanum_fraction": 0.7547169923782349,
"avg_line_length": 16.66666603088379,
"blob_id": "829033c087e800eb907b38f7afdd24e2c92e34f3",
"content_id": "a9c929e13654bfe61cc5e5627384d4d6f5594c36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "Django==1.11\ndjangorestframework==3.7.3\npytz==2017.3\n"
},
{
"alpha_fraction": 0.7413793206214905,
"alphanum_fraction": 0.7413793206214905,
"avg_line_length": 28,
"blob_id": "87c7da028a9b348bbb9b052f0965b8c1f66bd22d",
"content_id": "b1a154fbc670cef04008e962072919e0444fefdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 2,
"path": "/README.md",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "# Conway-Game-of-Life-\nBuild API of Conway’s Game of Life\n"
},
{
"alpha_fraction": 0.6742424368858337,
"alphanum_fraction": 0.685606062412262,
"avg_line_length": 19.076923370361328,
"blob_id": "d6b6235da256bbb96907626c18df5816ab58df61",
"content_id": "f2580e9d1aa54e82179a20740b0bcc778dd653d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 264,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 13,
"path": "/gridAPI/models.py",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass Grid(models.Model):\n\tx = models.IntegerField(default=0)\n\ty = models.IntegerField(default=0)\n\tdata = models.TextField()\n\n\tdef __str__(self):\n\t\treturn str(data)\n\t\t\n"
},
{
"alpha_fraction": 0.5942857265472412,
"alphanum_fraction": 0.6057142615318298,
"avg_line_length": 20.875,
"blob_id": "f7f0ff932ccf066d3f0673f45da2d0a05e2e8489",
"content_id": "c4b1d1fdad7b84cbcd15b00f40eb3af24d558380",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 8,
"path": "/gridAPI/urls.py",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\n\nfrom .views import *\n\nurlpatterns = [\n url(r'^grids/$', GridList.as_view()),\n url(r'^grids/(?P<pk>[0-9]+)/$', GridDetail.as_view()),\n]\n"
},
{
"alpha_fraction": 0.5446009635925293,
"alphanum_fraction": 0.577464759349823,
"avg_line_length": 20.274999618530273,
"blob_id": "8e5a1e4e198e6ac07920b2780e8f2061709d65bb",
"content_id": "41e99dec99bc1357ed20cac4bb096ecbeb0c4b64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 40,
"path": "/gridAPI/conveygame.py",
"repo_name": "s1s1ty/ConwayGameOfLife",
"src_encoding": "UTF-8",
"text": "import copy\n\n\"\"\"\n\n1 1 1\n0 1 0\n1 0 0\n\n\"\"\"\n\nclass ConveyGameLife(object):\n\t\"\"\" Implementation of comway game of life \"\"\"\n\tdef __init__(self, am, row, col, mod):\n\t\tself.am = am\n\t\tself.r = row\n\t\tself.c = col\n\t\t# self.modify_am = copy.copy(am) # not working here\n\t\tself.modify_am = mod # bcz of python deep and sallow copy\n\n\tdef state_check(self):\n\t\tfor i in range(self.r):\n\t\t\tfor j in range(self.c):\n\t\t\t\tlive = 0\n\t\t\t\tfor k in range(-1,2,1):\n\t\t\t\t\tfor kk in range(-1,2,1):\n\t\t\t\t\t\tif(i+k>=0 and i+k<self.r and j+kk>=0 and j+kk<self.c):\n\n\t\t\t\t\t\t\tlive+=self.am[i+k][j+kk] # count neighbouring 1\n\n\t\t\t\tif self.am[i][j]==1:\n\t\t\t\t\tlive-=1 # as same element is not countable\n\t\t\t\t\tif live==2 or live==3:\n\t\t\t\t\t\tself.modify_am[i][j]=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.modify_am[i][j]=0\n\t\t\t\tif self.am[i][j]==0:\n\t\t\t\t\tif live==3:\n\t\t\t\t\t\tself.modify_am[i][j]=1\n\n\t\treturn self.modify_am\n\n"
}
] | 7 |
toutou826/FindProfRateBU | https://github.com/toutou826/FindProfRateBU | 0228c0db4d5ce6ed1d354e98b66c8a32a3f0e5ed | 21f489f94e8215a92f410053fb9c1f7cb88ff90d | e97b38a8c2f5ca7ec79fad5ee63294df554f4666 | refs/heads/master | 2020-09-06T22:46:31.994895 | 2019-11-21T18:07:52 | 2019-11-21T18:07:52 | 220,579,330 | 1 | 0 | null | 2019-11-09T02:20:34 | 2019-11-21T16:44:58 | 2019-11-21T17:02:35 | Python | [
{
"alpha_fraction": 0.6897546648979187,
"alphanum_fraction": 0.6897546648979187,
"avg_line_length": 22.89655113220215,
"blob_id": "30d9201b702eddcff80caa279f5b8641571cbecb",
"content_id": "ad8a00a94b9f875ea6dba9e0e810c67c1ce21fe5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 29,
"path": "/venv/lib/python3.7/site-packages/dryscrape/examples/google.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "import dryscrape\nimport sys\n\nif 'linux' in sys.platform:\n # start xvfb in case no X is running. Make sure xvfb \n # is installed, otherwise this won't work!\n dryscrape.start_xvfb()\n\nsearch_term = 'dryscrape'\n\n# set up a web scraping session\nsess = dryscrape.Session(base_url = 'http://google.com')\n\n# we don't need images\nsess.set_attribute('auto_load_images', False)\n\n# visit homepage and search for a term\nsess.visit('/')\nq = sess.at_xpath('//*[@name=\"q\"]')\nq.set(search_term)\nq.form().submit()\n\n# extract all links\nfor link in sess.xpath('//a[@href]'):\n print(link['href'])\n\n# save a screenshot of the web page\nsess.render('google.png')\nprint(\"Screenshot written to 'google.png'\")\n"
},
{
"alpha_fraction": 0.6962174773216248,
"alphanum_fraction": 0.6997635960578918,
"avg_line_length": 24.606060028076172,
"blob_id": "9f4cfe65a5235334c0c8d5d0d4ec60a23516344c",
"content_id": "aa44264b2392798be400b974c9df30428e3e1fa9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 850,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 33,
"path": "/venv/lib/python3.7/site-packages/dryscrape/docs/index.rst",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "Welcome to dryscrape's documentation!\n====================================\n\ndryscrape_ is a lightweight web scraping library for Python. It uses a\nheadless Webkit instance to evaluate Javascript on the visited pages. This\nenables painless scraping of plain web pages as well as Javascript-heavy\n“Web 2.0” applications like\nFacebook.\n\nIt is built on the shoulders of capybara-webkit_'s webkit-server_.\nA big thanks goes to thoughtbot, inc. for building this excellent\npiece of software!\n\n.. _dryscrape: https://github.com/niklasb/dryscrape\n.. _capybara-webkit: https://github.com/thoughtbot/capybara-webkit\n.. _webkit-server: https://github.com/niklasb/webkit-server\n\nContents\n----------\n\n.. toctree::\n :maxdepth: 2\n\n installation\n usage\n apidoc\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n"
},
{
"alpha_fraction": 0.6914103627204895,
"alphanum_fraction": 0.6914103627204895,
"avg_line_length": 33.925926208496094,
"blob_id": "42d5cbab877e557cc08290c8da28514e63695543",
"content_id": "fd7bbe79d6eb4e83804b87095038d30e5fb27dbe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1886,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 54,
"path": "/venv/lib/python3.7/site-packages/dryscrape/dryscrape/session.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "from dryscrape.driver.webkit import Driver as DefaultDriver\n\nfrom itertools import chain\ntry:\n import urlparse\nexcept ImportError:\n import urllib\n urlparse = urllib.parse\n\nclass Session(object):\n \"\"\" A web scraping session based on a driver instance. Implements the proxy\n pattern to pass unresolved method calls to the underlying driver.\n\n If no `driver` is specified, the instance will create an instance of\n ``dryscrape.session.DefaultDriver`` to get a driver instance (defaults to\n ``dryscrape.driver.webkit.Driver``).\n\n If `base_url` is present, relative URLs are completed with this URL base.\n If not, the `get_base_url` method is called on itself to get the base URL. \"\"\"\n\n def __init__(self,\n driver = None,\n base_url = None):\n self.driver = driver or DefaultDriver()\n self.base_url = base_url\n\n # implement proxy pattern\n def __getattr__(self, attr):\n \"\"\" Pass unresolved method calls to underlying driver. \"\"\"\n return getattr(self.driver, attr)\n\n def __dir__(self):\n \"\"\"Allow for `dir` to detect proxied methods from `Driver`.\"\"\"\n dir_chain = chain(dir(type(self)), dir(self.driver))\n return list(set(dir_chain))\n\n def visit(self, url):\n \"\"\" Passes through the URL to the driver after completing it using the\n instance's URL base. \"\"\"\n return self.driver.visit(self.complete_url(url))\n\n def complete_url(self, url):\n \"\"\" Completes a given URL with this instance's URL base. \"\"\"\n if self.base_url:\n return urlparse.urljoin(self.base_url, url)\n else:\n return url\n\n def interact(self, **local):\n \"\"\" Drops the user into an interactive Python session with the ``sess`` variable\n set to the current session instance. If keyword arguments are supplied, these\n names will also be available within the session. \"\"\"\n import code\n code.interact(local=dict(sess=self, **local))\n"
},
{
"alpha_fraction": 0.7262357473373413,
"alphanum_fraction": 0.7262357473373413,
"avg_line_length": 31.875,
"blob_id": "8911efe4fc543b6e7087edd3f218a6de07acf894",
"content_id": "26e02f3ae63ea0b1e4076da76d18c39d7f7b35a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 526,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 16,
"path": "/venv/lib/python3.7/site-packages/dryscrape/docs/usage.rst",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "Usage\n======\n\nFirst demonstration\n------------------------\n\nA code sample tells more than thousand words:\n\n.. literalinclude:: /../examples/google.py\n\nIn this sample, we use dryscrape to do a simple web search on Google.\nNote that we set up a Webkit driver instance here and pass it to a dryscrape\n:py:class:`~dryscrape.session.Session` in the constructor. The session instance\nthen passes every method call it cannot resolve -- such as\n:py:meth:`~webkit_server.CommandsMixin.visit`, in this case -- to the\nunderlying driver.\n"
},
{
"alpha_fraction": 0.6293706297874451,
"alphanum_fraction": 0.6337412595748901,
"avg_line_length": 26.261905670166016,
"blob_id": "26035a4b676646a39b7eceb9cfcf09859abe2ab6",
"content_id": "dcd63345b37eb52b060b801c3e90a076d6a75457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1144,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 42,
"path": "/findRate.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\n\n\n#Find teacher id\ndef findTeacher(aTeacher):\n url = \"https://www.ratemyprofessors.com/search.jsp?query=\"\n\n name = '+'.join(aTeacher.lower().split(' '))\n\n sauce = requests.get(url+name)\n\n soup = BeautifulSoup(sauce.text, \"html.parser\")\n\n #Find the teacher id if the teacher searched is from BU\n link = None\n for li in soup.find_all('li',class_= 'listing PROFESSOR'):\n for uni in li.find('span',class_='sub'):\n if uni.split(',')[0] == \"Boston University\":\n link = li.find('a').get('href')\n break\n return link\n\n\n# a = findTeacher(\"Dora Erdos\")\n# print(a)\n\n#Use the teacher id found to find teacher Rating\ndef findRating(aLink):\n base_url = 'https://www.ratemyprofessors.com'\n\n sauce = requests.get(base_url+aLink)\n\n soup = BeautifulSoup(sauce.text, \"html.parser\")\n\n grades = soup.find_all('div', class_='grade')\n\n result = f'Quality: {grades[0].get_text().strip()}\\nWould Take Again: {grades[1].get_text().strip()}\\nDifficulty: {grades[2].get_text().strip()}\\n'\n\n return result\n\n# findRating(findTeacher(\"Dora Erdos\"))"
},
{
"alpha_fraction": 0.7678018808364868,
"alphanum_fraction": 0.7678018808364868,
"avg_line_length": 23.769229888916016,
"blob_id": "6628760183165d31ed100a66de728a57218bd90a",
"content_id": "9a9b277b6544945c5236fc6b53fde8994dbeec1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 275,
"num_lines": 13,
"path": "/README.md",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "# FindPropRateBU\n\n\nDemo:\n\n\n\n\n\n\n\n\nThis program uses Twilio to send the information of a desired class via Python+Flask. It uses beautifulsoup to scrape the schedule of the desired class from BU class catalog and the rating of the professor for that class from RMP and send it to the receiver as a SMS message.\n\n"
},
{
"alpha_fraction": 0.6330274939537048,
"alphanum_fraction": 0.64449542760849,
"avg_line_length": 35.33333206176758,
"blob_id": "42be3d4cd9aef7d548c8a91fe719d194a7e4556d",
"content_id": "a7977ebca9dcda3b068c455d70b3e5072f330fef",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 12,
"path": "/venv/lib/python3.7/site-packages/dryscrape/setup.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "from distutils.core import setup, Command\n\nsetup(name='dryscrape',\n version='1.0.1',\n description='a lightweight Javascript-aware, headless web scraping library for Python',\n author='Niklas Baumstark',\n author_email='[email protected]',\n license='MIT',\n url='https://github.com/niklasb/dryscrape',\n packages=['dryscrape', 'dryscrape.driver'],\n install_requires=['webkit_server>=1.0', 'lxml', 'xvfbwrapper'],\n )\n"
},
{
"alpha_fraction": 0.6620370149612427,
"alphanum_fraction": 0.6620370149612427,
"avg_line_length": 11.70588207244873,
"blob_id": "907a8e70034cba50a8efed3ab6bb7fc11654fad1",
"content_id": "00f7b87dbe13123e69005fa00978d083dc2767b1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 17,
"path": "/venv/lib/python3.7/site-packages/dryscrape/dryscrape/xvfb.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "import atexit\nimport os\n\n_xvfb = None\n\n\ndef start_xvfb():\n from xvfbwrapper import Xvfb\n global _xvfb\n _xvfb = Xvfb()\n _xvfb.start()\n atexit.register(_xvfb.stop)\n\n\ndef stop_xvfb():\n global _xvfb\n _xvfb.stop()\n"
},
{
"alpha_fraction": 0.7536101341247559,
"alphanum_fraction": 0.7644404172897339,
"avg_line_length": 35.31147384643555,
"blob_id": "dc63074eaa27f51130a12a9bfd796c6b5c407d52",
"content_id": "e0650ce708ed54042922354db5a4a02d75deddb0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2220,
"license_type": "permissive",
"max_line_length": 239,
"num_lines": 61,
"path": "/venv/lib/python3.7/site-packages/dryscrape/README.md",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "\n**NOTE: This package is not actively maintained. It uses QtWebkit, which is end-of-life and probably doesn't get security fixes backported. Consider using a similar package like [Spynner](https://github.com/makinacorpus/spynner) instead.**\n\n\n# Overview\n\n**Author:** Niklas Baumstark\n\n\n\ndryscrape is a lightweight web scraping library for Python. It uses a \nheadless Webkit instance to evaluate Javascript on the visited pages. This \nenables painless scraping of plain web pages as well as Javascript-heavy \n“Web 2.0” applications like\nFacebook.\n\nIt is built on the shoulders of\n[capybara-webkit](https://github.com/thoughtbot/capybara-webkit)'s \n[webkit-server](https://github.com/niklasb/webkit-server). A big thanks goes \nto thoughtbot, inc. for building this excellent piece of software!\n\n# Changelog\n\n* 1.0: Added Python 3 support, small performance fixes, header names are now\n properly normalized. Also added the function `dryscrape.start_xvfb()` to\n easily start Xvfb.\n* 0.9.1: Changed semantics of the `headers` function in\n a backwards-incompatible way: It now returns a list of (key, value)\n pairs instead of a dictionary.\n\n# Supported Platforms\n\nThe library has been confirmed to work on the following platforms:\n\n* Mac OS X 10.9 Mavericks and 10.10 Yosemite\n* Ubuntu Linux\n* Arch Linux\n\nOther unixoid systems should work just fine.\n\nWindows is not officially supported, although dryscrape should work\nwith [cygwin](https://www.cygwin.com/).\n\n### A word about Qt 5.6\n\nThe 5.6 version of Qt removes the Qt WebKit module in favor of the new module Qt WebEngine. So far webkit-server has not been ported to WebEngine (and likely won't be in the near future), so Qt <= 5.5 is a requirement.\n\n# Installation, Usage, API Docs\n\nDocumentation can be found at \n[dryscrape's ReadTheDocs page](http://dryscrape.readthedocs.io/).\n\nQuick installation instruction for Ubuntu:\n\n # apt-get install qt5-default libqt5webkit5-dev build-essential python-lxml python-pip xvfb\n # pip install dryscrape\n\n# Contact, Bugs, Contributions\n\nIf you have any problems with this software, don't hesitate to open an\nissue on [Github](https://github.com/niklasb/dryscrape) or open a pull\nrequest or write a mail to **niklas baumstark at Gmail**.\n"
},
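For orientation, a typical dryscrape session follows the pattern below. This is a sketch based on the project's documented quick-start; the URL is arbitrary, and `start_xvfb()` is only needed on a headless Linux box, as the changelog above notes.

```python
import sys
import dryscrape

if 'linux' in sys.platform:
    dryscrape.start_xvfb()  # only needed when no X server is available

session = dryscrape.Session(base_url='https://example.com/')
session.set_attribute('auto_load_images', False)  # skip images for speed
session.visit('/')
print(session.body()[:200])  # rendered HTML after JavaScript has executed
```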
{
"alpha_fraction": 0.5418139100074768,
"alphanum_fraction": 0.5418139100074768,
"avg_line_length": 21.342105865478516,
"blob_id": "e2448a717b354bc45d545c0b14f4b75e2ea6ee3f",
"content_id": "88fd2168280ede5cfa5fb18971c06886ccdf7424",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 849,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 38,
"path": "/venv/lib/python3.7/site-packages/dryscrape/docs/apidoc.rst",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "API Documentation\n=================\n\nThis documentation also contains the API docs for the ``webkit_server``\nmodule, for convenience (and because I am too lazy to set up dedicated docs\nfor it).\n\nOverview\n----------\n\n.. inheritance-diagram:: dryscrape.session\n dryscrape.mixins\n dryscrape.driver.webkit\n webkit_server\n\nModule :mod:`dryscrape.session`\n-------------------------------\n\n.. automodule:: dryscrape.session\n :members:\n\nModule :mod:`dryscrape.mixins`\n-------------------------------\n\n.. automodule:: dryscrape.mixins\n :members:\n\nModule :mod:`dryscrape.driver.webkit`\n-------------------------------------\n\n.. automodule:: dryscrape.driver.webkit\n :members:\n\nModule :mod:`webkit_server`\n-------------------------------\n\n.. automodule:: webkit_server\n :members:\n"
},
{
"alpha_fraction": 0.6808846592903137,
"alphanum_fraction": 0.6861506104469299,
"avg_line_length": 22.737499237060547,
"blob_id": "a9d5da33296dc6820dba6edd1bbee9577e6888e7",
"content_id": "27d183119342194bdaddfe29e2d2bc1787af3cad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1899,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 80,
"path": "/venv/lib/python3.7/site-packages/dryscrape/docs/installation.rst",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": ".. highlight:: none\n\nInstallation\n============\n\nPrerequisites\n-------------\n\nBefore installing dryscrape_, you need to install some software it depends on:\n\n* Qt_, QtWebKit_\n* lxml_\n* pip_\n* xvfb_ (necessary only if no other X server is available)\n\nOn Ubuntu you can do that with one command (the ``#`` indicates that you need\nroot privileges for this):\n\n::\n\n # apt-get install qt5-default libqt5webkit5-dev build-essential \\\n python-lxml python-pip xvfb\n\nPlease note that Qt4 is also supported.\n\nOn Mac OS X, you can use Homebrew_ to install Qt and\neasy_install_ to install pip_:\n\n::\n\n # brew install qt\n # easy_install pip\n\nOn other operating systems, you can use pip_ to install lxml (though you might\nhave to install libxml and the Python headers first).\n\nRecommended: Installing dryscrape from PyPI\n-------------------------------\n\nThis is as simple as a quick\n\n::\n\n # pip install dryscrape\n\nNote that dryscrape supports Python 2.7 and 3 as of version 1.0.\n\nInstalling dryscrape from Git\n-------------------------------\n\nFirst, get a copy of dryscrape_ using Git:\n\n::\n\n $ git clone https://github.com/niklasb/dryscrape.git dryscrape\n $ cd dryscrape\n\nTo install dryscrape, you first need to install webkit-server_. You can use\npip_ to do this for you (while still in the dryscrape directory).\n\n::\n\n # pip install -r requirements.txt\n\nIf you want, you can of course also install the dependencies manually.\n\nAfterwards, you can use the ``setup.py`` script included to install dryscrape:\n\n::\n\n # python setup.py install\n\n.. _Qt: http://www.qt.io\n.. _QtWebKit: http://doc.qt.io/qt-5/qtwebkit-index.html\n.. _lxml: http://lxml.de/\n.. _webkit-server: https://github.com/niklasb/webkit-server/\n.. _pip: http://pypi.python.org/pypi/pip\n.. _dryscrape: https://github.com/niklasb/dryscrape/\n.. _Homebrew: http://brew.sh/\n.. _easy_install: https://pypi.python.org/pypi/setuptools\n"
},
{
"alpha_fraction": 0.6742814779281616,
"alphanum_fraction": 0.6742814779281616,
"avg_line_length": 27.87234115600586,
"blob_id": "6b85ba8ac1be798e481d079e3f700e50bd1f4bd3",
"content_id": "ee9fbe537bbdc9f2c60de6f7fbbc813bda8bff49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1357,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 47,
"path": "/receivedSMS.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "\nfrom flask import Flask, request, redirect\nfrom twilio.twiml.messaging_response import MessagingResponse\nfrom getClass import getClass\nfrom findRate import findTeacher, findRating\n\napp = Flask(__name__)\n\[email protected](\"/sms\", methods=['GET', 'POST'])\ndef incoming_sms():\n\n #Get class name from text\n body = request.values.get('Body', None)\n\n #set up response\n resp = MessagingResponse()\n\n #find class schedules\n result = getClass(body)\n sectionsOutput = f'There are {len(result)} sections for {body}.\\n'\n teacherSet = set()\n for section in result:\n teacherSet.add(section.teacher)\n sectionsOutput += f'Section Name: {section.sectionName}, Teacher Name: {section.teacher}, Time: {section.time} \\n'\n \n #Find Ratings\n RatingsOutput = ''\n for teacher in teacherSet:\n Teacherid = findTeacher(teacher) \n if Teacherid != None:\n RatingsOutput += f'Teacher: {teacher}:\\n{findRating(Teacherid)}'\n \n\n #Check if can find any ratings\n if RatingsOutput:\n RatingsOutput = 'These are the ratings for these professors I can find on RMP:\\n' + RatingsOutput\n else:\n RatingsOutput = 'Cannot find any rating on RMP'\n\n\n #Send Texts\n\n # print(sectionsOutput)\n resp.message(sectionsOutput)\n # print(RatingsOutput)\n resp.message(RatingsOutput)\n\n return str(resp)"
},
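The webhook above only reads the `Body` form field, so it can be exercised without Twilio. A sketch, assuming the Flask app is running locally on port 5000; `CS111` is a placeholder course query:

```python
import requests

# Simulate an incoming Twilio SMS hitting the /sms webhook.
resp = requests.post("http://localhost:5000/sms", data={"Body": "CS111"})
print(resp.status_code)
print(resp.text)  # TwiML XML wrapping the two reply messages
```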
{
"alpha_fraction": 0.6459183692932129,
"alphanum_fraction": 0.655102014541626,
"avg_line_length": 33.89285659790039,
"blob_id": "ac1739b0b8cb7426d388d6c780f3b8c5a86f41de",
"content_id": "f45bdebc7aa55f857722b9527b0fa09daa4201a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 28,
"path": "/getClass.py",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\nimport collections\n\nurl = \"http://www.bu.edu/phpbin/course-search/section/\"\n#Input name of the class, find and return section,teacher and time from catalog.\ndef getClass(name):\n result = []\n Section = collections.namedtuple('Section',['sectionName','teacher','time'])\n #Get response from the name of the class\n querystring = {\"t\": name}\n sauce = requests.request(\"GET\", url, params=querystring)\n\n soup = BeautifulSoup(sauce.text, \"html.parser\")\n classList = soup.find_all(\"td\")\n divided = divide(classList, 8)\n #Append section, teacher and time to result\n for lst in divided:\n if lst[0].get_text()[-1] == \"1\":\n result.append(Section(name + lst[0].get_text(), lst[2].get_text(), lst[5].get_text()))\n return result\n\n#Divide an array of sub array of size n\ndef divide(arr, n):\n result = []\n for i in range(0, len(arr), n):\n result.append(arr[i:i + n])\n return result\n\n\n\n"
},
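The `divide` helper above chunks the flat list of `<td>` cells into rows of eight; its semantics in isolation (note the final chunk may be shorter than `n`):

```python
from getClass import divide

print(divide(list(range(10)), 4))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]  <- last chunk holds the remainder
```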
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 20.33333396911621,
"blob_id": "66004a056c29be27cb0142aca507e8ea8e0a9b46",
"content_id": "de11a9458b19d0696643c852e58300c713542df2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 64,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 3,
"path": "/venv/lib/python3.7/site-packages/dryscrape/requirements.txt",
"repo_name": "toutou826/FindProfRateBU",
"src_encoding": "UTF-8",
"text": "lxml\ngit+git://github.com/niklasb/webkit-server.git\nxvfbwrapper\n"
}
] | 14 |
ov8525/project-4_motiond-detention-device_-ohany-villa | https://github.com/ov8525/project-4_motiond-detention-device_-ohany-villa | bc39b59a4079f37b36ffd9e854cb90c453f994d3 | 2731c76cf139b889bc32e615bf9d9d199498df50 | 45e5ab58ceaaeb16de5bfa27e0a0dce18ecc3a5c | refs/heads/master | 2023-03-03T23:50:36.970974 | 2021-02-08T00:01:04 | 2021-02-08T00:01:04 | 336,917,129 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.6421052813529968,
"avg_line_length": 34.625,
"blob_id": "00d5557a5e0637fbb3532394a41ab50906d9a7e6",
"content_id": "2e60e7cf831f02a983ceb88a968a0bb81206fd5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 8,
"path": "/main.py",
"repo_name": "ov8525/project-4_motiond-detention-device_-ohany-villa",
"src_encoding": "UTF-8",
"text": "while True:\n print(input.acceleration(Dimension.X))\n if input.acceleration(Dimension.X) > 25 or input.acceleration(Dimension.Y)> 25:\n light.set_all(light.rgb(255,0,0))\n music.ba_ding.play_until_done()\n else:\n light.clear()\n music.stop_all_sounds()\n"
},
{
"alpha_fraction": 0.5709677338600159,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 27.18181800842285,
"blob_id": "e9bf89329f82b6810c452b8f16780898224ac8b3",
"content_id": "23edd7e31a798fff54528880acd1153aa712b221",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 11,
"path": "/main.ts",
"repo_name": "ov8525/project-4_motiond-detention-device_-ohany-villa",
"src_encoding": "UTF-8",
"text": "while (true) {\n console.log(input.acceleration(Dimension.X))\n if (input.acceleration(Dimension.X) > 25 || input.acceleration(Dimension.Y) > 25) {\n light.setAll(light.rgb(255, 0, 0))\n music.baDing.playUntilDone()\n } else {\n light.clear()\n music.stopAllSounds()\n }\n \n}\n"
}
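Both programs above implement the same threshold detector: light up and chime when the X or Y acceleration reading exceeds 25. A plain-Python sketch of that logic with the sensor read stubbed out, for reference:

```python
import random

def read_acceleration():
    # Stub standing in for input.acceleration(Dimension.X / Dimension.Y).
    return random.uniform(-40, 40), random.uniform(-40, 40)

THRESHOLD = 25
for _ in range(10):
    x, y = read_acceleration()
    state = "ALARM" if x > THRESHOLD or y > THRESHOLD else "quiet"
    print(f"{state}  x={x:6.1f}  y={y:6.1f}")
```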
] | 2 |
Estefanos8080/Djangotut | https://github.com/Estefanos8080/Djangotut | ca5a07d64e7544dec531e12f8ef9d8a5005bd799 | 7b1085fb9d78995907c4443b90f7a016db6e0ee9 | 29414bdb780be73b656b9173a7a60a28974b7a49 | refs/heads/main | 2023-04-05T09:12:21.476170 | 2021-04-13T19:49:37 | 2021-04-13T19:49:37 | 356,130,133 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7169230580329895,
"avg_line_length": 28.636363983154297,
"blob_id": "5d78c9fe27a5537e251ebd42b3dfea6893208acd",
"content_id": "9ae620a4fb3a95b6f70efa478e81dbde62bcdc24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/products/models.py",
"repo_name": "Estefanos8080/Djangotut",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\nclass Products(models.Model):\n \n title = models.CharField(max_length = 120)\n description = models.TextField(blank = True)\n price = models.DecimalField(decimal_places=2, max_digits=1000)\n summary = models.TextField()\n featured = models.BooleanField()"
}
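A sketch of exercising the model from `python manage.py shell`, assuming the products app is listed in INSTALLED_APPS and migrations have been applied; the field values are illustrative:

```python
from products.models import Products

p = Products.objects.create(
    title="Widget",
    description="A small widget.",
    price="9.99",            # DecimalField accepts a string or a Decimal
    summary="Cheap and cheerful.",
    featured=True,
)
print(Products.objects.filter(featured=True).count())
```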
] | 1 |
wg4568/ChatServer | https://github.com/wg4568/ChatServer | 34a7b4b8d66f0dc5d69db8ff2e5959b0b38dfb63 | 94df08b158de05c048539eabab2f1f34fc0e8d19 | 5e5f6241370237d6cc96205ca74fb32cacff8337 | refs/heads/master | 2021-01-09T20:20:29.225784 | 2016-08-03T04:54:41 | 2016-08-03T04:54:41 | 64,816,153 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.587110698223114,
"alphanum_fraction": 0.5979031920433044,
"avg_line_length": 33.1368408203125,
"blob_id": "a07cfad2adf3bb657a8b3be15fb2dea4bf4f2697",
"content_id": "5164b374ed4722ab23c0bf5043c6e8b4de9e11ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3243,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 95,
"path": "/application.py",
"repo_name": "wg4568/ChatServer",
"src_encoding": "UTF-8",
"text": "# Start with a basic flask app webpage.\nfrom flask.ext.socketio import SocketIO, emit\nfrom flask import Flask, render_template, url_for, copy_current_request_context, request, session, redirect\nfrom random import random\nfrom time import sleep\nimport json, pickle, os, binascii, pickler\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = binascii.b2a_hex(os.urandom(16))\napp.config['DEBUG'] = True\napp.config['PICKLE_RESET'] = \"KGxwMAou\"\napp.config['BAD_NAMES'] = [\"wg4568\"]\napp.config['SECRET_PASSWORD'] = \"thepassword\"\n\n#turn the flask app into a socketio app\nsocketio = SocketIO(app)\n\nclass Reciever():\n def __init__(self):\n self.messages = pickler.load(\"messages\")\n\n def send(self, user, message):\n if len(message):\n self.messages.insert(0, (user, message))\n pickler.save(self.messages, \"messages\")\n socketio.emit('newmsg', {'content': message, 'user': user}, namespace='/msg')\n return \"Sent from \" + user + \" that reads, \" + message\n else:\n return \"Message was blank, not sent\"\n\n def render(self):\n# if not session[\"VIEW\"]:\n# return '<p id=\"alert\"><strong>' + self.messages[0][0] + ': </strong>' + self.messages[0][1] + '</p>'\n# else:\n\tif 1:\n html = \"\"\n for msg in self.messages[:session[\"VIEW\"]]:\n if msg[0] == \"ALERT\":\n html += '<p id=\"alert\"><strong>' + msg[0] + ': </strong>' + msg[1] + '</p>'\n else:\n html += '<p><strong>' + msg[0] + ': </strong>' + msg[1] + '</p>'\n return html\n\nrec = Reciever()\n\[email protected]_request\ndef before_request():\n try: session[\"VIEW\"]\n except KeyError: session[\"VIEW\"] = 0\n try: session[\"USERNAME\"]\n except KeyError: session[\"USERNAME\"] = \"AnonUser-\" + binascii.b2a_hex(os.urandom(4))\n# if not request.url.split(\"/\")[-1:][0] == \"send\":\n# rec.send(\"ALERT\", session[\"USERNAME\"] + \" has joined the room\")\n\[email protected]('/user_newid')\ndef user_newid():\n session[\"USERNAME\"] = \"AnonUser-\" + binascii.b2a_hex(os.urandom(4))\n return redirect(\"/\")\n\[email protected]('/user_setid', methods=[\"POST\"])\ndef user_setname():\n username = request.form[\"username\"]\n canbypass = False\n if username.split(\"-\")[-1:][0] == app.config[\"SECRET_PASSWORD\"]:\n canbypass = True\n username = username.split(\"-\")[0]\n if not username in app.config['BAD_NAMES'] or canbypass:\n session[\"USERNAME\"] = username\n return redirect(\"/\")\n\[email protected]('/send', methods=[\"POST\"])\ndef send():\n user = request.form[\"user\"]\n content = request.form[\"content\"]\n return rec.send(user, content)\n\[email protected]('/', methods=[\"GET\", \"POST\"])\ndef index():\n if request.args.get(\"viewall\"): session[\"VIEW\"] += 10\n else: session[\"VIEW\"] = 0\n print session[\"VIEW\"]\n return render_template('index.html', old=rec.render(), username=session[\"USERNAME\"])\n\[email protected]('connect', namespace='/msg')\ndef test_connect():\n print('Client connected')\n\[email protected]('disconnect', namespace='/msg')\ndef test_disconnect():\n# rec.send(\"ALERT\", session[\"USERNAME\"] + \" has left the room\",)\n print('Client disconnected')\n\n\nif __name__ == '__main__':\n socketio.run(app, host=\"0.0.0.0\")\n"
},
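Because `/send` accepts an ordinary form post, messages can be injected from a script as well as from the page's JavaScript. A sketch, assuming the server is running locally on port 5000; the user and content are placeholders:

```python
import requests

resp = requests.post(
    "http://localhost:5000/send",
    data={"user": "tester", "content": "hello from a script"},
)
print(resp.text)  # e.g. "Sent from tester that reads, hello from a script"
```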
{
"alpha_fraction": 0.3991684019565582,
"alphanum_fraction": 0.4033263921737671,
"avg_line_length": 34.62963104248047,
"blob_id": "9a238a890d2d5b630fca19b94e01d3c60e166e02",
"content_id": "4da9e89d33e4f137ef5f15aa36397db4d1c86057",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 27,
"path": "/static/js/send.js",
"repo_name": "wg4568/ChatServer",
"src_encoding": "UTF-8",
"text": "$(function() {\n $('button').click(function() {\n msg = document.getElementById('messagebox').value\n if (msg.charAt(0) == \"!\") {\n commands={ \n \"!rules\":\"<strong>RULES:</strong><br/>1) Respect others<br/>2) No impersonation<br/>3) No spam\" \n };\n if (commands[msg] == undefined) {\n $(\"<p id='rules'>Command not found</p>\").hide().prependTo(\"#log\").fadeIn(\"slow\");\n } else {\n $(\"<p id='rules'>\" + commands[msg] + \"</p>\").hide().prependTo(\"#log\").fadeIn(\"slow\");\n }\n } else {\n $.ajax({\n url: '/send',\n data: $('form').serialize(),\n type: 'POST',\n success: function(response) {\n console.log(response);\n },\n error: function(error) {\n console.log(error);\n }\n })\n };\n });\n});\n"
},
{
"alpha_fraction": 0.7181628346443176,
"alphanum_fraction": 0.743215024471283,
"avg_line_length": 25.66666603088379,
"blob_id": "3a3ae23b749124a9962d933a7a2f37b01e24fc81",
"content_id": "16258ac98e324768afe2f5238a750906a9effbb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 18,
"path": "/pickler.py",
"repo_name": "wg4568/ChatServer",
"src_encoding": "UTF-8",
"text": "from base64 import b64encode, b64decode\nimport pickle\nimport os\n\nstorage_file = \"picklejar/\"\nextension = \".pickle.b64\"\n\ndef save(obj, name):\n\tglobal storage_file, extension\n\topen(storage_file + name + extension, \"w\").write(b64encode(pickle.dumps(obj)))\n\ndef load(name):\n\tglobal storage_file, extension\n\treturn pickle.loads(b64decode(open(storage_file + name + extension, \"r\").read()))\n\ndef remove(name):\n\tglobal storage_file, extension\n\tos.remove(storage_file + name + extension)"
},
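Round-trip usage of the helper above, assuming a `picklejar/` directory exists next to the script. Since `load` unpickles whatever is on disk, the jar should only ever hold this app's own files:

```python
import pickler

pickler.save({"greeting": "hi", "count": 3}, "demo")
data = pickler.load("demo")
print(data["count"])  # 3
pickler.remove("demo")
```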
{
"alpha_fraction": 0.4873034954071045,
"alphanum_fraction": 0.4897218942642212,
"avg_line_length": 29.629629135131836,
"blob_id": "8d228d19acad7b5370b5a5ca73dcbb73497cc3b9",
"content_id": "c45401372d93fe97801a278f2400e039cfc7576f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 827,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 27,
"path": "/static/js/recieve.js",
"repo_name": "wg4568/ChatServer",
"src_encoding": "UTF-8",
"text": "$(document).ready(function(){\n var socket = io.connect('http://' + document.domain + ':' + location.port + '/msg');\n\n\tArray.prototype.remove = function() {\n\t var what, a = arguments, L = a.length, ax;\n\t while (L && this.length) {\n\t what = a[--L];\n\t while ((ax = this.indexOf(what)) !== -1) {\n\t this.splice(ax, 1);\n\t }\n\t }\n\t return this;\n\t};\n\n socket.on('newmsg', function(msg) {\n console.log(\"Received msg from \" + msg.user + \" that reads, \" + msg.content)\n\n if (msg.user == \"ALERT\") {\n\t msg_string = '<p id=\"alert\"><strong>' + msg.user + ': </strong>' + msg.content + '</p>';\n\t } else {\n\t msg_string = '<p><strong>' + msg.user + ': </strong>' + msg.content + '</p>';\n\t }\n\n $('#log').prepend($(msg_string).fadeIn('slow'));\n });\n\n});\n"
}
] | 4 |
sgopi2k9/PythonSeleniumAutomation | https://github.com/sgopi2k9/PythonSeleniumAutomation | 2afdd39a8e89f58df55fe0afa31acfc3fbd72f00 | 07121d81cac6e2278072818ba80b8b9cde1e76ed | 579a3cf00dedc8ed0e2d7778d9fba0152e8a79b0 | refs/heads/master | 2022-12-20T20:34:03.410458 | 2020-09-27T06:09:05 | 2020-09-27T06:09:05 | 298,964,926 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6504504680633545,
"alphanum_fraction": 0.6540540456771851,
"avg_line_length": 29.77777862548828,
"blob_id": "ac3fac8279c46051db1cd53adcbcde564125bb10",
"content_id": "4ab955f28b9da08c5a5d4072e300593bb42c524c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 555,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 18,
"path": "/utilities/custom_logger.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "import logging\nimport inspect\n\nclass CustomLogger():\n\n def customLog(self,logLevel = logging.DEBUG):\n loggerName = inspect.stack()[1][3]\n logger = logging.getLogger(loggerName)\n logger.setLevel(logging.DEBUG)\n\n fhandler = logging.FileHandler(\"automation.log\",mode='a')\n fhandler.setLevel(logLevel)\n\n formatter = logging.Formatter('%(asctime)s -%(name)s %(levelname)s %(message)s',datefmt='%d/%m/%y %I:%M:%S')\n fhandler.setFormatter(formatter)\n\n logger.addHandler(fhandler)\n return logger\n\n"
},
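Typical usage mirrors the call sites elsewhere in this repo: the logger is created once at class-definition time, and `inspect.stack()[1][3]` names it after the defining scope. The demo class below is illustrative, not part of the framework:

```python
import logging
from utilities.custom_logger import CustomLogger as cl

class DemoPage:
    log = cl.customLog(logging.DEBUG)  # logger will be named "DemoPage"

    def open(self):
        self.log.info("navigating to the page")

DemoPage().open()  # appends a record to automation.log
```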
{
"alpha_fraction": 0.5482625365257263,
"alphanum_fraction": 0.5514800548553467,
"avg_line_length": 31.3125,
"blob_id": "40a133d2517b0fb894d05898df88149cf7345acd",
"content_id": "cec3d8e3f63277e60dbac9c0d5d4276c8fbaaa8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1554,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 48,
"path": "/utilities/teststatus.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "from base.custom_driver import CustomDriver\nfrom utilities.custom_logger import CustomLogger as cl\nimport logging\nfrom utilities.read_excel_data import writeExcelData\n\nclass TestStatus(CustomDriver):\n\n log = cl.customLog(logging.DEBUG)\n count = 2\n\n def __init__(self, driver):\n super().__init__(driver)\n self.resultList = []\n\n def setResult(self, result):\n try:\n if result is not None:\n if result:\n self.resultList.append(\"PASS\")\n self.log.info(\"Verification passed\")\n else:\n self.resultList.append(\"FAIL\")\n self.log.error(\"***VERIFICATION FAILED***\")\n else:\n self.resultList.append(\"FAIL\")\n self.log.error(\"***VERIFICATION FAILED***\")\n except:\n self.log.error(\"***EXCEPTION OCCURED***\")\n\n def mark(self,result):\n self.setResult(result)\n\n def mark_final(self, testcase, result, filepath):\n self.setResult(result)\n if \"FAIL\" in self.resultList:\n self.log.error(\"***\"+ testcase+ \"FAILED ***\")\n self.screenShot()\n writeExcelData(filepath,self.count,3,\"Test Failed\")\n self.count+=1\n self.resultList.clear()\n assert True == False\n\n else:\n self.log.info(testcase + \" PASSED\")\n writeExcelData(filepath, self.count, 3, \"Test Passed\")\n self.count+=1\n self.resultList.clear()\n assert True == True\n\n\n\n"
},
{
"alpha_fraction": 0.5993099808692932,
"alphanum_fraction": 0.6010349988937378,
"avg_line_length": 39.118812561035156,
"blob_id": "6286680d2d4fbefe9e14bd9172df431464b976b9",
"content_id": "9806069ee9c26e5e43ebb603845148a0cded6036",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4058,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 101,
"path": "/base/custom_driver.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import *\nfrom utilities.custom_logger import CustomLogger as cl\nimport logging\nimport time\nimport os\n\nclass CustomDriver():\n\n log = cl.customLog(logging.DEBUG)\n\n def __init__(self, driver):\n self.driver = driver\n\n def screenShot(self):\n filename = str(time.time()*1000) + \".png\"\n screenshotdir = \"E:\\\\Workspace_python\\\\AutoFramework\\\\base\\\\screenshots\"\n relativefilename = screenshotdir + filename\n print(relativefilename)\n try:\n self.driver.save_screenshot(relativefilename)\n self.log.info(\"Screen shot taken\")\n except:\n self.log.error(\"***Exception occured screen shot not taken***\")\n\n\n def getByType(self, locatorType = \"id\"):\n self.locatorType = locatorType.lower()\n if self.locatorType == \"id\":\n return By.ID\n elif self.locatorType == \"name\":\n return By.NAME\n elif self.locatorType == \"css\":\n return By.CSS_SELECTOR\n elif self.locatorType == \"xpath\":\n return By.XPATH\n elif self.locatorType == \"tagname\":\n return By.TAG_NAME\n elif self.locatorType == \"link\":\n return By.LINK_TEXT\n elif self.locatorType == \"partial_link\":\n return By.PARTIAL_LINK_TEXT\n elif self.locatorType == \"class_name\":\n return By.CLASS_NAME\n else:\n self.log.info(\"Locator type: \"+ self.locatorType +\" is NOT supported\")\n return False\n\n def getElement(self, locator, locatorType = \"id\"):\n element = None\n byType = self.getByType(locatorType)\n try:\n element = self.driver.find_element(byType, locator)\n self.log.info(\"Element found with Locator: \"+locator+\" with locator type: \"+locatorType)\n except:\n self.log.error(\"Element NOT found with Locator: \"+locator+\" with locator type: \"+locatorType)\n return element\n\n def clickElement(self, locator, locatorType =\"id\"):\n element = None\n try:\n element = self.getElement(locator, locatorType)\n element.click()\n self.log.info(\"Element clicked with Locator: \"+locator+\" with locator type: \"+locatorType)\n except:\n self.log.error(\"Element NOT clicked with Locator: \" + locator + \" with locator type: \" + locatorType)\n\n def isElementPresent(self, locator, locatorType =\"id\"):\n element = None\n try:\n element = self.getElement(locator, locatorType)\n #element.click()\n self.log.info(\"Element found with Locator: \"+locator+\" with locator type: \"+locatorType)\n return True\n except:\n self.log.error(\"Element NOT found with Locator: \" + locator + \" with locator type: \" + locatorType)\n return False\n\n\n def sendKeys(self, fieldValue, locator, locatorType=\"id\"):\n element = None\n try:\n element = self.getElement(locator, locatorType)\n element.send_keys(fieldValue)\n self.log.info(\"Sent keys with Locator: \" + locator + \" with locator type: \" + locatorType)\n except:\n self.log.error(\"Keys NOT sent with Locator: \" + locator + \" with locator type: \" + locatorType)\n\n def getTitle(self):\n return self.driver.title\n\n def waitForElement(self, locator, locatorType = \"id\"):\n element = None\n byType = self.getByType(locatorType)\n wait = WebDriverWait(self.driver,10,poll_frequency=1,ignored_exceptions=[NoSuchElementException,\n ElementNotVisibleException,\n ElementNotSelectableException])\n element = wait.until(EC.element_to_be_clickable((byType,locator)))\n return element\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.26421403884887695,
"alphanum_fraction": 0.35451504588127136,
"avg_line_length": 20.214284896850586,
"blob_id": "0a56ab3609dbdc67df80f10e7f3e9a323d21ed82",
"content_id": "3c37ebff124d13943324e8b6a5c299cb2eb1219e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 14,
"path": "/utilities/arrange.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "def arrange():\n a = [64, 25, 12, 22, 11, 1,2,44,3,122, 23, 34,-1,0]\n b=[]\n min = a[0]\n for i in range(0,len(a)):\n for j in range(i+1,len(a)):\n if a[i] > a[j]:\n t = a[i]\n a[i] = a[j]\n a[j] = t\n\n print(str(a))\n\narrange()\n\n\n"
},
{
"alpha_fraction": 0.6289592981338501,
"alphanum_fraction": 0.6289592981338501,
"avg_line_length": 28.177778244018555,
"blob_id": "fa3708c8f95fcf372889ae95f6eb83e3dbafd3ff",
"content_id": "8a20b52348176caee56c891738606c24926c62dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1326,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 45,
"path": "/pages/home/login_page.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "from base.custom_driver import CustomDriver\nfrom utilities.custom_logger import CustomLogger as cl\nimport logging\n\nclass LoginPage(CustomDriver):\n\n log = cl.customLog(logging.DEBUG)\n\n def __init__(self, driver):\n self.driver = driver\n super().__init__(self.driver)\n\n def clickLogin(self):\n self.clickElement(\"//a[contains(text(),'Login')]\", \"xpath\")\n\n def enterEmail(self , username):\n email_field = self.waitForElement(\"user_email\")\n self.sendKeys(username, \"user_email\")\n\n def enterPassword(self, password):\n self.sendKeys(password, \"user_password\")\n\n def clickLoginButton(self):\n self.clickElement(\"//input[@value = 'Log In']\", \"xpath\")\n\n def verifyLoginSuccesful(self):\n result = self.isElementPresent(\"gravatar\", \"class_name\")\n return result\n\n def verifyLoginFailed(self):\n result = self.isElementPresent(\"//div[contains(text(),'Invalid email or password.')]\",\"xpath\")\n return result\n\n def verifyTitle(self):\n title = self.getTitle()\n if 'Google' in title:\n return True\n else:\n return False\n\n def login(self, username, password):\n self.clickLogin()\n self.enterEmail(username)\n self.enterPassword(password)\n self.clickLoginButton()\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6858006119728088,
"alphanum_fraction": 0.6918429136276245,
"avg_line_length": 33.55263137817383,
"blob_id": "7084d6a8dcb613a387b509038fbd384484e6da9d",
"content_id": "dde2ffaab0c2e7eb9c402638d11d6fa643ec6b70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1324,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 38,
"path": "/tests/home/login_test.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "from pages.home.login_page import LoginPage\nfrom utilities.teststatus import TestStatus\nimport unittest\nimport pytest\nimport time\nfrom utilities.read_excel_data import readExcelData\nfrom ddt import ddt,data,unpack\n\n@ddt\[email protected](\"oneTimeSetUp\")\nclass TestLogin(unittest.TestCase):\n\n filepath = \"E:\\\\Workspace_python\\\\AutoFramework\\\\excel_data.xlsx\"\n\n @pytest.fixture(autouse=True)\n def classSetUp(self, oneTimeSetUp):\n self.lp = LoginPage(self.driver)\n self.ts = TestStatus(self.driver)\n\n @pytest.mark.run(order=2)\n def test_validLogin(self):\n self.driver.get('https://learn.letskodeit.com/p/practice')\n result1 = self.lp.verifyTitle()\n self.ts.mark(result1)\n self.lp.login(\"[email protected]\" , \"abcabc\")\n result2 = self.lp.verifyLoginSuccesful()\n self.ts.mark_final(\"test_validLogin\",result2,self.filepath)\n time.sleep(10)\n\n @data(*readExcelData(\"E:\\\\Workspace_python\\\\AutoFramework\\\\excel_data.xlsx\"))\n @unpack\n @pytest.mark.run(order=1)\n def test_invalidLogin(self, username, password):\n\n self.driver.get('https://learn.letskodeit.com/p/practice')\n self.lp.login(username, password)\n result = self.lp.verifyLoginFailed()\n self.ts.mark_final(\"test_invalidLogin\", result, self.filepath)\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.625165581703186,
"alphanum_fraction": 0.6331126093864441,
"avg_line_length": 23.354839324951172,
"blob_id": "68a6d5a4d0af3aa92e957ddfd71c1975760e4a57",
"content_id": "1d3a697d6aed20d3e9c2dfa23479712e3d79871e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 755,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 31,
"path": "/utilities/read_excel_data.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "import openpyxl\n\ndef readExcelData(filepath):\n wb = openpyxl.load_workbook(filepath)\n ws = wb[\"Sheet1\"]\n my_data = []\n t=[]\n maxRow = ws.max_row\n maxColumn = ws.max_column\n\n for i in range(2,maxRow+1):\n for j in range(1,maxColumn):\n t.append(ws.cell(row=i,column=j).value)\n my_data.append(list(t))\n t.clear()\n return my_data\n\ndef writeExcelData(filepath,row_num,column_num, result_message):\n wb = openpyxl.load_workbook(filepath)\n ws = wb[\"Sheet1\"]\n ws.cell(row=row_num,column=column_num).value = result_message\n wb.save(filepath)\n\n\n\"\"\"\ndef writeExcelData(filepath, result):\n wb = openpyxl.load_workbook(filepath)\n ws = wb.get_sheet_by_name(\"Sheet 1\")\n ws.\n\"\"\"\n#readExcelData()\n"
},
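Usage sketch for the two helpers, assuming the workbook layout that login_test.py relies on: Sheet1 with a header row, credentials in the first two columns, and the third column reserved for results:

```python
from utilities.read_excel_data import readExcelData, writeExcelData

path = "E:\\Workspace_python\\AutoFramework\\excel_data.xlsx"  # path used by the tests
for username, password in readExcelData(path):
    print(username, password)

writeExcelData(path, 2, 3, "Test Passed")  # row 2, column 3 = first data row's result
```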
{
"alpha_fraction": 0.7140575051307678,
"alphanum_fraction": 0.7156549692153931,
"avg_line_length": 25.04166603088379,
"blob_id": "a33504c8efbb7206a9e12be5a58c9a7db8932f00",
"content_id": "e366b53cbfa0110e77e93e8a880dde51cc034b8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 626,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 24,
"path": "/tests/conftest.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom base.webdriverfactory import WebDriverFactory\n\[email protected](scope = \"class\")\ndef oneTimeSetUp(request, browser):\n baseURL = 'https://learn.letskodeit.com/p/practice'\n wdf = WebDriverFactory(browser)\n driver = wdf.getWebDriverInstance()\n driver.get(baseURL)\n driver.maximize_window()\n driver.implicitly_wait(5)\n\n if request.cls is not None:\n request.cls.driver = driver\n\n yield driver\n driver.quit()\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\")\n\[email protected](scope=\"session\")\ndef browser(request):\n return request.config.getoption(\"--browser\")\n\n"
},
{
"alpha_fraction": 0.5775281190872192,
"alphanum_fraction": 0.5775281190872192,
"avg_line_length": 26.5,
"blob_id": "edaf9926835ebe3f3ebeedbcd77507475f0b2e44",
"content_id": "0cc85c86bcdd627b28d5e08159f553cf71d0b5d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 16,
"path": "/base/webdriverfactory.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\n\nclass WebDriverFactory():\n\n def __init__(self, browser):\n self.browser = browser\n\n def getWebDriverInstance(self):\n if self.browser == \"Ie\":\n return webdriver.Ie()\n elif self.browser == \"chrome\":\n return webdriver.Chrome()\n elif self.browser == \"firefox\":\n return webdriver.Firefox()\n else:\n print(\"Not supported browser\")\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5725806355476379,
"alphanum_fraction": 0.6129032373428345,
"avg_line_length": 9.25,
"blob_id": "08084f1130a1157f8460f183dbfad054eabf18c7",
"content_id": "deabe36cf05d8ef09898383c925bf9b04f01b9ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 12,
"path": "/configfiles/random_digit.py",
"repo_name": "sgopi2k9/PythonSeleniumAutomation",
"src_encoding": "UTF-8",
"text": "s = \"geeks for geeks contribute practice\"\nw1 = \"geeks\"\nw2 = \"practice\"\n\na = s.split()\nl1=0\nwhile(i<=len(s)):\n\n\n\n\nprint(l1)\n\n"
}
] | 10 |
rbirkby/PIZeroLights | https://github.com/rbirkby/PIZeroLights | 4fe7f5937e1c66780e47d4900d9b464b1bc8ca74 | d0ba8c36ea73e9e17469c5531e3f16535b654e46 | 40ea1ac9d4bb0fde98b841a22203ec3f28e54d45 | refs/heads/master | 2021-01-10T05:00:32.162768 | 2015-11-30T19:28:22 | 2015-11-30T19:28:22 | 47,140,975 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4870588183403015,
"alphanum_fraction": 0.5858823657035828,
"avg_line_length": 14,
"blob_id": "8f167129dd03223fc29e6b025461b546c2dd9ef2",
"content_id": "c9f465c0f20239bc7e4f6a3c1ffc5d5d67f6df6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 27,
"path": "/lights.py",
"repo_name": "rbirkby/PIZeroLights",
"src_encoding": "UTF-8",
"text": "import time\nimport scrollphat as s\n\ns.clear()\ns.set_brightness(2)\n\n#time.sleep(2)\n\n# draw arrow\ns.set_pixel(5, 4, 1)\ns.set_pixel(5, 0, 1)\ns.set_pixel(4, 1, 1)\ns.set_pixel(4, 3, 1)\ns.set_pixel(3, 2, 1)\ns.set_pixel(4, 2, 1)\ns.set_pixel(5, 2, 1)\ns.set_pixel(6, 2, 1)\ns.set_pixel(7, 2, 1)\ns.set_pixel(8, 2, 1)\ns.set_pixel(9, 2, 1)\ns.set_pixel(10, 2, 1)\n\n\nwhile True:\n\ts.scroll()\n\ts.update()\n\ttime.sleep(0.05)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
}
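The arrow above can equally be drawn from a coordinate list, which keeps the shape data separate from the drawing loop; a sketch against the same scrollphat API:

```python
import time
import scrollphat as s

# Same pixels as the literal set_pixel calls above, as (x, y) pairs.
ARROW = [(5, 0), (4, 1), (4, 3), (5, 4)] + [(x, 2) for x in range(3, 11)]

s.clear()
s.set_brightness(2)
for x, y in ARROW:
    s.set_pixel(x, y, 1)

while True:
    s.scroll()
    s.update()
    time.sleep(0.05)
```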
] | 1 |
yorkrobotics/York_FRC_2018 | https://github.com/yorkrobotics/York_FRC_2018 | ec6b00842aae0b56f4b137abf3ea708d3565c568 | f26f7a5ff22d4b412080396d3459a3127f183c55 | 5edf9b7cf4b94b8010116ef855b7e1974dac2f63 | refs/heads/master | 2022-01-31T21:12:00.036570 | 2018-03-28T22:41:22 | 2018-03-28T22:41:22 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.526090681552887,
"alphanum_fraction": 0.5603079795837402,
"avg_line_length": 52.181819915771484,
"blob_id": "1c3e04263009cb9abc0e2313ae658bb11d213266",
"content_id": "26c87cd2ccce880657383ad14e02a28a380247ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 370,
"num_lines": 22,
"path": "/MotionMagic/reversal.py",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfileName = input(\"fileName: \")\nfile = open(fileName, \"r\")\ncontents = file.readlines()\n\nfileNameList = list(fileName)\nfor i in range(len(fileNameList)):\n\tif (fileNameList[i] == \"R\"):\n\t\tfileNameList[i] = \"L\"\n\telif (fileNameList[i] == \"L\"):\n\t\tfileNameList[i] = \"R\"\n\nfileName = \"\".join(fileNameList)\nprint(fileName)\nfile = open(fileName, \"w\")\nfor line in contents:\n\tline = line.split(\",\")\n\tline = list(map(float, line))\n\tvector = str(format(line[0], \".5f\"))+\", \"+str(format(-line[1], \".5f\"))+\", \"+str(format(-line[2], \".5f\"))+\", \"+str(format(line[3], \".5f\"))+\", \"+str(format(line[4], \".5f\"))+\", \"+str(format(-line[5], \".5f\"))+\", \"+str(format(line[6], \".5f\"))+\", \"+str(format(line[7], \".5f\"))+\", \"+str(format(line[8], \".5f\"))+\", \"+str(format(line[9], \".5f\"))+\"\\n\"\n\t#vector = str(format(line[0]-timeBias, \".5f\"))+\", \"+str(format(line[1], \".5f\"))+\", \"+str(format(line[2], \".5f\"))+\", \"+str(format(line[3], \".5f\"))+\", \"+str(format(line[4], \".5f\"))+\", \"+str(format(line[5], \".5f\"))+\", \"+str(format(x, \".5f\"))+\", \"+str(format(y, \".5f\"))+\", \"+str(format(line[6], \".5f\"))+\", \"+str(format(line[7], \".5f\"))+\", \"+str(format(line[8], \".5f\"))+\"\\n\"\n\tfile.write(vector)"
},
{
"alpha_fraction": 0.595020592212677,
"alphanum_fraction": 0.6238581538200378,
"avg_line_length": 24.96744155883789,
"blob_id": "2c8ca6a9faba73f0ec1bac73c60060e77ea25364",
"content_id": "4a8c0562c4be45b5bdb5aa774e33390ea8de2b7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5583,
"license_type": "no_license",
"max_line_length": 358,
"num_lines": 215,
"path": "/MotionMagic/motionMagic.py",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfile = open(\"cleanUp.csv\", \"r\")\ncontents = file.readlines()\n\npath = []\nfor line in contents:\n\tline = line.split(\",\")\n\tpath.append(list(map(float, line)))\n\nstages = []\nstage = []\nlastSpeed = 1\niter = 0\nnewVectors = []\nfor vector in path:\n\titer += 1\n\tcurSpeed = vector[5]\n\tif ((curSpeed>0 and lastSpeed<=0) or (curSpeed<0 and lastSpeed>=0) or (curSpeed==0 and abs(lastSpeed)!=0)):\n\t\tstages.append(np.array(stage))\n\t\tstage = []\n\telif (iter == len(path)):\n\t\tstage.append(np.array(vector))\n\t\tstages.append(np.array(stage))\n\tstage.append(np.array(vector))\n\tlastSpeed = curSpeed\n\nmaxPosAcc = 0.86\nmaxNegAcc = -1.36\nmaxSpeed = 2\n\n#maxPosAcc = 0.86\n#maxNegAcc = -0.4\n#maxSpeed = 1.8\n\nfile = open(\"timeOptimal.csv\", \"w\")\nvectors = []\nlastDist = 0\ntimeElapsed = 0\niBias = 0\nfor i in range(len(stages)):\n\tstage = stages[i-iBias]\n\tif (len(stage)<2):\n\t\tprint(i)\n\t\tdel stages[i-iBias]\n\t\tiBias += 1\n\t\tcontinue\n\tinitPos = stage[0][4]\n\tfinalPos = stage[len(stage)-1][4]\n\tdisp = finalPos-initPos\n\tdeltaTime = 0.001\n\tdisTravelled = 0\n\tspeed = 0\n\tlastSpeed = 0\n\tacc = 0\n\titera = 0\n\tcutOffIdx = 0\n\twhile (abs(disTravelled) < abs(disp)):\n\t\tvector = []\n\t\tvector.append(timeElapsed)\n\t\tvector.append(lastDist+disTravelled)\n\t\t#print(lastDist, disTravelled)\n\t\tvector.append(speed)\n\t\tif (disp >= 0):\n\t\t\tslowDownTime = -speed/maxNegAcc\n\t\t\tprojectedDisp = disTravelled+speed*slowDownTime/2\n\t\t\tif (projectedDisp < disp):\n\t\t\t\tspeed += maxPosAcc*deltaTime\n\t\t\t\tacc = maxPosAcc\n\t\t\t\tif (speed > maxSpeed):\n\t\t\t\t\tspeed = maxSpeed\n\t\t\telse:\n\t\t\t\tspeed += maxNegAcc*deltaTime\n\t\t\t\tacc = maxNegAcc\n\t\telse:\n\t\t\tslowDownTime = -speed/maxPosAcc\n\t\t\tprojectedDisp = disTravelled+speed*slowDownTime/2\n\t\t\tif (projectedDisp > disp):\n\t\t\t\tspeed += maxNegAcc*deltaTime\n\t\t\t\tacc = maxNegAcc\n\t\t\t\tif (speed < -maxSpeed):\n\t\t\t\t\tspeed = -maxSpeed\n\t\t\telse:\n\t\t\t\tspeed += maxPosAcc*deltaTime\n\t\t\t\tacc = maxPosAcc\n\t\tdisTravelled += (lastSpeed+speed)/2*deltaTime\n\t\tlastSpeed = speed\n\t\ttimeElapsed += deltaTime\n\t\tvector.append(acc)\t\n\t\tvector.append(projectedDisp)\n\n\t\tif (itera%20 == 0):\n\t\t\tcurDisp = vector[1]\n\t\t\ttargetIdx = cutOffIdx\n\t\t\tfor n in range(cutOffIdx, int(len(stage)-1)):\n\t\t\t\t wayPointDisp = stage[n][4]\n\t\t\t\t if (disp >= 0):\n\t\t\t\t \tif (wayPointDisp<curDisp):\n\t\t\t\t \t\ttargetIdx = n\n\t\t\t\t \telse:\n\t\t\t\t \t\tbreak\n\t\t\t\t else:\n\t\t\t\t \tif (wayPointDisp>curDisp):\n\t\t\t\t \t\ttargetIdx = n\n\t\t\t\t \telse:\n\t\t\t\t \t\tbreak\n\t\t\tcutOffIdx = targetIdx\n\n\t\t\twayPoint = stage[targetIdx]\n\n\t\t\ttime = vector[0]\n\n\t\t\ttheta = wayPoint[1]\n\n\t\t\t'''lastIdx = targetIdx-1\n\t\t\tif (lastIdx < 0):\n\t\t\t\tlastIdx = 0\n\t\t\tnextIdx = targetIdx+1\n\t\t\tif (nextIdx >= len(path)):\n\t\t\t\tnextIdx = len(path)-1\n\t\t\tomega = (stage[nextIdx][1]-stage[lastIdx][1])/(0.02*2)'''\n\n\t\t\ts = vector[1]\n\n\t\t\tv = vector[2]\n\n\t\t\tx = wayPoint[6]\n\n\t\t\ty = wayPoint[7]\n\n\t\t\tliftPos = wayPoint[8]\n\n\t\t\tintakeSpeedL = wayPoint[9]\n\t\t\tintakeSpeedR = wayPoint[10]\n\t\t\tnewVector = [time, theta, s, v, x, y, liftPos, intakeSpeedL, intakeSpeedR]\n\t\t\tnewVectors.append(newVector)\n\t\t\t#newVector = str(format(time, \".5f\"))+\", \"+str(format(theta, \".5f\"))+\", \"+str(format(omega, \".5f\"))+\", \"+str(format(s, \".5f\"))+\", \"+str(format(v, \".5f\"))+\", 
\"+str(format(x, \".5f\"))+\", \"+str(format(y, \".5f\"))+\", \"+str(format(liftPos, \".5f\"))+\", \"+str(format(intakeSpeedL, \".5f\"))+\", \"+str(format(intakeSpeedR, \".5f\"))+\", \"+str(targetIdx)+\"\\n\"\n\t\titera += 1\n\n\t\tvectors.append(vector)\n\tlastDist += disTravelled \n\nfor i in range(len(newVectors)):\n\tvector = newVectors[i]\n\tlastI = i-1\n\tnextI = i+1\n\tif (lastI < 0):\n\t\tlastI = 0\n\tif (nextI >= len(newVectors)):\n\t\tnextI = len(newVectors)-1\n\tlastTheta = newVectors[lastI][1]\n\tnextTheta = newVectors[nextI][1]\n\tdeltaTime = newVectors[nextI][0]-newVectors[lastI][0]\n\tomega = (nextTheta-lastTheta)/deltaTime\n\tnewVector = str(format(vector[0], \".5f\"))+\", \"+str(format(vector[1], \".5f\"))+\", \"+str(format(omega, \".5f\"))+\", \"+str(format(vector[2], \".5f\"))+\", \"+str(format(vector[3], \".5f\"))+\", \"+str(format(vector[4], \".5f\"))+\", \"+str(format(vector[5], \".5f\"))+\", \"+str(format(vector[6], \".5f\"))+\", \"+str(format(vector[7], \".5f\"))+\", \"+str(format(vector[8], \".5f\"))+\"\\n\"\n\tfile.write(newVector)\n\tprint(newVector)\n\n'''effectiveVectors = []\ncutOffIdx = 0\ndeltaTime = 0.02\nfile = open(\"timeOptimal.csv\", \"w\")\nmaxOmega = -1\nfor i in range((len(vectors))):\n\tif (i%(deltaTime*1000) == 0):\n\t\tvector = vectors[i]\n\t\tdisp = vector[1]\n\t\ttargetIdx = cutOffIdx\n\t\tlowestErr = 10\n\t\tspeed = vector[2]\n\t\tfor n in range(cutOffIdx, int(cutOffIdx+20)): \n\t\t\twayPointDisp = path[n][4]\n\t\t\terror = abs(wayPointDisp-disp)\n\t\t\tif (error < lowestErr):\n\t\t\t\tlowestErr = error\n\t\t\t\ttargetIdx = n\n\t\tcutOffIdx = targetIdx\n\n\t\t#wayPoint = [fakeRuntime, theta, omega1, omega2, s, v, x, y, lift, lIntake, rIntake]\n\t\t#vector = [runtime, s, v]\n\t\twayPoint = path[targetIdx]\n\n\t\ttime = vector[0]\n\n\t\ttheta = wayPoint[1]\n\n\t\tlastIdx = targetIdx-1\n\t\tif (lastIdx < 0):\n\t\t\tlastIdx = 0\n\t\tnextIdx = targetIdx+1\n\t\tif (nextIdx >= len(path)):\n\t\t\tnextIdx = len(path)-1\n\t\tomega = (path[nextIdx][1]-path[lastIdx][1])/(deltaTime*2)\n\t\tif (maxOmega < omega):\n\t\t\tmaxOmega = omega\n\n\t\ts = vector[1]\n\n\t\tv = vector[2]\n\n\t\tx = wayPoint[6]\n\n\t\ty = wayPoint[7]\n\n\t\tliftPos = wayPoint[8]\n\n\t\tintakeSpeedL = wayPoint[9]\n\t\tintakeSpeedR = wayPoint[10]\n\n\t\tvector = str(format(time, \".5f\"))+\", \"+str(format(theta, \".5f\"))+\", \"+str(format(omega, \".5f\"))+\", \"+str(format(s, \".5f\"))+\", \"+str(format(v, \".5f\"))+\", \"+str(format(x, \".5f\"))+\", \"+str(format(y, \".5f\"))+\", \"+str(format(liftPos, \".5f\"))+\", \"+str(format(intakeSpeedL, \".5f\"))+\", \"+str(format(intakeSpeedR, \".5f\"))+\", \"+str(targetIdx)+\"\\n\"\n\n\t\t#effectiveVectors.append(vector)\n\t\tfile.write(vector)\n\t\tprint(vector)\nprint(maxOmega)'''\n"
},
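motionMagic.py resamples each stage into a time-optimal trapezoidal profile: accelerate at maxPosAcc, cruise at maxSpeed, and start braking once the projected stopping distance (v^2 / (2 * |maxNegAcc|)) would carry the robot past the target. The sketch below isolates that core loop for a single forward stage; the limits mirror the constants above, but the displacement is illustrative:

```python
def profile(disp, max_acc=0.86, max_dec=1.36, max_speed=2.0, dt=0.001):
    """Return (t, s, v) samples for one forward move of `disp` meters."""
    t = s = v = 0.0
    out = []
    while s < disp:
        stop_dist = v * v / (2 * max_dec)         # distance needed to brake to rest
        if s + stop_dist < disp:
            v = min(v + max_acc * dt, max_speed)  # accelerate, then cruise
        else:
            v -= max_dec * dt                     # brake
            if v <= 0.0:
                break                             # effectively at the target
        s += v * dt
        t += dt
        out.append((t, s, v))
    return out

traj = profile(3.0)
print(len(traj), traj[-1])  # sample count and final (t, s, v)
```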
{
"alpha_fraction": 0.8236514329910278,
"alphanum_fraction": 0.8236514329910278,
"avg_line_length": 21.952381134033203,
"blob_id": "47c16c4911afe895a7f171871292b62413c581c0",
"content_id": "cb84d62be68248e5d242f9e87a9ce1fd78890c6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 21,
"path": "/To_do.md",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "Priority from high to low.\n\nWhen you finish one, delete that entry.\n\nAutomate cube suck\nAutomate lift height (low, switch, scale)\nAutomate lift and spit\nLift calibration button\nLift PID tuning\nDrive get acceleration\nAuto speed constant tuning\nAuto speed err PID tuning\nAuto displacement err PID tuning\nAuto omega constant tuning\nAuto omega error PID tuning\nAuto angle err PID tuning\nAuto update coordinate\nClean up auto code\nRecord coordinate\nPlay back coordinate\nFollow coordinate\n"
},
{
"alpha_fraction": 0.676222026348114,
"alphanum_fraction": 0.6958656907081604,
"avg_line_length": 32.806949615478516,
"blob_id": "b9420a81007ec93365ec52bfef3d98f373a563d2",
"content_id": "c92b1b62d260e401e5393e36c9857099d28bc783",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 8756,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 259,
"path": "/src/org/usfirst/frc/team5171/robot/Robot.java",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "package org.usfirst.frc.team5171.robot;\n\nimport edu.wpi.first.wpilibj.IterativeRobot;\nimport edu.wpi.first.wpilibj.smartdashboard.SendableChooser;\nimport edu.wpi.first.wpilibj.smartdashboard.SmartDashboard;\nimport edu.wpi.first.wpilibj.DriverStation;\nimport edu.wpi.first.wpilibj.DriverStation.Alliance;\nimport edu.wpi.first.wpilibj.PWM;\nimport edu.wpi.first.wpilibj.command.Scheduler;\n\nimport static org.usfirst.frc.team5171.robot.Macro.*;\n\nimport org.usfirst.frc.team5171.robot.commands.*;\nimport org.usfirst.frc.team5171.robot.subsystems.*;\n\n/**\n * The VM is configured to automatically run this class, and to call the\n * functions corresponding to each mode, as described in the IterativeRobot\n * documentation. If you change the name of this class or the package after\n * creating this project, you must also update the manifest file in the resource\n * directory.\n */\npublic class Robot extends IterativeRobot {\n\tpublic static OI oi;\n\t\n\tPWM pwm = new PWM(0);\n\tAutoMode[] modes = new AutoMode[5];\n\tAutoMode autoMode;\n\t\n\tSendableChooser<String> priorityChooser = new SendableChooser<String>();\n\tSendableChooser<String> positionChooser = new SendableChooser<String>();\n\n\tint axisList[] = { LEFT_X, THROTTLE, LEFT_UP, RIGHT_UP, TURN, RIGHT_Y }; //the list of axis you are interested to track\n\tint buttonList[] = { A, B, X, Y, LB, RB }; //the list of buttons you are interested to track\n\tController driveStick = new Controller(0, axisList, buttonList, 1, 18, 200, 1.6); //driving xbox controller with port 0, 1% deadband and 18% cutoff running at 200Hz. The return value is x^1.6\n\tController controlStick = new Controller(1, axisList, buttonList, 1, 20, 200, 1.6); //secondary xbox controller with port 0, 1% deadband and 20% cutoff running at 200Hz. 
The return value is x^1.6\n\n\tint leftMotors[] = { 1, 3 };\n\tint rightMotors[] = { 2, 4 }; //motor CAN IDs\n\tDrive drive = new Drive(leftMotors, rightMotors, 200); //200Hz\n\n\tCubeLifter lift = new CubeLifter(6, 9, 200); //lift with motor 6, limit switch 9 at 200Hz\n\n\tint[] intakeMotors = { 7, 8 };\n\tIntake intake = new Intake(intakeMotors, 200);\n\t\n\tClimber climber = new Climber(5);\n\n\tStreamingServer stream = new StreamingServer(); //camera streaming\n\t\n\tRecord recorder;\n\tThread recordingThread = new Thread();\n\n\t/**\n\t * This function is run when the robot is first started up and should be used\n\t * for any initialization code.\n\t */\n\t@Override\n\tpublic void robotInit() {\n\t\toi = new OI();\n\t\tpriorityChooser.addDefault(\"Switch\", switchFirst);\n\t\tpriorityChooser.addObject(\"Scale\", scaleFirst);\n\t\t\n\t\tpositionChooser.addDefault(\"Left Start\", leftStart);\n\t\tpositionChooser.addObject(\"Middle Start\", middleStart);\n\t\tpositionChooser.addObject(\"Right Start\", rightStart);\n\n\t\tSmartDashboard.putData(\"Priority Chooser\", priorityChooser);\n\t\tSmartDashboard.putData(\"Position Chooser\", positionChooser);\n\t\t\n\t\tmodes[0] = new AutoSwitchFromLeft(drive, lift, intake, 100);\n\t\tmodes[1] = new AutoSwitchFromMiddle(drive, lift, intake, 100);\n\t\tmodes[2] = new AutoSwitchFromRight(drive, lift, intake, 100);\n\t\tmodes[3] = new AutoScaleFromLeft(drive, lift, intake, 100);\n\t\tmodes[4] = new AutoScaleFromRight(drive, lift, intake, 100); //initialize all 5 auto modes here\n\n\t\tintake.start();\n\t\tlift.start();\n\t\tstream.start(); //start intake, lift, and streaming service\n\t}\n\n\t@Override\n\tpublic void disabledInit() {\n\t\tif (!modes[0].isFresh()) {\n\t\t\tmodes[0] = new AutoSwitchFromLeft(drive, lift, intake, 100);\n\t\t}\n\t\tif (!modes[1].isFresh()) {\n\t\t\tmodes[1] = new AutoSwitchFromMiddle(drive, lift, intake, 100);\n\t\t}\n\t\tif (!modes[2].isFresh()) {\n\t\t\tmodes[2] = new AutoSwitchFromRight(drive, lift, intake, 100);\n\t\t}\n\t\tif (!modes[3].isFresh()) {\n\t\t\tmodes[3] = new AutoScaleFromLeft(drive, lift, intake, 100);\n\t\t}\n\t\tif (!modes[4].isFresh()) {\n\t\t\tmodes[4] = new AutoScaleFromRight(drive, lift, intake, 100);\n\t\t}\n\t}\n\n\t@Override\n\tpublic void disabledPeriodic() {\n\t}\n\n\t@Override\n\tpublic void autonomousInit() {\n\t\tString priority = priorityChooser.getSelected();\n\t\tString position = positionChooser.getSelected();\n\t\t\n\t\tSystem.out.println(position);\n\t\tif (priority == switchFirst) {\n\t\t\tif (position == leftStart) {\n\t\t\t\tautoMode = modes[0];\n\t\t\t\tSystem.out.println(\"mode0\");\n\t\t\t} else if (position == middleStart) {\n\t\t\t\tautoMode = modes[1];\n\t\t\t\tSystem.out.println(\"mode1\");\n\t\t\t} else if (position == rightStart) {\n\t\t\t\tautoMode = modes[2];\n\t\t\t\tSystem.out.println(\"mode2\");\n\t\t\t}\n\t\t} else if (priority == scaleFirst) {\n\t\t\tif (position == leftStart) {\n\t\t\t\tautoMode = modes[3];\n\t\t\t\tSystem.out.println(\"mode3\");\n\t\t\t} else if (position == rightStart) {\n\t\t\t\tautoMode = modes[4];\n\t\t\t\tSystem.out.println(\"mode4\");\n\t\t\t}\n\t\t} //get the desired auto mode\n\n\t\tDriverStation station = DriverStation.getInstance();\n\t\tString platePlacement = station.getGameSpecificMessage();\n\t\t//platePlacement = SmartDashboard.getString(SDkD, \"\");\n\n\t\tint[] platePos = { 0, 0 };\n\t\tif (platePlacement.length() > 0) {\n\t\t\tfor (int i = 0; i < 2; i++) {\n\t\t\t\tif (platePlacement.charAt(i) == 'L') {\n\t\t\t\t\tplatePos[i] = 
-1;\n\t\t\t\t} else if (platePlacement.charAt(i) == 'R') {\n\t\t\t\t\tplatePos[i] = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t} //get the position of plates. {1, -1} means the switch on the right but the scale is on the left\n\t\t\n\t\tSystem.out.println(platePos[0] +\" \"+ platePos[1]);\n\t\tif (autoMode != null && autoMode.isFresh()) {\n\t\t\tautoMode.initialize(platePos);\n\t\t\tautoMode.execute();\n\t\t\tautoMode.isFinished();\n\t\t}\n\t}\n\n\t/**\n\t * This function is called periodically during autonomous\n\t */\n\t@Override\n\tpublic void autonomousPeriodic() {\n\t}\n\n\t@Override\n\tpublic void teleopInit() {\n\t\tif (autoMode != null) {\n\t\t\tautoMode.isFinished();\n\t\t}\n\t\t\n\t\tif (!driveStick.isAlive()) {\n\t\t\tdriveStick.start();\n\t\t}\n\t\tif (!controlStick.isAlive()) {\n\t\t\tcontrolStick.start();\n\t\t}\n\t\tif (!drive.isAlive()) {\n\t\t\tdrive.start();\n\t\t}\n\t\tdrive.zeroSensor();\n\t}\n\n\t/**\n\t * This function is called periodically during operator control\n\t */\n\t@Override\n\tpublic void teleopPeriodic() {\n\t\tif (driveTestMode) { // Set PID constants in test mode\n\t\t\tdouble kP = Double.parseDouble(SmartDashboard.getString(SDkP, \"\"));\n\t\t\tdouble kI = Double.parseDouble(SmartDashboard.getString(SDkI, \"\"));\n\t\t\tdouble kD = Double.parseDouble(SmartDashboard.getString(SDkD, \"\"));\n\t\t\tdrive.setPIDConstants(kP, kI, kD);\n\t\t}\n\t\tif (liftTestMode) {\n\t\t\tdouble kP = Double.parseDouble(SmartDashboard.getString(SDkP, \"\"));\n\t\t\tdouble kI = Double.parseDouble(SmartDashboard.getString(SDkI, \"\"));\n\t\t\tdouble kD = Double.parseDouble(SmartDashboard.getString(SDkD, \"\"));\n\t\t\tlift.setPIDConstants(kP, kI, kD);\n\t\t}\n\t\t\n\t\tif(!recordingThread.isAlive() && SmartDashboard.getBoolean(\"DB/Button 0\", false) && driveStick.getButton(A)==true && drive.getCurSpeed()>0) {\n\t\t\tString file = SmartDashboard.getString(SDkP, \"\"); //recording file name\n\t\t\trecorder = new Record(file, drive, lift, intake, 60); //initialize recorder with drive, lift, and intake infomation at 60Hz\n\t\t\trecordingThread = new Thread(recorder);\n\t\t\trecordingThread.start(); //start recording if the smartdashboard button 0 is pressed\n\t\t} else if (recordingThread.isAlive()) { \n\t\t\tdrive.setRecordingStat(true);\n\t\t} else {\n\t\t\tdrive.setRecordingStat(false);\n\t\t}\n\t\t\n\t\tlift.updateDisplacement(controlStick.getAxis(LEFT_UP) - controlStick.getAxis(RIGHT_UP));\n\t\t//lifter.updateSpeed(controlStick.get(LEFT_UP)-controlStick.get(RIGHT_UP));\n\t\tif (controlStick.getButton(INTAKE_POS_BUTTON)) {\n\t\t\tlift.updatePosition(liftHome);\n\t\t} else if (controlStick.getButton(SWITCH_POS_BUTTON)) {\n\t\t\tlift.updatePosition(liftSwitchHeight);\n\t\t} else if (controlStick.getButton(SCALE_POS_BUTTON)) {\n\t\t\tlift.updatePosition(liftMaxHeight);\n\t\t} else if (controlStick.getButton(LIFT_RECENTER)) {\n\t\t\tlift.liftRecenter();\n\t\t}\n\t\t\n\t\tif (lift.protectionMode()) {\n\t\t\tdrive.restrictedAcc();\n\t\t\tdrive.normalAcc();\n\t\t}\n drive.setLiftHeight(lift.getCurPos());\n\t\tif (driveStick.getButton(LB) && driveStick.getButton(RB)) {\n\t\t\tdrive.updateVelocity(driveStick.getAxis(THROTTLE), driveStick.getAxis(TURN));\n\t\t} else {\n\t\t\tdrive.updateVelocity(-driveStick.getAxis(THROTTLE), driveStick.getAxis(TURN));\n\t\t}\n\t\t\n\t\tdouble[] intakeSpeed = { -driveStick.getAxis(LEFT_UP) - controlStick.getAxis(LEFT_X),\n\t\t\t\tdriveStick.getAxis(RIGHT_UP) - controlStick.getAxis(TURN) };\n\t\tintake.updateSpeed(intakeSpeed);\n\t\tif (intakeSpeed[0]-intakeSpeed[1] > 
0.1 ) {\n\t\t\tpwm.setSpeed(-0.3);\n\t\t} else if(intakeSpeed[0]-intakeSpeed[1] < -0.1) {\n\t\t\tpwm.setSpeed(-0.6);\n\t\t} else {\n//\t\t\tAlliance color = DriverStation.getInstance().getAlliance();\n//\t\t\tif(color == DriverStation.Alliance.Blue){\n//\t\t\t\tpwm.setSpeed(0.3);\n//\t\t\t} else {\n//\t\t\t\tpwm.setSpeed(0.6);\n//\t\t\t}\n\t\t\tpwm.setSpeed(0.0);\n\t\t}\n\t\tSystem.out.println(pwm.getRaw());\n\t\t\n\t\tclimber.updateSpeed(controlStick.getAxis(THROTTLE));\n\t}\n\n\t/**\n\t * This function is called periodically during test mode\n\t */\n\t@Override\n\tpublic void testPeriodic() {\n\t}\n}\n"
},
{
"alpha_fraction": 0.516869068145752,
"alphanum_fraction": 0.5661268830299377,
"avg_line_length": 35.14634323120117,
"blob_id": "ad07c62bc5b1664a046543a25a2902167bf96e76",
"content_id": "845a1b79046e50159501120bdeaf8a4b42ab383b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1482,
"license_type": "no_license",
"max_line_length": 382,
"num_lines": 41,
"path": "/MotionMagic/dataCleanUp.py",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport math\n\nfile = open(\"test.csv\", \"r\")\ncontents = file.readlines()\n\npath = []\nfile = open(\"cleanUp.csv\", \"w\")\nusefulData = False\ntimeBias = 0\nx = 0\ny = 0\nlastTime = 0\nlastPos = 0\nfor line in contents:\n\tline = line.split(\",\")\n\tline = list(map(float, line))\n\tif (usefulData == False):\n\t\tif (line[5]!=0):\n\t\t\tusefulData = True\n\t\t\ttimeBias = line[0]\n\t\telse:\n\t\t\tcontinue\n\n\t'''theta = line[1]*3.1415926/180\n\tv = line[5]\n\tdeltaTime = line[0]-lastTime\n\tlastTime = line[0]\n\tdc = deltaTime*v\n\tdx = dc*math.sin(theta)\n\tdy = dc*math.cos(theta)\n\tx+=dx\n\ty+=dy'''\n\tcurPos = line[4]\n\tif ((curPos-lastPos<0 and line[5]>0) or (curPos-lastPos>0 and line[5]<0)):\n\t\tcontinue\n\tlastPos = curPos\n\tvector = str(format(line[0]-timeBias, \".5f\"))+\", \"+str(format(line[1], \".5f\"))+\", \"+str(format(line[2], \".5f\"))+\", \"+str(format(line[3], \".5f\"))+\", \"+str(format(line[4], \".5f\"))+\", \"+str(format(line[5], \".5f\"))+\", \"+str(format(line[6], \".5f\"))+\", \"+str(format(line[7], \".5f\"))+\", \"+str(format(line[8], \".5f\"))+\", \"+str(format(line[9], \".5f\"))+\", \"+str(format(line[10], \".5f\"))+\"\\n\"\n\t#vector = str(format(line[0]-timeBias, \".5f\"))+\", \"+str(format(line[1], \".5f\"))+\", \"+str(format(line[2], \".5f\"))+\", \"+str(format(line[3], \".5f\"))+\", \"+str(format(line[4], \".5f\"))+\", \"+str(format(line[5], \".5f\"))+\", \"+str(format(x, \".5f\"))+\", \"+str(format(y, \".5f\"))+\", \"+str(format(line[6], \".5f\"))+\", \"+str(format(line[7], \".5f\"))+\", \"+str(format(line[8], \".5f\"))+\"\\n\"\n\tfile.write(vector)\n\tprint(vector)\n"
},
{
"alpha_fraction": 0.6169902682304382,
"alphanum_fraction": 0.6563106775283813,
"avg_line_length": 24.407407760620117,
"blob_id": "d6433e9b27ca4362afe3bc5d6cbf40034c3eebd4",
"content_id": "00e85311ba2e1f9efb1af76c3688a3e60efe6621",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2060,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 81,
"path": "/MotionMagic/plot_symmetry.py",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport math\n\nplt.axis([-5, 5, 0, 10])\nplt.ion()\n\ndef draw(fileName, idx, correctionEnabled, loss):\n\tfile = open(fileName+\".csv\", \"r\")\n\tcontents = file.readlines()\n\n\tx = 0\n\ty = 0\n\tvectors = []\n\tfor vector in contents:\n\t\tvector = vector.split(\",\")\n\t\tvector = list(map(float, vector))\n\t\tvectors.append(np.array(vector))\n\n\t#vector = [runtime, theta, omega1, omega2, s, v, x, y, lift, lIntake, rIntake]\n\tvectors = np.array(vectors)\n\tendTime = vectors[len(vectors)-1][0]\n\tstartTime = time.time()\n\tlastIdx = 0\n\tlastTime = 0\n\tmaxCorrection = 3\n\twhile(lastIdx < len(vectors)):\n\t\ttime.sleep(0.001)\n\t\trunTime = time.time()-startTime\n\t\t#print(runTime)\n\t\tif(runTime >= vectors[lastIdx][0]):\n\t\t\tpltSrt = time.time()\n\t\t\tvector = vectors[lastIdx]\n\t\t\tlastIdx += loss\n\n\t\t\tdesX = vector[idx+1]\n\t\t\tdesY = vector[idx+2]\n\t\t\tangleCorrection = 180/3.1415926*math.atan2(desX-x, desY-y)-vector[1];\n\t\t\thypoCorrection = math.hypot(desX-x, desY-y);\n\t\t\tangleCorrection *= math.pow(hypoCorrection, 1);\n\t\t\t#print(desX, x, desY, y, angleCorrection)\n\t\t\tif (angleCorrection > maxCorrection):\n\t\t\t\tangleCorrection = maxCorrection\n\t\t\telif (angleCorrection < -maxCorrection):\n\t\t\t\tangleCorrection = -maxCorrection\n\t\t\tif (vector[idx] < 0):\n\t\t\t\tangleCorrection *= -1\n\n\t\t\tif (correctionEnabled):\n\t\t\t\ttheta = (vector[1]+angleCorrection)*3.1415926/180\n\t\t\telse:\n\t\t\t\ttheta = (vector[1])*3.1415926/180\n\t\t\tv = vector[idx]\n\t\t\tdeltaTime = vector[0]-lastTime\n\t\t\tlastTime = vector[0]\n\t\t\tdc = v*deltaTime\n\t\t\tdx = dc*math.sin(theta)\n\t\t\tdy = dc*math.cos(theta)\n\t\t\tx += dx\n\t\t\ty += dy\n\n\t\t\tplt.scatter(x, y, s=1)\n\t\t\tprint(x, y)\n\t\t\tplt.pause(0.001)\n\n#draw(\"cleanUp\", 5, True, 1)\n#draw(\"timeOptimal\", 4, False)\n#draw(\"timeOptimal\", 4, False, 1)\nfileName = input(\"fileName: \")\ndraw(fileName, 4, True, 3)\n\nfileNameList = list(fileName)\nfor i in range(len(fileNameList)):\n\tif (fileNameList[i] == \"R\"):\n\t\tfileNameList[i] = \"L\"\n\telif (fileNameList[i] == \"L\"):\n\t\tfileNameList[i] = \"R\"\nfileName = \"\".join(fileNameList)\ndraw(fileName, 4, True, 3)\ntime.sleep(3600)\n\n\n"
},
{
"alpha_fraction": 0.7105262875556946,
"alphanum_fraction": 0.7105262875556946,
"avg_line_length": 18,
"blob_id": "e51bf1ade81a8c21df2dcca469e54b0b70eeb0c0",
"content_id": "5b03c9b91ae1d871487f3ad232c3ea5b531b73ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 6,
"path": "/git.sh",
"repo_name": "yorkrobotics/York_FRC_2018",
"src_encoding": "UTF-8",
"text": "read -p \"Commit note: \" note\necho processing\necho $note\ngit add .\ngit commit -m \"$note\"\ngit push -u origin master\n"
}
] | 7 |
Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.- | https://github.com/Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.- | 8ca5065e1997ed88e259bf0ff7cf84ff71505a0b | d2e60fe27377182317ced5731a2e0c8f2b10e69b | e6eac3970a51cc3f4c0f2dcab9ffb86b8d04222b | refs/heads/main | 2023-07-06T21:31:11.253700 | 2021-12-22T20:41:32 | 2021-12-22T20:41:32 | 393,032,601 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5463311672210693,
"alphanum_fraction": 0.5496372580528259,
"avg_line_length": 41.5234375,
"blob_id": "2301d60bab830997b02c7be8b510b1d6f31d0c7a",
"content_id": "6384dad473919ed14ab4f9cfb22dd4b4ea152955",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10889,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 256,
"path": "/Website Files Team 18/website/forms.py",
"repo_name": "Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.-",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileAllowed\nfrom flask_login import current_user\nfrom wtforms import (StringField, PasswordField, SubmitField, BooleanField,\n TextAreaField, SelectField, IntegerField,DateTimeField,DateField)\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError, Optional\nfrom website.models import Register,Admin,Patient,Doctor,Operation\n\n\n\n\n# forms.py\nclass RegistrationForm(FlaskForm):\n username = StringField('Username',\n validators=[DataRequired(), Length(min=2, max=20)])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password',\n validators=[DataRequired(), EqualTo('password')])\n registeredas= SelectField('Register as:',\n validators=[DataRequired()],\n choices = [('Admin', 'admin'), ('Doctor', 'doctor'),('Patient', 'patient')])\n submit = SubmitField('Sign Up')\n\n #check that email and username were not taken before \n def validate_username(self, username):\n user = Register.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username is taken. Please choose a different one.')\n\n def validate_email(self, email):\n user = Register.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email is taken. Please choose a different one.')\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember = BooleanField('Remember Me')\n submit = SubmitField('Login')\n\n\nclass ContactUsForm(FlaskForm):\n name = StringField('Name',\n validators=[DataRequired(), Length(min=2, max=20)])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n phone = IntegerField('Phone', validators=[DataRequired()])\n message = TextAreaField('Message', validators=[DataRequired()])\n submit = SubmitField('Send')\n\n\nclass AddDoctorForm (FlaskForm):\n id = IntegerField(\"ID\",\n validators = [DataRequired()]) \n ssn = IntegerField(\"SSN\",\n validators = [DataRequired()])\n name = StringField(\"Name\", \n validators = [DataRequired() , Length (min = 2 , max = 50 )])\n sex= SelectField('Sex',\n validators=[DataRequired()],\n choices = [('male', 'Male'), ('female', 'Female')])\n address = StringField (\"Address\",\n validators = [DataRequired(), Length(min = 2 , max = 50)])\n age = IntegerField(\"Age\",\n validators = [DataRequired()])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n phone = IntegerField(\"Phone\",\n validators = [DataRequired()]) \n sub_department= SelectField('Sub-Department',\n validators=[DataRequired()],\n choices = [('Cardiovascular_Surgery','Cardiovascular Surgery'),\n ('Neurosurgery','Neurosurgery'),\n ('Pediatric_Surgery','Pediatric Surgery'),\n ('Bone_Surgery','Bone Surgery')])\n submit = SubmitField(\"Add Doctor\") \n\n def validate_id (self, id):\n id = Doctor.query.filter_by(id=id.data).first()\n if id :\n raise ValidationError(\"This ID is already taken\")\n\n def validate_ssn (self, ssn):\n ssn = Doctor.query.filter_by(ssn = ssn.data).first()\n if ssn :\n raise ValidationError(\"This SSN is already exist\")\n \n def validate_email (self, email) :\n email = Doctor.query.filter_by(email = email.data).first()\n if email :\n raise ValidationError(\"This email is taken. 
Please choose a different one.\")\n\n\n# class UpdateDoctorForm(FlaskForm,idd):\n# id = IntegerField(\"ID\",\n# validators = [DataRequired()]) \n# ssn = IntegerField(\"SSN\",\n# validators = [DataRequired()])\n# name = StringField(\"Name\", \n# validators = [DataRequired() , Length (min = 2 , max = 50 )])\n# sex= SelectField('Sex',\n# validators=[DataRequired()],\n# choices = [('male', 'Male'), ('female', 'Female')])\n# address = StringField (\"Address\",\n# validators = [DataRequired(), Length(min = 2 , max = 50)])\n# age = IntegerField(\"Age\",\n# validators = [DataRequired()])\n# email = StringField('Email',\n# validators=[DataRequired(), Email()])\n# phone = IntegerField(\"Phone\",\n# validators = [DataRequired()]) \n# sub_department= SelectField('Sub-Department',\n# validators=[DataRequired()],\n# choices = [('Cardiovascular_Surgery','Cardiovascular Surgery'),\n# ('Neurosurgery','Neurosurgery'),\n# ('Pediatric_Surgery','Pediatric Surgery'),\n# ('Bone_Surgery','Bone Surgery')])\n# submit = SubmitField(\"Update Doctor\") \n\n# doctor = Doctor.query.get(id=idd) \n\n# def validate_username(self, id):\n# if id.data != doctor.id:\n# user = doctor.query.filter_by(id=id.data).first()\n# if user:\n# raise ValidationError('That ID is taken. Please choose a different one.')\n\n# def validate_username(self, ssn):\n# if ssn.data != doctor.ssn:\n# user = doctor.query.filter_by(ssn=ssn.data).first()\n# if user:\n# raise ValidationError('That SSN is taken. Please choose a different one.')\n\n# def validate_email(self, email):\n# if email.data != doctor.email:\n# user = doctor.query.filter_by(email=email.data).first()\n# if user:\n# raise ValidationError('That Email is taken. Please choose a different one.') \n\n\n\nclass AddPatientForm (FlaskForm):\n id = IntegerField(\"ID\",\n validators = [DataRequired()]) \n ssn = IntegerField(\"SSN\",\n validators = [DataRequired()])\n name = StringField(\"Name\", \n validators = [DataRequired() , Length (min = 2 , max = 50 )])\n sex= SelectField('Sex',\n validators=[DataRequired()],\n choices = [('male', 'Male'), ('female', 'Female')])\n address = StringField (\"Address\",\n validators = [DataRequired(), Length(min = 2 , max = 50)])\n age = IntegerField(\"Age\",\n validators = [DataRequired()])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n phone = IntegerField(\"Phone\",\n validators = [DataRequired()]) \n\n medical_history = StringField (\"Medical History\",\n validators = [DataRequired(), Length(min = 2 , max = 50)])\n\n image_file = FileField('Select Patient Scan ', validators=[FileAllowed(['jpg', 'png']), Optional()]) \n\n submit = SubmitField(\"Add Patient\") \n\n def validate_id (self, id):\n id = Patient.query.filter_by(id=id.data).first()\n if id :\n raise ValidationError(\"This ID is already taken\")\n\n def validate_ssn (self, ssn):\n ssn = Patient.query.filter_by(ssn = ssn.data).first()\n if ssn :\n raise ValidationError(\"This SSN is already exist\")\n \n def validate_email (self, email) :\n email = Patient.query.filter_by(email = email.data).first()\n if email :\n raise ValidationError(\"This email is taken. 
Please choose a different one.\")\n\n\nclass UploadPatientScanForm (FlaskForm):\n\n image_file = FileField('Select Patient Scan ', validators=[FileAllowed(['jpg', 'png'])]) \n submit = SubmitField(\"Upload Patient Scan\") \n\n\n\n\nclass AddAdminForm (FlaskForm):\n id = IntegerField(\"ID\",\n validators = [DataRequired()]) \n ssn = IntegerField(\"SSN\",\n validators = [DataRequired()])\n name = StringField(\"Name\", \n validators = [DataRequired() , Length (min = 2 , max = 50 )])\n sex= SelectField('Sex',\n validators=[DataRequired()],\n choices = [('male', 'Male'), ('female', 'Female')])\n address = StringField (\"Address\",\n validators = [DataRequired(), Length(min = 2 , max = 50)])\n age = IntegerField(\"Age\",\n validators = [DataRequired()])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n phone = IntegerField(\"Phone\",\n validators = [DataRequired()]) \n\n submit = SubmitField(\"Add Admin\") \n\n def validate_id (self, id):\n id = Admin.query.filter_by(id=id.data).first()\n if id :\n raise ValidationError(\"This ID is already taken\")\n\n def validate_ssn (self, ssn):\n ssn = Admin.query.filter_by(ssn = ssn.data).first()\n if ssn :\n raise ValidationError(\"This SSN is already exist\")\n \n def validate_email (self, email) :\n email = Admin.query.filter_by(email = email.data).first()\n if email :\n raise ValidationError(\"This email is taken. Please choose a different one.\")\n\n\n\n\nclass AddOperationForm (FlaskForm):\n code = IntegerField(\"Code\",\n validators = [DataRequired()]) \n location = StringField(\"Location\", \n validators = [DataRequired() , Length (min = 2 , max = 50 )])\n date = DateField(\"Date\",format='%Y-%m-%d',validators = [Optional()])\n \n start_time = DateTimeField(\"Start Time\",format='%H-%M-%S',validators = [Optional()])\n \n end_time = DateTimeField(\"End Time\",format='%H-%M-%S',validators = [Optional()])\n\n patient_id = IntegerField(\"Patient ID\", \n validators = [DataRequired()])\n doctor_id = IntegerField(\"Doctor ID\", \n validators = [DataRequired()])\n\n submit = SubmitField(\"Add Operation\") \n\n def validate_code (self, code):\n code = Operation.query.filter_by(code=code.data).first()\n if code :\n raise ValidationError(\"This Code is already taken for another operation\")\n\n\n\n"
},
{
"alpha_fraction": 0.568561851978302,
"alphanum_fraction": 0.8143812417984009,
"avg_line_length": 84.42857360839844,
"blob_id": "36230a9c0bd1e64acf736ea5dba3db5da40b346a",
"content_id": "e9017032d327cef9ea9ce06f1ff8283a4cb0dd71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 598,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 7,
"path": "/README.md",
"repo_name": "Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.-",
"src_encoding": "UTF-8",
"text": "# Radiology-Information-System-RIS-for-Surgery-Dept.-\n\n# For the website tour please consider watching the following [video](team18_website_tour.mp4)\n\n\n\n\n"
},
{
"alpha_fraction": 0.6388283967971802,
"alphanum_fraction": 0.6388283967971802,
"avg_line_length": 36.635536193847656,
"blob_id": "fb414655b5992cf7f3bd49ffb12d1d94d27c9dcb",
"content_id": "254715a7c7996a8d4ecdb0f512488451c0d1373b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16524,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 439,
"path": "/Website Files Team 18/website/routes.py",
"repo_name": "Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.-",
"src_encoding": "UTF-8",
"text": "import os\n# import secrets\nfrom flask import render_template, url_for, flash, redirect, request, abort\nfrom website import app, db\nfrom website.forms import (RegistrationForm, LoginForm, ContactUsForm,\n AddDoctorForm, AddPatientForm, UploadPatientScanForm,\n AddAdminForm, AddOperationForm)\n\nfrom website.models import Register, ContactUs, Admin, Patient, Doctor, Operation\nfrom flask_login import login_user, current_user, logout_user, login_required\n\n\n# routes.py\[email protected](\"/\")\[email protected](\"/home\")\ndef home():\n return render_template('home.html', title='Home')\n\n\[email protected](\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\[email protected](\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = Register(username=form.username.data, email=form.email.data,\n password=form.password.data, registered_as=form.registeredas.data)\n db.session.add(user)\n db.session.commit()\n flash(\n f'Account has been created for {form.username.data} as an {form.registeredas.data} You are now able to log in!', 'success')\n return redirect(url_for('home')) # login\n\n return render_template('register.html', title='Register', form=form)\n\n\[email protected](\"/contact-us\", methods=['GET', 'POST'])\ndef contact_us():\n form = ContactUsForm()\n if form.validate_on_submit():\n message = ContactUs(name=form.name.data, email=form.email.data,\n phone=form.phone.data, message=form.message.data)\n db.session.add(message)\n db.session.commit()\n flash('Thank you for contacting with us your request/message has been reported', 'success')\n return redirect(url_for('home'))\n return render_template('contact_us.html', title='Contact Us', form=form)\n\n\n\n\n# admin login\[email protected](\"/login-admin\", methods=['GET', 'POST'])\ndef login_admin():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Register.query.filter_by(email=form.email.data).first()\n if user and user.password == form.password.data:\n if user.registered_as == 'Admin':\n wanted_user = Admin.query.filter_by(email=user.email).first()\n\n login_user(wanted_user, remember=form.remember.data)\n flash(\n f'Successful Login for {user.username} as an {user.registered_as}', 'success')\n\n return redirect(url_for('admin'))\n\n\n else:\n flash(\n 'This user is not registered as an Admin please choose your correct login path', 'danger')\n return redirect(url_for('home'))\n\n else:\n flash('Unsuccessful Login. 
Please check email and password', 'danger')\n return render_template('login_all.html', title='Login Admin', form=form, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin\")\ndef admin():\n return render_template('admin.html', title='Admin', USER='ADMIN')\n\n\n# doctor login\[email protected](\"/login-doctor\", methods=['GET', 'POST'])\ndef login_doctor():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Register.query.filter_by(email=form.email.data).first()\n if user and user.password == form.password.data:\n if user.registered_as == 'Doctor':\n wanted_user = Doctor.query.filter_by(email=user.email).first()\n\n\n login_user(wanted_user, remember=form.remember.data)\n flash(\n f'Successful Login for {user.username} as a {user.registered_as}', 'success')\n\n return render_template( 'doctor_account.html', doctor = wanted_user, title='Login Doctor', form=form, USER='DOCTOR')\n\n else:\n flash(\n 'This user is not registered as an Doctor please choose your correct login path', 'danger')\n return redirect(url_for('home'))\n\n else:\n flash('Unsuccessful Login. Please check email and password', 'danger')\n return render_template('login_all.html', title='Login Doctor', form=form, USER='DOCTOR')\n\n\n@login_required\[email protected](\"/doctor-account\")\ndef doctor_account(doctor):\n return render_template('doctor_account.html', doctor=doctor, title='Doctor Account', USER='DOCTOR')\n\n\n# patient login\[email protected](\"/login-patient\", methods=['GET', 'POST'])\ndef login_patient():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Register.query.filter_by(email=form.email.data).first()\n if user and user.password == form.password.data:\n if user.registered_as == 'Patient':\n wanted_user = Patient.query.filter_by(email=user.email).first()\n\n login_user(wanted_user, remember=form.remember.data)\n flash(f'Successful Login for {user.username} as a {user.registered_as}', 'success')\n\n\n return render_template( 'patient_account.html', patient = wanted_user, title='Login Patient', form=form, USER='PATIENT')\n\n else:\n flash(\n 'This user is not registered as an Patient please choose your correct login path', 'danger')\n return redirect(url_for('home'))\n\n else:\n flash('Unsuccessful Login. 
Please check email and password', 'danger')\n return render_template('login_all.html', title='Login Patient', form=form, USER='PATIENT')\n\n\n@login_required\[email protected](\"/patient-account\", methods=['GET', 'POST'])\ndef patient_account(patient):\n return render_template('patient_account.html', patient = patient, title='Patient Account', USER='PATIENT')\n\n\[email protected](\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n\n# admin main page\n# admin deals with doctor records\n@login_required\[email protected](\"/admin/veiw-doctors\")\ndef admin_view_doctors():\n all_doctors = Doctor.query.all()\n return render_template(\"admin_view_doctors.html\", all_doctors=all_doctors, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/add-new-doctor\", methods=['GET', 'POST'])\ndef admin_add_new_doctor():\n form = AddDoctorForm()\n if form.validate_on_submit():\n doctor = Doctor(id=form.id.data, ssn=form.ssn.data, name=form.name.data,\n sex=form.sex.data, address=form.address.data, age=form.age.data,\n email=form.email.data, phone=form.phone.data,\n sub_department=form.sub_department.data)\n db.session.add(doctor)\n db.session.commit()\n flash(\n f'{doctor.name} record has been added to doctors table in the database!', 'success')\n return redirect(url_for('admin_view_doctors'))\n return render_template(\"admin_add_new_doctor.html\", form=form,\n legend=\"Add New Doctor\", title=\"Add New Doctor\", USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/veiw-doctor-operations/<int:id>\")\ndef admin_view_doctor_operations(id):\n doctor = Doctor.query.filter_by(id=id).first()\n return render_template(\"admin_view_doctor_operations.html\", title='Veiw Doctor Operations',\n doctor=doctor, USER='ADMIN')\n\n\n# @login_required\n# @app.route('admin/update-doctor', methods = ['GET', 'POST'])\n# def admin_update_doctor(idd):\n# form = UpdateDoctorForm(idd)\n\n# if request.method == 'POST':\n# doctor_data = Doctor.query.get(id)\n# doctor_data.id = form.id.data\n# doctor_data.ssn = form.ssn.data\n# doctor_data.name = form.name.data\n# doctor_data.sex = form.sex.data\n# doctor_data.address = form.address.data\n# doctor_data.age = form.age.data\n# doctor_data.email = form.email.data\n# doctor_data.phone = form.phone.data\n# doctor_data.sub_department = form.sub_department.data\n\n# db.session.commit()\n# flash(f'Doctor {doctor_data.name} account has been successfully updated!', 'success')\n# return redirect(url_for('admin_view_doctors'))\n\n# elif request.method == 'GET':\n# form.id.data = doctor_data.id\n# form.ssn.data = doctor_data.ssn\n# form.name.data = doctor_data.name\n# form.sex.data = doctor_data.sex\n# form.address.data = doctor_data.address\n# form.age.data = doctor_data.age\n# form.email.data = doctor_data.email\n# form.phone.data = doctor_data.phone\n# form.sub_department.data = doctor_data.sub_department\n\n# return render_template(\"admin_add_new_doctor.html\", title = 'Update Doctor',\n# USER = 'ADMIN')\n\n\n@login_required\[email protected](\"/admin/delete-doctor/<int:id>\", methods=['GET', 'POST'])\ndef admin_delete_doctor(id):\n doctor = Doctor.query.filter_by(id=id).first()\n for operation in doctor.operations:\n db.session.delete(operation)\n db.session.commit()\n\n db.session.delete(doctor)\n db.session.commit()\n return redirect(url_for('admin_view_doctors'))\n\n\n# admin deals with patient records\n@login_required\[email protected](\"/admin/veiw-patients\")\ndef admin_view_patients():\n all_patients = Patient.query.all()\n return 
render_template(\"admin_view_patients.html\", all_patients=all_patients, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/add-new-patient\", methods=['GET', 'POST'])\ndef admin_add_new_patient():\n form = AddPatientForm()\n if form.validate_on_submit():\n patient = Patient(id=form.id.data, ssn=form.ssn.data, name=form.name.data,\n sex=form.sex.data, address=form.address.data, age=form.age.data,\n email=form.email.data, phone=form.phone.data,\n medical_history=form.medical_history.data, image_file=form.image_file.data)\n\n db.session.add(patient)\n db.session.commit()\n flash(f' {patient.name} has been add to patients table in database please upload patient scan !', 'success')\n return redirect(url_for('admin_view_patients', id=patient.id))\n return render_template(\"admin_add_new_patient.html\", form=form,\n title=\"Add New Patient\", USER='ADMIN')\n\n\n\n@login_required\[email protected](\"/admin/upload-patient-scan/<int:id>/\", methods=['GET', 'POST'])\ndef admin_upload_patient_scan(id):\n patient = Patient.query.filter_by(id=id).first()\n form = UploadPatientScanForm()\n if form.validate_on_submit():\n if form.image_file.data:\n\n picture_file = save_picture(form.image_file.data)\n patient.image_file = picture_file\n\n db.session.commit()\n flash(\n f'{patient.image_file} Scan File has been uploaded for {patient.name}!', 'success')\n return redirect(url_for('admin_view_patients'))\n image_file = url_for(\n 'static', filename='patients_scans/' + patient.image_file)\n return render_template('admin_upload_patient_scan.html', title='Upload Patient Scan', image_file=image_file, form=form, USER='ADMIN')\n\n\ndef save_picture(form_image_file):\n f_name, f_ext = os.path.splitext(form_image_file.filename)\n picture_fn = f_name + f_ext\n picture_path = os.path.join(\n app.root_path, 'static/patients_scans', picture_fn)\n form_image_file.save(picture_path)\n flash(f'{form_image_file}', 'danger')\n\n return picture_fn\n\n\n@login_required\[email protected](\"/admin/veiw-patient-operations/<int:id>\")\ndef admin_view_patient_operations(id):\n patient = Patient.query.filter_by(id=id).first()\n return render_template(\"admin_view_patient_operations.html\", title='Veiw Patient Operations',\n patient=patient, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/delete-patient/<int:id>\", methods=['GET', 'POST'])\ndef admin_delete_patient(id):\n patient = Patient.query.filter_by(id=id).first()\n for operation in patient.operations:\n db.session.delete(operation)\n db.session.commit()\n\n db.session.delete(patient)\n db.session.commit()\n return redirect(url_for('admin_view_patients'))\n\n\n\n\n# #admin deals with OPERATION records\n@login_required\[email protected](\"/admin/veiw-admins\")\ndef admin_view_admins():\n all_admins = Admin.query.all()\n return render_template(\"admin_view_admins.html\", all_admins=all_admins, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/add-new-admin\", methods=['GET', 'POST'])\ndef admin_add_new_admin():\n form = AddAdminForm()\n if form.validate_on_submit():\n admin = Admin(id=form.id.data, ssn=form.ssn.data, name=form.name.data,\n sex=form.sex.data, address=form.address.data, age=form.age.data,\n email=form.email.data, phone=form.phone.data,\n )\n db.session.add(admin)\n db.session.commit()\n flash(\n f'{admin.name} record has been added to admins table in the database!', 'success')\n return redirect(url_for('admin_view_admins'))\n return render_template(\"admin_add_new_admin.html\", form=form,\n legend=\"Add New Admin\", title=\"Add 
New Admin\", USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/delete-admin/<int:id>\", methods=['GET', 'POST'])\ndef admin_delete_admin(id):\n admin = Admin.query.filter_by(id=id).first()\n\n db.session.delete(admin)\n db.session.commit()\n return redirect(url_for('admin_view_admins'))\n\n\n\n\n# #admin deals with Operations records\n@login_required\[email protected](\"/admin/veiw-operations\")\ndef admin_view_operations():\n all_operations = Operation.query.all()\n return render_template(\"admin_view_operations.html\", all_operations=all_operations, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/add-new-operation\", methods=['GET', 'POST'])\ndef admin_add_new_operation():\n form = AddOperationForm()\n if form.validate_on_submit():\n patient = Patient.query.filter_by(id=form.patient_id.data).first()\n doctor = Doctor.query.filter_by(id=form.doctor_id.data).first()\n\n if patient and doctor:\n operation = Operation(code=form.code.data, location=form.location.data, date=form.date.data,\n start_time=form.start_time.data, end_time=form.end_time.data,\n doctor_id=form.doctor_id.data, patient_id=form.patient_id.data\n )\n db.session.add(operation)\n db.session.commit()\n flash(\n f'operation record has been added for patient id {operation.patient_id} and doctor id {operation.doctor_id} to operations table in the database!', 'success')\n return redirect(url_for('admin_view_operations'))\n else:\n flash(\n 'Please make sure that patient and doctor IDs are registered first', 'danger')\n return redirect(url_for('admin'))\n return render_template(\"admin_add_new_operation.html\", form=form,\n legend=\"Add New Operation\", title=\"Add New Operation\", USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/veiw-operation_details/<int:code>\")\ndef admin_view_operation_details(code):\n operation = Operation.query.filter_by(code=code).first()\n return render_template(\"admin_view_operation_details.html\", title='Veiw Operation Details',\n operation=operation, USER='ADMIN')\n\n\n@login_required\[email protected](\"/admin/delete-operation/<int:code>\", methods=['GET', 'POST'])\ndef admin_delete_operation(code):\n operation = Operation.query.filter_by(code=code).first()\n\n db.session.delete(operation)\n db.session.commit()\n return redirect(url_for('admin_view_operations'))\n\n\n# #admin deals with Contactus\n@login_required\[email protected](\"/admin/veiw-contact-us-responses\")\ndef admin_view_contact_us_responses():\n all_responses = ContactUs.query.all()\n return render_template(\"admin_view_contact_us_responses.html\", all_responses=all_responses, USER='ADMIN')\n\n\n\n@login_required\[email protected](\"/admin/delete-contact-us-response/<int:id>\", methods=['GET', 'POST'])\ndef admin_delete_contact_us_response(id):\n response = ContactUs.query.filter_by(id=id).first()\n\n db.session.delete(response)\n db.session.commit()\n return redirect(url_for('admin_view_contact_us_responses'))\n\n\n"
},
{
"alpha_fraction": 0.6569433808326721,
"alphanum_fraction": 0.6711543202400208,
"avg_line_length": 41.6489372253418,
"blob_id": "3a1ccbb85dffe408160175751e8e068bb6a40e02",
"content_id": "ffb7bd29cb1d3b068dcc76d3fa85d0d0f6162925",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4011,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 94,
"path": "/Website Files Team 18/website/models.py",
"repo_name": "Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.-",
"src_encoding": "UTF-8",
"text": "from website import db, login_manager,app\nfrom flask_login import UserMixin\n\n\n@login_manager.user_loader\ndef load_user(email):\n user = Admin.query.get(str(email))\n if user == None:\n user = Doctor.query.get(str(email))\n if user == None:\n user = Patient.query.get(str(email))\n return user\n\n\n\n\n#models.py\nclass Admin(db.Model,UserMixin):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n ssn = db.Column(db.Integer, unique=True ,nullable=False)\n name = db.Column(db.String(50), nullable=False)\n sex = db.Column(db.Enum('male','female'), nullable=False)\n address = db.Column(db.String(50), nullable=False) \n age = db.Column(db.Integer, nullable=False)\n email = db.Column(db.String(50), unique=True, nullable=False)\n phone = db.Column(db.Integer, nullable=False)\n\n def __repr__(self):\n return f\"Admin('{self.id}', '{self.ssn}', '{self.email}')\"\n\n\nclass Doctor(db.Model,UserMixin):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n ssn = db.Column(db.Integer, unique=True ,nullable=False)\n name = db.Column(db.String(50), nullable=False)\n sex = db.Column(db.Enum('male','female'), nullable=False)\n address = db.Column(db.String(50), nullable=False) \n age = db.Column(db.Integer, nullable=False)\n email = db.Column(db.String(50), unique=True, nullable=False)\n phone = db.Column(db.Integer, nullable=False)\n sub_department = db.Column(db.Enum('Cardiovascular_Surgery','Neurosurgery','Pediatric_Surgery','Bone_Surgery'),nullable=False)\n operations = db.relationship('Operation', backref='surgeon', lazy=True)\n\n def __repr__(self):\n return f\"Doctor('{self.id}', '{self.ssn}', '{self.email}')\"\n\n\nclass Patient(db.Model,UserMixin):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n ssn = db.Column(db.Integer, unique=True ,nullable=False)\n name = db.Column(db.String(50), nullable=False)\n sex = db.Column(db.Enum('male','female'), nullable=False)\n address = db.Column(db.String(50), nullable=False) \n age = db.Column(db.Integer, nullable=False)\n email = db.Column(db.String(50), unique=True, nullable=False)\n phone = db.Column(db.Integer, nullable=False)\n medical_history = db.Column(db.String(50), nullable=False) \n image_file = db.Column(db.String(50), nullable=False,default= 'default.jpg')\n operations = db.relationship('Operation', backref='ppatient', lazy=True)\n\n def __repr__(self):\n return f\"Patient('{self.id}', '{self.ssn}', '{self.email}')\"\n\n\nclass Operation(db.Model,UserMixin):\n code = db.Column(db.Integer, primary_key=True, autoincrement=True)\n location = db.Column(db.String(50), nullable=False)\n date = db.Column(db.Date ,nullable=False,default= '2021-01-15')\n start_time = db.Column(db.Time ,nullable=False,default= '00:00:00')\n end_time = db.Column(db.Time ,nullable=False,default= '01:00:00')\n patient_id = db.Column(db.Integer, db.ForeignKey('patient.id'), nullable=False)\n doctor_id = db.Column(db.Integer, db.ForeignKey('doctor.id'), nullable=False)\n\n def __repr__(self):\n return f\"Operation('{self.code}', '{self.date}', '{self.start_time}','{self.end_time}')\"\n\n \nclass Register(db.Model,UserMixin):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n username = db.Column(db.String(20),unique=True ,nullable=False)\n email = db.Column(db.String(50),unique=True, nullable=False)\n password = db.Column(db.String(50), nullable=False)\n registered_as = db.Column(db.Enum('Admin','Doctor','Patient'), nullable=False)\n\n def __repr__(self):\n return f\"Register('{self.email}', 
'{self.registered_as}')\"\n\n\nclass ContactUs(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(50) ,nullable=False)\n email = db.Column(db.String(50), nullable=False)\n phone = db.Column(db.Integer, nullable=False)\n message = db.Column(db.Text(150), nullable=False)\n\n\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.766603410243988,
"avg_line_length": 32,
"blob_id": "5572d3e937ebdd16dcb83a7251480714f71e6498",
"content_id": "b2e648baf45e04ba8a6d443bcfe1cdcce8f271d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 16,
"path": "/Website Files Team 18/website/__init__.py",
"repo_name": "Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.-",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n# from flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql://root:mysql@localhost/surgerydepartmentdb2\"\n# app.config=['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n# bcrypt = Bcrypt(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\n\nfrom website import routes"
},
{
"alpha_fraction": 0.37063151597976685,
"alphanum_fraction": 0.3733905553817749,
"avg_line_length": 41.376625061035156,
"blob_id": "452b79dece26b7b2bbc9d12e1641a0c3e93ad13b",
"content_id": "4622da0dc9238aa7411a65df954735be358c5a8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3262,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 77,
"path": "/Website Files Team 18/website/templates/admin_view_operation_details.html",
"repo_name": "Mohamed-Galloul/Hospital-Information-System-HIS-for-Surgery-Dept.-",
"src_encoding": "UTF-8",
"text": "{% extends \"layout.html\" %}\n{% block content %}\n\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"col md-12\">\n <div class=\"jumbotron p-3\">\n <button type=\"button\" class=\"btn btn-dark btn-lg float-right \"\n onclick=location.href=\"{{url_for('admin_view_operations')}}\">Back\n </button>\n <h2 class=\"h2\">Doctor Details</h2>\n <table class=\"table table-hover table-striped\">\n <thead class=\"thead-dark\">\n <tr>\n <th scope=\"col\">ID</th>\n <th scope=\"col\">SSN</th>\n <th scope=\"col\">Name</th>\n <th scope=\"col\">Sex</th>\n <th scope=\"col\">Address</th>\n <th scope=\"col\">Age</th>\n <th scope=\"col\">Email</th>\n <th scope=\"col\">Phone</th>\n <th scope=\"col\">Sub-Department</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td scope=\"row\">{{operation.surgeon.id}}</td>\n <td>{{operation.surgeon.ssn}}</td>\n <td>{{operation.surgeon.name}}</td>\n <td>{{operation.surgeon.sex }}</td>\n <td>{{operation.surgeon.address}}</td>\n <td>{{operation.surgeon.age}}</td>\n <td>{{operation.surgeon.email}}</td>\n <td>{{operation.surgeon.phone}}</td>\n <td>{{operation.surgeon.sub_department}}</td>\n <tr>\n <tbody>\n </table>\n <hr>\n <h2 class=\"h2\">Patient Details</h2>\n <table class=\"table table-hover table-striped\">\n <thead class=\"thead-dark\">\n <tr>\n <th scope=\"col\">ID</th>\n <th scope=\"col\">SSN</th>\n <th scope=\"col\">Name</th>\n <th scope=\"col\">Sex</th>\n <th scope=\"col\">Address</th>\n <th scope=\"col\">Age</th>\n <th scope=\"col\">Email</th>\n <th scope=\"col\">Phone</th>\n <th scope=\"col\">Medical History</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td scope=\"row\">{{operation.ppatient.id}}</td>\n <td>{{operation.ppatient.ssn}}</td>\n <td>{{operation.ppatient.name}}</td>\n <td>{{operation.ppatient.sex }}</td>\n <td>{{operation.ppatient.address}}</td>\n <td>{{operation.ppatient.age}}</td>\n <td>{{operation.ppatient.email}}</td>\n <td>{{operation.ppatient.phone}}</td>\n <td>{{operation.ppatient.medical_history}}</td>\n <td>\n <tr>\n <tbody>\n </table>\n </div>\n </div>\n </div>\n</div>\n\n</div>\n{% endblock %}"
}
] | 6 |
DannyA0/Dice_Roller | https://github.com/DannyA0/Dice_Roller | 1b11ef306095ca12014c9aeb88a9becc94da47dc | ce6d268f554107029772fdff62294add583767f5 | 70f5002ddcdc5dedc012371ec048362803496508 | refs/heads/master | 2021-05-17T15:51:17.302505 | 2021-04-23T16:20:41 | 2021-04-23T16:20:41 | 250,854,930 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4815843105316162,
"alphanum_fraction": 0.5124930739402771,
"avg_line_length": 25.850515365600586,
"blob_id": "390a6986428d75a031e6b95c92377c531ebbce99",
"content_id": "08dd2eed26aba9254919d8437c50f46502f6ba64",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5403,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 194,
"path": "/Dice Roller 1.2 Eng/Funcs.py",
"repo_name": "DannyA0/Dice_Roller",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\nfrom random import *\r\n\r\n# This file is what it is, I guess I might be able to do this another way, with\r\n# classes maybe (??) But for now, I'm starting out and this works, and I can\r\n# Understand how.\r\n\r\ndef toInt(In):\r\n '''\r\n this is so if you just write for example d20. it turns the '' in the Input\r\n list, into a 1, to roll a single die. I could've done it another way maybe,\r\n but getting the 'menu' with help and invalid input to work by my own was tireing,\r\n so this just wokrs.\r\n '''\r\n if In[0] == '':\r\n return str(1)\r\n else:\r\n return In[0]\r\n\r\ndef help(In):\r\n '''\r\n Called when In is 'h' or 'H'\r\n ----------------\r\n prints: Information on what the program expects as Input.\r\n ----------------\r\n Input: Asks for a new input.\r\n ----------------\r\n returns input In.\r\n '''\r\n os.system('cls')\r\n print ('''\r\n Help\r\n\r\n To roll a die you have to write the # of die\r\n to roll followed by die denomination, for example,\r\n if you want to roll 3 six sided die, you would\r\n write '3d6' (No air quotes), '3D6' would also work,\r\n for single dice, 1d6 or d6 would both work.\r\n\r\n Supported denominations: d4, d6, d8, d10, d12, d20\r\n ''')\r\n In = input(\"\\n Type 'h' for help, 'c' to close or your dice roll. \\n > \")\r\n return In\r\n\r\ndef invIn(In):\r\n '''\r\n Called when '' is entered as input.\r\n Takes 1 argument In.\r\n --------------------\r\n prints error message\r\n ---------------------\r\n returns new input : In\r\n '''\r\n print (\"\\n You have to input something...\")\r\n In = input(\"\\n Type 'h' for help, 'c' to close or your dice roll. \\n > \")\r\n return In\r\n\r\n#This function is called when you get the right format, to initiate the roll.\r\ndef dieRoll(roll):\r\n '''\r\n Takes one input List 'roll'\r\n ------------------\r\n Your roll input (now: In) after being processed by checkSyntax()\r\n ------------------\r\n returns an int :\r\n your result\r\n '''\r\n if roll[0] == '':\r\n numberOfDie = 1\r\n else:\r\n numberOfDie = int(roll[0])\r\n if roll[1] == 'd4' or roll[1] == 'D4':\r\n return rollD4(numberOfDie)\r\n elif roll[1] == 'd6' or roll[1] == 'D6':\r\n return rollD6(numberOfDie)\r\n elif roll[1] == 'd8' or roll[1] == 'D8':\r\n return rollD8(numberOfDie)\r\n elif roll[1] == 'd10' or roll[1] == 'D10':\r\n return rollD10(numberOfDie)\r\n elif roll[1] == 'd12' or roll[1] == 'D12':\r\n return rollD12(numberOfDie)\r\n elif roll[1] == 'd20' or roll[1] == 'D20':\r\n return rollD20(numberOfDie)\r\n\r\n# This are all the functions for the different die.\r\n\r\ndef rollD4(numberOfDie):\r\n '''\r\n Takes one input: numberOfDice\r\n --------------------------\r\n returns a d4 die roll, times the number of die.\r\n '''\r\n if numberOfDie == 1:\r\n return randint(1,4)\r\n else:\r\n return (randint(1,4) + rollD4(numberOfDie-1))\r\n\r\ndef rollD6(numberOfDie):\r\n '''\r\n Takes one input: numberOfDice\r\n --------------------------\r\n returns a d6 die roll, times the number of die.\r\n '''\r\n if numberOfDie == 1:\r\n return randint(1,6)\r\n else:\r\n return (randint(1,6) + rollD6(numberOfDie-1))\r\n\r\ndef rollD8(numberOfDie):\r\n '''\r\n Takes one input: numberOfDice\r\n --------------------------\r\n returns a d8 die roll and adds any aditional rolls.\r\n '''\r\n if numberOfDie == 1:\r\n return randint(1,8)\r\n else:\r\n return (randint(1,8) + rollD6(numberOfDie-1))\r\n\r\ndef rollD10(numberOfDie):\r\n '''\r\n Takes one input: numberOfDice\r\n --------------------------\r\n returns a d10 
die roll, times the number of die.\r\n '''\r\n if numberOfDie == 1:\r\n return randint(1,10)\r\n else:\r\n return (randint(1,10) + rollD6(numberOfDie-1))\r\n\r\ndef rollD12(numberOfDie):\r\n '''\r\n Takes one input: numberOfDice\r\n --------------------------\r\n returns a d12 die roll, times the number of die.\r\n '''\r\n if numberOfDie == 1:\r\n return randint(1,12)\r\n else:\r\n return (randint(1,12) + rollD6(numberOfDie-1))\r\n\r\ndef rollD20(numberOfDie):\r\n '''\r\n Takes one input: numberOfDice\r\n --------------------------\r\n returns a d20 die roll, times the number of die.\r\n '''\r\n if numberOfDie == 1:\r\n return randint(1,20)\r\n else:\r\n return (randint(1,20\r\n\r\n ) + rollD6(numberOfDie-1))\r\n\r\ndef checkSyntax(In):\r\n \"\"\"\r\n Takes two inputs >\r\n\r\n In: Input from end User\r\n diceTypes: the dice types.\r\n -----------\r\n Returns\r\n\r\n syntax: A list syntax = [Number of die to roll, type of dice]\r\n -----------\r\n\r\n \"\"\"\r\n diceTypes = ['d4', 'd6', 'd8', 'd10', 'd12', 'd20',\r\n 'D4', 'D6', 'D8', 'D10', 'D12', 'D20']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '']\r\n numberOfDice = ''\r\n diceType = ''\r\n for ch in range(len(In)):\r\n if In[ch] in numbers:\r\n numberOfDice += In[ch]\r\n elif In[ch] == 'd' or In[ch] == 'D':\r\n diceType = In[ch:len(In)+1]\r\n break\r\n else:\r\n break\r\n\r\n check = [numberOfDice, diceType]\r\n\r\n if check[0] == '':\r\n check[0] = '1'\r\n try:\r\n check[0] = int(check[0])\r\n except:\r\n return 'error'\r\n if check[1] in diceTypes:\r\n return check\r\n else:\r\n return 'error'\r\n"
},
{
"alpha_fraction": 0.7632367610931396,
"alphanum_fraction": 0.7682317495346069,
"avg_line_length": 90,
"blob_id": "91b5795fff9ad10829cb99e78fe75013f7da2ad0",
"content_id": "ce9464db9dd1d6b2073e8cb7b22d5bfa8a0243bf",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1001,
"license_type": "permissive",
"max_line_length": 131,
"num_lines": 11,
"path": "/README.md",
"repo_name": "DannyA0/Dice_Roller",
"src_encoding": "UTF-8",
"text": "# Dice_Roller\nThis is my first program written in python to put into practice what I have been learning.\n\nSo pretty much that, I'm just starting out with programming (for like a year, but now I'm really taking it more serously)\nand I'm learning from different resources, right now, I'm auditing MITx: 6.00.1x and it's been one of the most helpfull resources\nI have tried. Also I'm just reading the docs, PY4E, and SICP (That last one I havent touched much). And yeah. I've been having a\nlot of fun writing [redacted] that doesn't work, lol. But yeah, all jokes aside I'm really enjoying programming, and any tips, code\nmodifications on this little project, or even features, are all well recieved. Also, you can do with my code whatever you want.\n\nSo yeah. My name is Dani BTW. And when I'm not programming, I'm acting, currently studying at a university here in my country, \nand I'm teaching myself cs, programming and game development (With godot for the engine, Aseprite for the art) on the side.\n"
},
{
"alpha_fraction": 0.50352942943573,
"alphanum_fraction": 0.5082352757453918,
"avg_line_length": 27.65116310119629,
"blob_id": "7e92d977da882bded0df2e2c6354b1a04e799df5",
"content_id": "0298e3e45181ccf3206a39e85d505633ca6b3cf0",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1275,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 43,
"path": "/Dice Roller 1.2 Eng/Dice roller 1.2.py",
"repo_name": "DannyA0/Dice_Roller",
"src_encoding": "UTF-8",
"text": "import os\r\nfrom Funcs import *\r\nimport time\r\nos.system('cls')\r\n\r\nprint(\"\\n Welcome to: \\n 'Dice Roller 1.2' \\n \\n\")\r\n\r\nIn = input(\" Type 'h' for help, 'c' to close or your dice roll. \\n > \")\r\n\r\n# Menu loop: This shit was hard to figure out on my own, so it's probably not\r\n# the best solution, but it's my solution. lol. I was actually kinda :| when\r\n# I wrote this and it turned out to be easier than the first few things I tried.\r\n# But hey that's how you learn.\r\nwhile True:\r\n if In == 'h' or In == 'H':\r\n In = help(In)\r\n\r\n elif In == '':\r\n In = invIn(In)\r\n\r\n elif In == 'c':\r\n print ('\\n Thanks for using! Goodbye!')\r\n sys.exit()\r\n else:\r\n In = checkSyntax(In)\r\n if In == 'error':\r\n print ('\\n Invalid input...')\r\n In = input(\"\\n Type 'h' for help, 'c' to close or your dice roll. \\n > \")\r\n else:\r\n result = dieRoll(In)\r\n\r\n number = str(toInt(In))\r\n\r\n os.system('cls')\r\n print(\"\\n 'Dice Roller 1.2'\")\r\n\r\n print('\\n Rolling ' + number + ' ' + In[1] + '...')\r\n\r\n time.sleep(2)\r\n\r\n print('\\n Your roll was: ' + str(result))\r\n\r\n In = input(\"\\n Type 'h' for help, 'c' to close or your dice roll. \\n > \")\r\n"
}
] | 3 |
gordonchen0117/CEB101 | https://github.com/gordonchen0117/CEB101 | bb47b5c4c3308563caee40e608a59defe309f91a | bd311816b12eee0e3a113aaf05d9862c296c15fb | 021cfe0dcdffcf820950c9123ca462ffb5b47768 | refs/heads/main | 2023-02-23T01:26:09.458668 | 2021-01-31T04:28:08 | 2021-01-31T04:28:08 | 312,530,370 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.42495810985565186,
"alphanum_fraction": 0.45778894424438477,
"avg_line_length": 37.49677276611328,
"blob_id": "f5829d461c46ee294aeb2a4b384172914e83f619",
"content_id": "0dfc3795c53c5b88804a0f8b0750abdd85accecb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6376,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 155,
"path": "/trademark/zone4_DataMining.py",
"repo_name": "gordonchen0117/CEB101",
"src_encoding": "UTF-8",
"text": "import re\nimport json\nimport pymongo\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\ndb = client['ceb101']\ncol = db['Ming_zone4']\n\nfind_list = list()\nfor i in col.find({},{\"title\":1,\"_id\":0}):\n find_list.append(i)\n# find_list\n\nn = 0\nfor zone4_title in find_list :\n zone4_dict = {}\n zone4 = col.find_one(zone4_title)\n rep = zone4['content'].replace('(',' ').replace(')',' ')\n try:\n re_words_class =re.findall(\"第19條.{0,20}\", rep)[0]\n try :\n re_words_plaintiff = re.findall(\"註冊.{0,20}「.{0,50}」商標\", rep)[0].replace('「','|').replace('」','|').split('|')[1]\n except IndexError:\n try :\n re_words_plaintiff = re.findall(\"核駁著名.{0,10}「.{0,50}」商標\", rep)[0].replace('「','|').replace('」','|').split('|')[1] \n except IndexError:\n try :\n re_words_plaintiff = re.findall(\"據以核駁.{0,20}「.{0,50}」商標\", rep)[0].replace('「','|').replace('」','|').split('|')[1] \n except IndexError:\n try :\n re_words_plaintiff = re.findall(\"據駁.{0,20}「.{0,50}」商標\", rep)[0].replace('「','|').replace('」','|').split('|')[1] \n \n except :\n# print(zone4['title'])\n continue\n re_photo = re.findall(\"相同.{0,5}文\",rep)\n\n if len(re_photo) != 0 :\n try:\n\n col_trade = db['trademark_jsonData']\n re_words_plaintiff_json = col_trade.find_one({\"tmark-name\":re_words_plaintiff})\n re_words_plaintiff_id = re_words_plaintiff_json['_id']\n\n except :\n continue \n\n try : \n re_words_Established = re.findall(\"主旨.*?\\n\", rep)[0].split('\\\\n')[0].split(',')[1].replace('主旨:','')\n except :\n re_words_Established = \"應予核駁\"\n\n # re_words_Established =re.findall(\"主文.*\", rep)[0].replace('主文','|').replace('。','|').split('|')[1] \\\n # .replace('不受理','不成立')\n try :\n re_word_rule = re.findall(\"商標法施行細則.{0,20}\", rep)[0].replace(\"第\",' ').replace('類',' ').split(' ')[2]\n except :\n re_word_rule ='None'\n\n\n law_re = re.findall(\"商標法第.{0,3}\", rep)\n law_res = list()\n for i in law_re :\n try :\n res = int(i.replace('條','').split('第')[1])\n\n law_res.append(res)\n except :\n pass\n\n re_res = re.findall(\"爰依.{0,20}\", rep)\n try :\n add_re_res = re_res[0].replace(\"第\",\" \").replace(\"條\",\" \").split(\" \")\n for j in add_re_res :\n try :\n law_res.append(int(j))\n except :\n pass\n except :\n pass\n law_list = list()\n # print(law_res)\n for x in law_res:\n if x not in law_list:\n law_list.append(x)\n # # law_str =\"、\".join(law_list)\n\n\n key_word1 = len(re.findall(\"有可能.{0,40}誤認\", rep))\n key_word2 = len(re.findall(\"爰依.\", rep))\n key_word3 = len(re.findall(\"屬構成近似之商標.\", rep))\n key_word4 = len(re.findall(\"不具.{0,20}識別\", rep))\n key_word5 = len(re.findall(\"應予撤銷.\", rep))\n key_word6 = len(re.findall(\"系爭.{0,20}撤銷\", rep))\n key_word7 = len(re.findall(\"有致.{0,40}誤認\", rep))\n key_word8 = len(re.findall(\"不成立.\", rep))\n key_word9 = len(re.findall(\"應具有.{0,20}識別.\", rep))\n key_word10 = len(re.findall(\"近似程度極低.\", rep))\n key_word11 = len(re.findall(\"不會.{0,30}誤認\", rep))\n key_word12 = len(re.findall(\"無法認定.\", rep))\n key_word13 = len(re.findall(\"不會.{0,30}混淆\", rep))\n\n zone4_dict['title'] = zone4['title']\n zone4_dict['plaintiff'] = re_words_plaintiff\n zone4_dict['defendant'] = zone4['markname'].replace('及圖','')\n # zone4_dict['law'] = law_list\n zone4_dict['rule'] = re_word_rule\n zone4_dict['key_word1'] = key_word1 #有可能...誤認\n zone4_dict['key_word2'] = key_word2 #爰依\n zone4_dict['key_word3'] = key_word3 #屬構成近似之商標\n zone4_dict['key_word4'] = key_word4 #不具...識別\n zone4_dict['key_word5'] = key_word5 #應予撤銷\n zone4_dict['key_word6'] = key_word6 #系爭...撤銷\n 
zone4_dict['key_word7'] = key_word7 #有致...誤認\n zone4_dict['key_word8'] = key_word8 #不成立\n zone4_dict['key_word9'] = key_word9 #應具有...識別\n zone4_dict['key_word10'] = key_word10 #近似程度極低\n zone4_dict['key_word11'] = key_word11 #不會...誤認\n zone4_dict['key_word12'] = key_word12 #無法認定\n zone4_dict['key_word13'] = key_word13 #不會...混淆\n for i in range(1,112):\n zone4_dict['law_'+str(int(i))] = 0\n\n for i in law_list :\n if i < 112 :\n\n zone4_dict['law_'+str(int(i))] += 1\n\n established = re.findall('不成立',re_words_Established)\n if len(established) != 0 :\n zone4_dict['established'] = 0\n else :\n zone4_dict['established'] = 1\n n +=1\n\n# print(zone4['title'])\n# print(zone4['markname'])\n# print(re_words_plaintiff)\n# print(law_list)\n # zone4_dict['established'] = re_words_Established\n# print(zone4_dict)\n # print(\"title:\",zone4['title'])\n \n # print(\"defendant:\",zone4['markname'])\n# print(re_words_Established)\n# print(re_word_rule) \n# print(\"-\"*20)\n print(n)\n with open('zone4_data_similar_word.json','a',encoding = 'utf-8') as f :\n json.dump(zone4_dict, f, ensure_ascii=False)\n \n# print(m)\n except :\n pass\n \n"
},
{
"alpha_fraction": 0.6467611193656921,
"alphanum_fraction": 0.6639676094055176,
"avg_line_length": 31.933332443237305,
"blob_id": "b9e5cd86e7c055c4b4ea202af9e10b3903e0c200",
"content_id": "2eb020cc3fff87e29cdf33b5520750ce72d9869f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 988,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 30,
"path": "/trademark/updata_data_tmarkid.py",
"repo_name": "gordonchen0117/CEB101",
"src_encoding": "UTF-8",
"text": "import re\nimport json\nimport pymongo\nfrom pymongo import MongoClient\nimport jieba\n\nclient = MongoClient('localhost', 27017)\ndb = client['ceb101']\n# col = db['final_data_photo']\ncol = db['final_data_zone1_zone2']\n\nfind_list = list()\nfor i in col.find({},{\"_id\":1}):\n find_list.append(i)\n# find_list\n\nfor zone1_title in find_list :\n zone1_dict = {}\n zone1 = col.find_one(zone1_title)\n re_words_plaintiff = zone1['plaintiff']\n col_trade = db['trademark_jsonData']\n re_words_plaintiff_json = col_trade.find_one({\"tmark-name\":re_words_plaintiff})\n if re_words_plaintiff_json['goodsclass-code'] != 'None' :\n re_words_plaintiff_id =re_words_plaintiff_json['goodsclass-code'] + '_' + re_words_plaintiff_json['_id']\n else :\n re_words_plaintiff_None = 'Empty'\n re_words_plaintiff_id =re_words_plaintiff_None + '_' + re_words_plaintiff_json['_id']\n\n print(re_words_plaintiff_id)\n col.update(zone1_title,{\"$set\":{\"tmark_id\":re_words_plaintiff_id}})\n"
},
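updata_data_tmarkid.py composes tmark_id as '<goodsclass-code>_<_id>' and writes it back with the legacy Collection.update(), which PyMongo deprecated in 3.0 and removed in 4.x. A sketch of the same write with update_one, assuming placeholder filter and id values:

from pymongo import MongoClient

client = MongoClient('localhost', 27017)
col = client['ceb101']['final_data_zone1_zone2']

doc_filter = {'_id': 'some-document-id'}  # placeholder filter
tmark_id = '09_012345'                    # placeholder '<goodsclass>_<id>' value

# $set writes a single field in place, matching the script's col.update(...) call.
col.update_one(doc_filter, {'$set': {'tmark_id': tmark_id}})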
{
"alpha_fraction": 0.6240875720977783,
"alphanum_fraction": 0.6569343209266663,
"avg_line_length": 23.176469802856445,
"blob_id": "fc16c7dc7ca590f07ebe9eea99f7e3141b67784e",
"content_id": "24b242e6d92cf213fb7a94524650e03bd2f29674",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 822,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 34,
"path": "/trademark/zone1_data_to_csv.py",
"repo_name": "gordonchen0117/CEB101",
"src_encoding": "UTF-8",
"text": "import re\nimport json\nimport pymongo\nfrom pymongo import MongoClient\nimport pprint\nimport csv\nimport pandas as pd\n\nclient = MongoClient('localhost', 27017)\ndb = client['ceb101']\ncol = db['final_data_photo_similar']\n\nfind_list = list()\nfor i in col.find({},{\"title\":1,\"_id\":0}):\n if i != None :\n find_list.append(i)\n# find_list\n\nzone1_dict = {}\nfor zone1_title in find_list :\n zone1 = col.find_one(zone1_title)\n# print(zone1)\n try :\n zone1_dict[zone1['title']] = [zone1['tmark_id'],zone1['established']]\n except :\n pass\n \n# print(zone1_dict)\ncolumns = ['tmark_id','established']\nzone1_pd = pd.DataFrame.from_dict(data = zone1_dict, orient='index',columns = columns)\n# print(zone1_pd)\nzone1_csv = zone1_pd.to_csv('final_data_photo_similar_test.csv', encoding = 'utf-8')\n\nzone1_pd\n"
},
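zone1_data_to_csv.py hinges on DataFrame.from_dict(..., orient='index'): dict keys become the row index (case titles here) and each value list becomes one row. A small standalone sketch with made-up rows:

import pandas as pd

# Keys stand in for case titles; values mirror [tmark_id, established].
data = {'case-A': ['09_123', 1], 'case-B': ['25_456', 0]}

df = pd.DataFrame.from_dict(data, orient='index',
                            columns=['tmark_id', 'established'])
df.to_csv('sketch.csv', encoding='utf-8')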
{
"alpha_fraction": 0.5075163245201111,
"alphanum_fraction": 0.5392156839370728,
"avg_line_length": 34.3757209777832,
"blob_id": "327f1e2429b4e5a9ba641d1a8452af44ec6ed68b",
"content_id": "cff3e681947f4a193234b022bb3e00e77d91e555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6504,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 173,
"path": "/trademark/zone1_DataMining.py",
"repo_name": "gordonchen0117/CEB101",
"src_encoding": "UTF-8",
"text": "import re\nimport json\nimport pymongo\nfrom pymongo import MongoClient\nimport pprint\nimport jieba\n\nclient = MongoClient('localhost', 27017)\ndb = client['ceb101']\ncol = db['Ming_zone1']\n\njieba.load_userdict('./company.txt')\n\nfind_list = list()\nfor i in col.find({},{\"title\":1,\"_id\":0}):\n find_list.append(i)\n \nn = 0\nm = 0\ndef run() :\n re_words_Established =re.findall(\"主文.*\", rep)[0].replace('主文','|').replace('。','|').split('|')[1] \\\n .replace('不受理','不成立')\n try :\n re_word_rule = re.findall(\"商標法施行細則.{0,20}\", rep)[0].replace(\"第\",' ').replace('類',' ').split(' ')[2]\n except :\n re_word_rule ='None'\n\n\n law_re = re.findall(\"商標法第.{0,3}\", rep)\n law_res = list()\n for i in law_re :\n try :\n res = int(i.replace('條','').split('第')[1])\n\n law_res.append(res)\n except :\n pass\n\n re_res = re.findall(\"爰依.{0,20}\", rep)\n try :\n add_re_res = re_res[0].replace(\"第\",\" \").replace(\"條\",\" \").split(\" \")\n for j in add_re_res :\n try :\n law_res.append(int(j))\n except :\n pass\n except :\n pass\n law_list = list()\n# print(law_res)\n for x in law_res:\n if x not in law_list:\n law_list.append(x)\n# law_str =\"、\".join(law_list)\n\n\n key_word1 = len(re.findall(\"有可能.{0,40}誤認\", rep))\n key_word2 = len(re.findall(\"爰依.\", rep))\n key_word3 = len(re.findall(\"屬構成近似之商標.\", rep))\n key_word4 = len(re.findall(\"不具.{0,20}識別\", rep))\n key_word5 = len(re.findall(\"應予撤銷.\", rep))\n key_word6 = len(re.findall(\"系爭.{0,20}撤銷\", rep))\n key_word7 = len(re.findall(\"有致.{0,40}誤認\", rep))\n key_word8 = len(re.findall(\"不成立.\", rep))\n key_word9 = len(re.findall(\"應具有.{0,20}識別.\", rep))\n key_word10 = len(re.findall(\"近似程度極低.\", rep))\n key_word11 = len(re.findall(\"不會.{0,30}誤認\", rep))\n key_word12 = len(re.findall(\"無法認定.\", rep))\n key_word13 = len(re.findall(\"不會.{0,30}混淆\", rep))\n\n zone1_dict['title'] = zone1['title']\n zone1_dict['plaintiff'] = re_words_plaintiff\n zone1_dict['defendant'] = zone1['markname'].replace('及圖','')\n zone1_dict['rule'] = re_word_rule\n zone1_dict['key_word1'] = key_word1 #有可能...誤認\n zone1_dict['key_word2'] = key_word2 #爰依\n zone1_dict['key_word3'] = key_word3 #屬構成近似之商標\n zone1_dict['key_word4'] = key_word4 #不具...識別\n zone1_dict['key_word5'] = key_word5 #應予撤銷\n zone1_dict['key_word6'] = key_word6 #系爭...撤銷\n zone1_dict['key_word7'] = key_word7 #有致...誤認\n zone1_dict['key_word8'] = key_word8 #不成立\n zone1_dict['key_word9'] = key_word9 #應具有...識別\n zone1_dict['key_word10'] = key_word10 #近似程度極低\n zone1_dict['key_word11'] = key_word11 #不會...誤認\n zone1_dict['key_word12'] = key_word12 #無法認定\n zone1_dict['key_word13'] = key_word13 #不會...混淆\n for i in range(1,112):\n zone1_dict['law_'+str(int(i))] = 0\n\n for i in law_list :\n if i < 112 :\n\n zone1_dict['law_'+str(int(i))] += 1\n\n established = re.findall('異議不成立',re_words_Established)\n if len(established) != 0 :\n zone1_dict['established'] = 0\n else :\n zone1_dict['established'] = 1\n\n \nfor zone1_title in find_list :\n zone1_dict = {}\n zone1 = col.find_one(zone1_title)\n rep = zone1['content'].replace('(',' ').replace(')',' ').replace(':','').replace(':','')\n\n\n try:\n re_words_plaintiff_jieba =jieba.cut(re.findall(\"正本 .{0,30}\", rep)[0].split(' ')[1].split('【')[0])\n\n for i in re_words_plaintiff_jieba :\n i_len = len(re.findall(\"公司\", i))\n if i_len != 0 :\n re_words_plaintiff_company = i\n try :\n re_word_rule = re.findall(\"商標法施行細則.{0,20}\", rep)[0].replace(\"第\",' ').replace('類',' ').split(' ')[2]\n except :\n re_word_rule ='None'\n\n try: \n col_trade = db['trademark_jsonData']\n 
re_words_plaintiff_json = col_trade.find_one({\"company\":re_words_plaintiff_company,\"goodsclass-code\" : re_word_rule.split('、')[0]})\n re_words_plaintiff = re_words_plaintiff_json['tmark-name']\n# print(re_words_plaintiff_company)\n# print(re_words_plaintiff['tmark-name'])\n except :\n continue\n except :\n re_words_plaintiff_jieba =jieba.cut(re.findall(\"異議人.{0,40}\", rep)[0].replace('<','人').split('人')[1])\n\n for i in re_words_plaintiff_jieba :\n i_len = len(re.findall(\"公司\", i))\n if i_len != 0 :\n re_words_plaintiff_company = i\n try :\n re_word_rule = re.findall(\"商標法施行細則.{0,20}\", rep)[0].replace(\"第\",' ').replace('類',' ').split(' ')[2]\n except :\n re_word_rule ='None'\n\n try :\n col_trade = db['trademark_jsonData']\n re_words_plaintiff_json = col_trade.find_one({\"company\":re_words_plaintiff_company,\"goodsclass-code\" : re_word_rule.split('、')[0]})\n re_words_plaintiff = re_words_plaintiff_json['tmark-name']\n except :\n continue\n \n \n re_similar_word = re.findall(\".{0,10}圖樣.{0,20}\",rep)\n# re_similar_word_1 = re.findall(\".{0,10}文.{0,20}區辨\",rep)\n if len(re_similar_word) != 0 :\n run()\n n += 1 \n photo_dict = dict()\n col_final_data_zone1_zone2 = db['final_data_zone1_zone2']\n re_photo_id_json = col_final_data_zone1_zone2.find_one(zone1_title)\n photo_dict[\"title\"] = re_photo_id_json[\"title\"]\n photo_dict[\"tmark_id\"] = re_photo_id_json[\"tmark_id\"]\n photo_dict[\"established\"] = re_photo_id_json[\"established\"]\n print(photo_dict)\n# print(zone1['title'])\n # zone1_dict['established'] = re_words_Established\n # print(zone1_dict)\n # print(\"title:\",zone1['title'])\n # print(re_words_plaintiff)\n # print(\"defendant:\",zone1['markname'])\n # print(re_words_Established)\n # print(re_word_rule) \n print(n)\n# print(m)\n print(\"-\"*20)\n with open('final_data_similar_photo.json','a',encoding = 'utf-8') as f :\n json.dump(photo_dict, f, ensure_ascii=False)\n"
}
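zone1_DataMining.py pulls the opposing company's name out of free text by segmenting a short span with jieba and keeping tokens that contain 公司 ('company'); that is also why it calls jieba.load_userdict('./company.txt') first, since recall depends on the dictionary knowing the full company names. A runnable sketch on made-up text (segmentation output varies with the loaded dictionary):

import jieba

sample = '異議人統一企業股份有限公司代理人某某'  # made-up span after 異議人

# Keep segments containing 公司, as the script does inside its loops.
companies = [token for token in jieba.cut(sample) if '公司' in token]
print(companies)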
] | 4 |
travisbstop/djangoTutorial | https://github.com/travisbstop/djangoTutorial | 1e831c51111ca30f8878c0e96a91e31a2042967b | ef9fa418094740ffe9344ca1e4949561945ec9fd | a111fd49c799e18d4955753843cc4cdee71f1516 | refs/heads/master | 2023-01-28T17:38:54.543896 | 2020-12-10T01:43:00 | 2020-12-10T01:43:00 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.671600341796875,
"alphanum_fraction": 0.671600341796875,
"avg_line_length": 40.61538314819336,
"blob_id": "5868e66b4adebb00b2025fc4571684a6e96613a9",
"content_id": "edc91bc0ad62188687355b13282073e57bfbba54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1081,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 26,
"path": "/mysite/main/forms.py",
"repo_name": "travisbstop/djangoTutorial",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\n\nclass NewUserForm(UserCreationForm):\n #this is how we extend a previously existing form. \n #here we create a field for an email that is required\n #for the form to be valid.\n email = forms.EmailField(required=True)\n\n #i'm guessing that this is like \"meta data\" or in this case data about the form.\n class Meta:\n #i'm guessing that this connects this form to the database via the User model.\n model = User\n #these are the fields for the form\n fields = (\"username\", \"email\", \"password1\", \"password2\")\n #commit the data to the database (user registration data)\n def save(self, commit=True):\n #this is the user model object without the email\n user = super(NewUserForm, self).save(commit=False)\n #set the value of the email from the form\n user.email = self.cleaned_data['email']\n if commit:\n #commit it to the database\n user.save()\n return user"
},
{
"alpha_fraction": 0.7064117193222046,
"alphanum_fraction": 0.7064117193222046,
"avg_line_length": 34.52000045776367,
"blob_id": "36c254b7f885f55c84441f76047f103b0b2d23f1",
"content_id": "896a986d5224c9b78ee769a50190621f71613347",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 889,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 25,
"path": "/mysite/main/admin.py",
"repo_name": "travisbstop/djangoTutorial",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Tutorial, TutorialSeries, TutorialCategory\nfrom tinymce.widgets import TinyMCE\nfrom django.db import models\n# Register your models here.\n\n#this is the attributes of the Tutorial model that will appear \n#on the admin page as well as the order in which they will appear.\nclass TutorialAdmin(admin.ModelAdmin):\n\n fieldsets = [\n (\"title/date published\", {\"fields\": [\"title\", \"datePublished\"]}),\n (\"content\", {\"fields\": [\"content\"]}),\n #not that you would ever want to see/manually chage a foreign key but it's just nice to see.\n (\"foreignKey/slug\", {\"fields\": [\"series\", \"slug\"]})\n ]\n\n formfield_overrides = {\n models.TextField: {'widget': TinyMCE()}\n }\n#register a model\n\nadmin.site.register(TutorialCategory)\nadmin.site.register(TutorialSeries)\nadmin.site.register(Tutorial, TutorialAdmin)\n\n"
},
{
"alpha_fraction": 0.707153856754303,
"alphanum_fraction": 0.7189912796020508,
"avg_line_length": 37.09803771972656,
"blob_id": "e5d3d9072b14b5b075c84673476f18851dccdf0d",
"content_id": "9c39bd4dfffe753b906b0780fa14109f5d5cd1fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1943,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 51,
"path": "/mysite/main/models.py",
"repo_name": "travisbstop/djangoTutorial",
"src_encoding": "UTF-8",
"text": "from django.db import models\nimport time\n\n\n\n\nclass TutorialCategory(models.Model):\n category = models.CharField(max_length=200)\n categorySummary = models.CharField(max_length=200)\n slug = models.CharField(max_length=200)\n\n class Meta:\n #this is for our admin page so it shows up as Catergories instead of Catergorys\n verbose_name_plural = \"Categories\"\n\n def __str__(self):\n return self.category\n\nclass TutorialSeries(models.Model):\n series = models.CharField(max_length=200)\n \"\"\"\n a series is related to a category via category foreign key. \n on delete tells what to do with related objects when a entity is deleted.\n For example, if a category was deleted, you could set it to cascade and delete all\n series related to that category. We obviously don't want to do that in this case so \n we set it to the default category.\n \"\"\" \n category = models.ForeignKey(TutorialCategory, default=1, verbose_name=\"Category\", on_delete=models.SET_DEFAULT)\n summary = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = \"Series\"\n\n def __str__(self):\n return self.series\n\n\n#/seems like each table in the database is modeled by a class like the one below.\n#when making migrations on an actual database, must provide default value on new/added rows.\n#so good database design off the bat is important.\nclass Tutorial(models.Model):\n title = models.CharField(max_length=50)\n datePublished = models.DateTimeField(\"date published\")\n content = models.TextField(null=True)\n\n #this connects the Tutorial table with the TutorialSeries table. better documentation on what this does below.\n series = models.ForeignKey(TutorialSeries, default=1, verbose_name=\"Series\", on_delete=models.SET_DEFAULT)\n #URL to first tutorial in a tutorial series.\n slug = models.CharField(max_length=200, default=1)\n def __str__(self):\n return self.title\n"
},
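The comments in models.py describe a Tutorial -> TutorialSeries -> TutorialCategory foreign-key chain; Django can traverse that chain in a query with double-underscore lookups, which is what views.py exploits later. A sketch as it might run in this project's Django shell (the 'cooking' slug is illustrative):

from main.models import Tutorial

# Each __ hops one foreign key: Tutorial.series -> TutorialSeries.category
# -> TutorialCategory.slug; order_by returns the oldest tutorial first.
qs = (Tutorial.objects
      .filter(series__category__slug='cooking')
      .order_by('datePublished'))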
{
"alpha_fraction": 0.6990678906440735,
"alphanum_fraction": 0.7043941617012024,
"avg_line_length": 41.91428756713867,
"blob_id": "36c8c7c10aa11aa078c01aa862f82e1d8fde3a6d",
"content_id": "078d9f96bedb2c702e0913b7ed9e8f6d6a011ee4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1502,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 35,
"path": "/mysite/main/urls.py",
"repo_name": "travisbstop/djangoTutorial",
"src_encoding": "UTF-8",
"text": "\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom . import views\n\n#this gives us a way of referencing paths without having the exact path. look in views.py to see how.\n#ex. main:register, boom we have that path and we know what to show when redirected.\napp_name = \"main\"\n\n\"\"\"\nat the URI of '' we want to display whatever is in views.home.\n\"\"\"\nurlpatterns = [\n path('', views.home, name='home'),\n path('register/', views.register, name=\"register\"),\n #we wouldn't wanna call this logout because it would overrride djangos logout function.\n path('logout/', views.logout_request, name=\"logout\"),\n path(\"login/\", views.login_request, name=\"login\"),\n #the carrots denote that it is a variable that will be passed to function views.single_slug(request, single_slug).\n #This url must not have any slashes. \n path(\"<single_slug>\", views.single_slug, name=\"single_slug\"),\n]\n"
},
{
"alpha_fraction": 0.6807567477226257,
"alphanum_fraction": 0.681495726108551,
"avg_line_length": 51.43410873413086,
"blob_id": "6873450701afa83f0c35f47d3e6cc71b8e41f564",
"content_id": "2409f826de20dc3dcffcee6010d906f90a18a383",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6766,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 129,
"path": "/mysite/main/views.py",
"repo_name": "travisbstop/djangoTutorial",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import Tutorial, TutorialCategory, TutorialSeries\n#convenient form thats already created for us in django!\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib import messages\n#this is the form that i extended from UserCreationForm\nfrom .forms import NewUserForm\n# Create your views here.\n\"\"\"\nslug is the url. single slug refers to showing different things at the same url rather than redirecting\nto a new one. \n/cooking/\n/tutorial-1-scrambled-eggs\n\"\"\"\ndef single_slug(request, single_slug):\n #need to know: is this a category url or tutorial url?\n categories = [c.slug for c in TutorialCategory.objects.all()]\n if single_slug in categories:\n #category is foreign key in TutorialSeries, double underscore slug gets us the slug of a category.\n #returns list of series that are associated with a category that has a slug that matches the single_slug.\n matching_series = TutorialSeries.objects.filter(category__slug=single_slug)\n\n series_urls = {}\n for m in matching_series.all():\n #first series is foreign key in tutorials, second series is series attribute in TutorialSeries.\n #returns all tutorial objects that are a part of series m, and part 1 has the earliest published date.\n part_one = Tutorial.objects.filter(series__series=m.series).earliest(\"datePublished\")\n #this will give us the tutorial object for the first tutorial in each series\n series_urls[m] = part_one.slug\n return render(request, \"main/category.html\", {\"part_ones\": series_urls})\n\n tutorials = [t.slug for t in Tutorial.objects.all()]\n if single_slug in tutorials:\n #get tutorial in database with slug attribute that matches single_slug.\n #potential issue with this: what if multiple Tutorials have the same slug? \n this_tutorial = Tutorial.objects.get(slug = single_slug)\n #ez query, find all tutorials that are in the same series as this tutorial.\n #foreign key series in Tutorial points to TutorialSeries entry. TutorialSeries entry\n #has attribute that is called series which is the name of the series. So this returns\n #all of the Tutorial objects that have foreign keys that point to TutorialSeries entries\n #that have the same series name as the tutorial we just clicked on, or this tutorial. \n #phew. \n tutorials_from_series = Tutorial.objects.filter(series__series=this_tutorial.series).order_by(\"datePublished\")\n #get the index of the current tutorial to pop it out in the list\n this_tutorial_idx = list(tutorials_from_series).index(this_tutorial)\n return render(request, \"main/tutorial.html\", {\"tutorial\":this_tutorial, \"sidebar\":tutorials_from_series, \"this_tutorial_idx\":this_tutorial_idx})\n return HttpResponse(f\"{single_slug} is a tutorial!!!\")\n \n return HttpResponse(f\"{single_slug} does not correspond to anything. 404\")\n\ndef home(request):\n \"\"\"\n Three things are passesd to render: the request, (get/post),\n the location of the template (html), \n and the context (perhaps data from a database)\n\n why \"main/templates/main/home\" ? 
when we reference templates,\n django looks through all apps for templates directory, multiple \n apps might have a \"main.html\" template, so there could be conflict without\n this distinction with the extra main directory in the templates folder.\n \"\"\"\n #keep in mind that User model must be imported.\n #looks \n return render(request=request, \n template_name=\"main/categories.html\",\n context={\"Categories\": TutorialCategory.objects.all})\n\n\ndef register(request):\n #if the user clicks the register button, request is post (make changes to server)\n if request.method == \"POST\":\n #fill form with data that they inserted\n form = NewUserForm(request.POST)\n if form.is_valid():\n #commit to the database\n user = form.save()\n\n #cleaned_data makes the data in the username field in a normalized format.\n username = form.cleaned_data.get('username')\n #this always goes to the exact user that is supposed to get this message.\n #site-wide notifications are done differently, maybe putting something in the header file.\n #this doesn't actually send a message, but it stores it.\n messages.success(request, f\"New Account Created: {username}\")\n #login the user\n login(request, user)\n messages.info(request, f\"You are now logged in as {username}\")\n #return them to the homepage\n #finds that app_name = 'main' in urls.py and then finds the path with name home and takes user there.\n return redirect('main:home')\n else:\n for msg in form.error_messages:\n #temporary way of handling error messages\n messages.error(request, f\"{msg}: {form.error_messages[msg]}\")\n\n form = NewUserForm\n return render(request, \"main/register.html\", context={\"form\":form})\n\ndef logout_request(request):\n #use djangos built in function to log the user out (also this is why you don't want your view to be called logout)\n logout(request)\n #create a message that says that the user logged out successfully\n messages.info(request, \"Logged out successfully! WOOOO\")\n #redirect to the path in main with the name home (urls.py)\n return redirect(\"main:home\")\n\ndef login_request(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n #we're getting the value of the 'username' field from the form\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n #if there is a user with the username and password in a database, this will not be None\n user = authenticate(username=username, password=password)\n if user is not None:\n #if the user is valid, log em in! (using djangos default login function)\n login(request, user)\n messages.info(request, f\"You are now logged in as {username}\")\n return redirect(\"main:home\")\n else:\n messages.info(request, f\"invalid username or password!\")\n else:\n messages.info(request, f\"invalid username or password! (form)\")\n\n #\"pass form into the template\"\n form = AuthenticationForm()\n return render(request, \"main/login.html\", {\"form\":form})\n\n\n"
}
] | 5 |
yamahiro-offto/offto-sharetribe-flex-web | https://github.com/yamahiro-offto/offto-sharetribe-flex-web | 9059c9ec5e8550accf18673d37db4c25a53d8532 | a71020b5d269c13491779afd9da4f6eb46f91675 | c699f85aefedf9d4131fed0256bd6258018b40b0 | refs/heads/master | 2023-01-29T06:16:50.141200 | 2020-01-29T17:38:04 | 2020-01-29T17:38:04 | 230,557,194 | 0 | 0 | NOASSERTION | 2019-12-28T04:18:12 | 2020-01-29T17:38:46 | 2023-01-04T13:47:24 | JavaScript | [
{
"alpha_fraction": 0.6587803363800049,
"alphanum_fraction": 0.6623228788375854,
"avg_line_length": 25.434782028198242,
"blob_id": "9e685b5dd416cc15a482dadf544b6403e69d9db0",
"content_id": "6e29116cd9de5712c836608104c8ddd808dc218f",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 7904,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 299,
"path": "/src/util/offtoData.ts",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import { types as sdkTypes } from './sdkLoader';\nimport { plainToClass, classToPlain } from 'class-transformer';\nimport { LabelHTMLAttributes } from 'react';\nimport { propTypes } from './types';\nimport { Decimal } from 'decimal.js';\nimport _ from 'lodash';\nimport { userDisplayNameAsString } from './data';\n\nconst { LatLng, UUID, Money } = sdkTypes;\ntype C_UUID = typeof UUID;\ntype C_LatLng = typeof LatLng;\ntype C_Money = typeof Money;\n\n// ================ const definitions ================ //\n\nexport enum UserType {\n CUSTOMER = 'customer',\n SHOP = 'shop',\n}\n\nexport enum Activity {\n SKI_SNOWBOARD = 'ski_snowboard',\n CYCLE = 'cycle',\n OTHER = 'other',\n}\n\nexport enum RentalStyle {\n CUSTOMER_SELECT = 'customer_select',\n SHOP_RECOMMEND = 'shop_recommend',\n}\n\nexport enum Size {\n XL = 'xl',\n L = 'l',\n M = 'm',\n S = 's',\n XS = 'xs',\n}\n\nexport enum Gender {\n MALE = 'male',\n FEMALE = 'female',\n // UNISEX = 'unisex',\n}\n\nexport enum Age {\n ADULT = 'adult',\n CHILD = 'child',\n // ALL_AGE = 'all_age',\n}\n\nexport enum Skill {\n BEGINNER = 'beginner',\n INTERMEDIATE = 'intermediate',\n ADVANCED = 'advanced',\n}\n\nexport enum Color {\n BLACK = 'black',\n GRAY = 'gray',\n WHITE = 'white',\n RED = 'red',\n ORANGE = 'orange',\n YELLOW = 'yellow',\n GREEN = 'green',\n BLUE = 'blue',\n PURPLE = 'purple',\n PINK = 'pink',\n LIGHT_BLUE = 'light_blue',\n YELLOW_GREEN = 'yellow_green',\n SILVER = 'silver',\n GOLD = 'gold',\n}\n\nexport enum Condition {\n LIKELY_NEW = 'likely_new',\n LITTLE_DAMAGED = 'lettle_damaged',\n SOME_DAMAGED = 'some_damaged',\n}\n\nexport enum ActivityTypeSnow {\n ALL_MOUNTAIN = 'all_mountain',\n BACK_COUNTRY = 'back_country',\n}\n\nexport enum ActivityTypeCycle {}\n\nexport enum ActivityTypeOther {\n NONE = 'none',\n}\n\nexport type ActivityType = ActivityTypeSnow | ActivityTypeCycle | ActivityTypeOther;\nexport const ACTIVITYTYPE_TABLE = {\n [Activity.SKI_SNOWBOARD]: ActivityTypeSnow,\n [Activity.CYCLE]: ActivityTypeCycle,\n [Activity.OTHER]: ActivityTypeOther,\n};\n\n// ================ class definitions ================ //\n\nexport class BusinessHour {\n isRegularHoliday: boolean = false;\n startTime: string = '0';\n endTime: string = '0';\n}\n\nexport class BusinessDate {\n sunday: BusinessHour = new BusinessHour();\n monday: BusinessHour = new BusinessHour();\n tuesday: BusinessHour = new BusinessHour();\n wednesday: BusinessHour = new BusinessHour();\n thursday: BusinessHour = new BusinessHour();\n friday: BusinessHour = new BusinessHour();\n saturday: BusinessHour = new BusinessHour();\n}\n\nexport class OfftoUserPublicData {\n type?: string = UserType.CUSTOMER;\n geolocation?: any; // c_geolocation\n businessDate?: BusinessDate = new BusinessDate();\n activity?: Activity = Activity.OTHER;\n phoneNumber?: string = '000-000-0000';\n additionalItems?: AdditionalItem[] = [];\n\n constructor(publicData?: any) {\n this.type = !(publicData && publicData.type) ? this.type : publicData.type;\n this.geolocation = !(publicData && publicData.geolocation)\n ? this.geolocation\n : publicData.geolocation;\n this.businessDate = !(publicData && publicData.businessDate)\n ? this.businessDate\n : { ...classToPlain(this.businessDate), ...publicData.businessDate };\n this.activity = !(publicData && publicData.activity) ? this.activity : publicData.activity;\n this.phoneNumber = !(publicData && publicData.phoneNumber)\n ? this.phoneNumber\n : publicData.phoneNumber;\n this.additionalItems = !(publicData && publicData.additionalItems)\n ? 
this.additionalItems\n : publicData.additionalItems;\n }\n\n static publicDataIsShop(publicData: any): boolean {\n return !!publicData && !!publicData.type && publicData.type === UserType.SHOP;\n }\n\n static sanitize(publicData: any): Object {\n const default_publicData = classToPlain(new OfftoUserPublicData());\n const new_publicData = {\n ...default_publicData,\n ...publicData,\n };\n\n return new_publicData;\n }\n\n // toPlain(): any {\n // return classToPlain(this);\n // }\n}\n\nexport class OfftoListingDetailInfoSkiSnowboard {\n brand: string = '';\n length: number = 180; // cm\n radius: number = 20; // m\n widthHead: number = 20; // cm\n widthWaist: number = 15; // cm\n widthTail: number = 20; // cm\n binding: string = '';\n modelYear: string = '';\n\n constructor(params?: any) {\n if (params) {\n _.merge(this, params);\n }\n }\n}\n\nexport class OfftoListingDetailInfoCycle {\n constructor(params?: any) {}\n}\nexport class OfftoListingDetailInfoOther {\n constructor(params?: any) {}\n}\n\nexport type OfftoListingDetailInfo =\n | OfftoListingDetailInfoSkiSnowboard\n | OfftoListingDetailInfoCycle\n | OfftoListingDetailInfoOther;\n\n// export const OFFTOLISTING_DETAILINFO_TABLE: { [key: string]: OfftoListingDetailInfo } = {\nexport const OFFTOLISTING_DETAILINFO_TABLE = {\n [Activity.SKI_SNOWBOARD]: OfftoListingDetailInfoSkiSnowboard,\n [Activity.CYCLE]: OfftoListingDetailInfoCycle,\n [Activity.OTHER]: OfftoListingDetailInfoOther,\n};\n\nexport class AdditionalItem {\n id: number = 0; // unixtime, = new Date().getTime(), created in ManageAddtionalItemsForm\n label: string = '';\n price: C_Money = new Money(0, 'JPY');\n\n constructor(additionalItem: any) {\n _.merge(this, additionalItem);\n }\n}\n\nexport class OfftoListingPubilcData {\n activity: Activity = Activity.OTHER;\n rentalStyle: RentalStyle = RentalStyle.CUSTOMER_SELECT;\n gearId: string = '';\n activityType: ActivityType = ActivityTypeOther.NONE;\n size: Size = Size.M;\n skill: Skill = Skill.BEGINNER;\n age: Age = Age.ADULT;\n gender: Gender = Gender.FEMALE;\n color: Color = Color.WHITE;\n condition: Condition = Condition.LIKELY_NEW;\n description: string = '';\n additionalItemIds: string[] = [];\n detailInfo: OfftoListingDetailInfo = new OfftoListingDetailInfoOther();\n\n constructor(publicData?: any) {\n if (publicData) {\n _.merge(this, publicData);\n\n // if (publicData.additionalItems) {\n // this.additionalItems = publicData.additionalItems.map((item: any) => {\n // new AdditionalItem(item);\n // });\n // }\n\n const activity = this.activity;\n const detailInfoType = OFFTOLISTING_DETAILINFO_TABLE[activity];\n this.detailInfo = new detailInfoType(detailInfoType);\n }\n }\n}\n\n// ================ class definitions ================ //\n\nexport class OfftoUser {\n firstName: string = '';\n lastName: string = '';\n displayName: string = '';\n abbreviatedName: string = '';\n bio?: string = '';\n publicData: OfftoUserPublicData = new OfftoUserPublicData();\n protectedData: {} = {};\n privateData: {} = {};\n profileImageId: C_UUID = new UUID();\n\n static userIsShop(user: any): boolean {\n return (\n !!user &&\n !!user.attributes &&\n !!user.attributes.profile &&\n !!user.attributes.profile.publicData &&\n OfftoUserPublicData.publicDataIsShop(user.attributes.profile.publicData)\n );\n }\n\n static createUserParam(createUserParams: any, isShop: boolean = false) {\n // add UserType in publicData.type\n const publicData = isShop\n ? 
{\n ...createUserParams.publicData,\n type: UserType.SHOP,\n }\n : {\n ...createUserParams.publicData,\n type: UserType.CUSTOMER,\n };\n\n // overwrite publicData\n return { ...createUserParams, publicData: publicData };\n }\n\n static sanitizePublicData(pubilcData: any) {\n return OfftoUserPublicData.sanitize(pubilcData);\n }\n}\n\nexport class OfftoListingAttributes {\n title: string = '(no title)';\n description: string = '';\n geolocation: C_LatLng = new LatLng(0, 0);\n price: C_Money = new Money(0, 'JPY');\n publicData: OfftoListingPubilcData = new OfftoListingPubilcData();\n\n constructor(attributes: any) {\n if (attributes) {\n _.merge(this, attributes);\n\n if (attributes.publicData) {\n this.publicData = new OfftoListingPubilcData(attributes.publicData);\n }\n }\n }\n}\n"
},
{
"alpha_fraction": 0.8115941882133484,
"alphanum_fraction": 0.8115941882133484,
"avg_line_length": 69,
"blob_id": "146adad3bbf0cbef06ce9da50ad6dbc339af3b7d",
"content_id": "b7e09c05793c27fa754602a713aa9e4989585ef3",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 69,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 1,
"path": "/misc/flex_cli/showProcessVersionList.sh",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "flex-cli process list --process preauth-nightly-booking -m offto-test"
},
{
"alpha_fraction": 0.8064516186714172,
"alphanum_fraction": 0.8064516186714172,
"avg_line_length": 31,
"blob_id": "e3995838c92087ac1031bb9aeb3d99b0a52c1df3",
"content_id": "fab5fce6ad78022e3bc49d5b4619188f36f93727",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 31,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 1,
"path": "/misc/flex_cli/validateProcessEdn.sh",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "flex-cli process --path process"
},
{
"alpha_fraction": 0.6754751801490784,
"alphanum_fraction": 0.6768659949302673,
"avg_line_length": 33.238094329833984,
"blob_id": "98ef4311705d9e6c9ae004b6c0e95909b819551d",
"content_id": "56fe4124bade5010c0b0d17d370d9d79189a1e54",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2157,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 63,
"path": "/src/components/BookingBreakdown/LineItemAdditionalItemsMaybe.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "/**\n * Renders non-reversal line items that are not listed in the\n * `LINE_ITEMS` array in util/types.js\n *\n * The line items are rendered so that the line item code is formatted to human\n * readable form and the line total is printed as price.\n *\n * If you require another kind of presentation for your line items, add them to\n * the `LINE_ITEMS` array in util/types.js and create a specific line item\n * component for them that can be used in the `BookingBreakdown` component.\n */\nimport React from 'react';\nimport { intlShape } from '../../util/reactIntl';\nimport { formatMoney } from '../../util/currency';\nimport { humanizeLineItemCode } from '../../util/data';\nimport {\n isLineItemAdditionalItem,\n extractLineItemAdditionalItemId,\n propTypes,\n} from '../../util/types';\n\nimport css from './BookingBreakdown.css';\nimport { array } from 'prop-types';\n\nconst LineItemAdditionalItemsMaybe = props => {\n const { transaction, additionalItems, intl } = props;\n\n // resolve unknown non-reversal line items\n const items = transaction.attributes.lineItems.filter(item => isLineItemAdditionalItem(item));\n\n return items.length > 0 ? (\n <React.Fragment>\n {items.map((item, i) => {\n const itemCodeId = Number(extractLineItemAdditionalItemId(item.code));\n\n const matchItems = additionalItems\n ? additionalItems.filter(additionaItem => Number(additionaItem.id) === itemCodeId)\n : [];\n\n const label =\n matchItems.length > 0\n ? String(matchItems[0].label) + \" * \" + String(item.quantity)\n : humanizeLineItemCode(item.code);\n const formattedTotal = formatMoney(intl, item.lineTotal);\n\n return (\n <div key={`${i}-item.code`} className={css.lineItem}>\n <span className={css.itemLabel}>{label}</span>\n <span className={css.itemValue}>{formattedTotal}</span>\n </div>\n );\n })}\n </React.Fragment>\n ) : null;\n};\n\nLineItemAdditionalItemsMaybe.propTypes = {\n transaction: propTypes.transaction.isRequired,\n additionalItems: array.isRequired,\n intl: intlShape.isRequired,\n};\n\nexport default LineItemAdditionalItemsMaybe;\n"
},
{
"alpha_fraction": 0.7599999904632568,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 29,
"blob_id": "171986bcce21a0601d76b85fe9145eb1b14afb25",
"content_id": "ec501ef81a82407cf41280d21acb763462ef4d7b",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 150,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 5,
"path": "/misc/flex_cli/pullProcessEdn.sh",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "flex-cli login\n\nflex-cli process list -m offto-test\n\nflex-cli process pull --process preauth-nightly-booking --version 1 --path process -m offto-test\n"
},
{
"alpha_fraction": 0.6008760929107666,
"alphanum_fraction": 0.6018773317337036,
"avg_line_length": 33.73478317260742,
"blob_id": "ffba30ad1838b4bd69e0247c988e1c3fc44e6b8b",
"content_id": "2731095fb78d4b580f0090e60877431ae306b2dc",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8066,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 230,
"path": "/misc/copyEditListingPanelAndForm.py",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import json\nimport shutil\nimport pathlib\nimport os\n\nORIGINAL_ITEMNAME = f'Detailinfo'\nTARGET_ITEMNAME = f'Additionalitem'\n\n\nORIGINAL_PANEL = f'../src/components/EditListing{ORIGINAL_ITEMNAME}Panel/'\nTARGET_PANEL = f'../src/components/EditListing{TARGET_ITEMNAME}Panel/'\nORIGINAL_PANELNAME = f'EditListing{ORIGINAL_ITEMNAME}Panel'\nTARGET_PANELNAME = f'EditListing{TARGET_ITEMNAME}Panel'\n\nORIGINAL_FORM = f'../src/forms/EditListing{ORIGINAL_ITEMNAME}Form/'\nTARGET_FORM = f'../src/forms/EditListing{TARGET_ITEMNAME}Form/'\nORIGINAL_FORMNAME = f'EditListing{ORIGINAL_ITEMNAME}Form'\nTARGET_FORMNAME = f'EditListing{TARGET_ITEMNAME}Form'\n\nprint(ORIGINAL_ITEMNAME)\nprint(ORIGINAL_PANEL)\nprint(ORIGINAL_FORM)\n\nprint(TARGET_ITEMNAME)\nprint(TARGET_PANEL)\nprint(TARGET_FORM)\n\n# -------------------------------- copy EditListingPanel and change class-names --------------------------------\np = pathlib.Path(os.path.dirname(ORIGINAL_PANEL))\nfiles = p.glob('*')\nfor file in files:\n orig_f = str(file)\n if '__' in orig_f:\n continue\n\n tar_f = orig_f.replace(ORIGINAL_PANELNAME, TARGET_PANELNAME)\n print(tar_f)\n\n tar_dir = os.path.dirname(tar_f)\n if not os.path.exists(tar_dir):\n os.makedirs(tar_dir)\n\n shutil.copy(orig_f, tar_f)\n\n # change class name in files\n with open(tar_f, 'r') as f:\n content = f.read()\n\n with open(tar_f, 'w') as f:\n content = content.replace(ORIGINAL_PANELNAME, TARGET_PANELNAME)\n content = content.replace(ORIGINAL_FORMNAME, TARGET_FORMNAME)\n f.write(content)\n\n\n# -------------------------------- copy EditListingForm and change class-names --------------------------------\np = pathlib.Path(os.path.dirname(ORIGINAL_FORM))\nfiles = p.glob('*')\nfor file in files:\n orig_f = str(file)\n if '__' in orig_f:\n continue\n\n tar_f = orig_f.replace(ORIGINAL_FORMNAME, TARGET_FORMNAME)\n print(tar_f)\n\n tar_dir = os.path.dirname(tar_f)\n if not os.path.exists(tar_dir):\n os.makedirs(tar_dir)\n\n shutil.copy(orig_f, tar_f)\n\n # change class name in files\n with open(tar_f, 'r') as f:\n content = f.read()\n\n with open(tar_f, 'w') as f:\n content_ = content.replace(ORIGINAL_FORMNAME, TARGET_FORMNAME)\n f.write(content_)\n\n\n# -------------------------------- add panel import to components/index.js --------------------------------\nCOMPONENTS_INDEXJS = '../src/components/index.js'\nwith open(COMPONENTS_INDEXJS, 'r') as f:\n lines = f.readlines()\n\nalredy_exsits = [TARGET_PANELNAME in line for line in lines]\nif not any(alredy_exsits):\n idx = [ORIGINAL_PANELNAME in line for line in lines].index(True)\n orig_line = lines[idx]\n tar_line = orig_line.replace(ORIGINAL_PANELNAME, TARGET_PANELNAME)\n lines.insert(idx + 1, tar_line)\n\n with open(COMPONENTS_INDEXJS, 'w') as f:\n f.write(\"\".join(lines))\n\n# -------------------------------- add form import to forms/index.js --------------------------------\nFORMS_INDEXJS = '../src/forms/index.js'\nwith open(FORMS_INDEXJS, 'r') as f:\n lines = f.readlines()\n\nalredy_exsits = [TARGET_FORMNAME in line for line in lines]\nif not any(alredy_exsits):\n idx = [ORIGINAL_FORMNAME in line for line in lines].index(True)\n orig_line = lines[idx]\n tar_line = orig_line.replace(ORIGINAL_FORMNAME, TARGET_FORMNAME)\n lines.insert(idx + 1, tar_line)\n\n with open(FORMS_INDEXJS, 'w') as f:\n f.write(\"\".join(lines))\n\n\n# -------------------------------- add translation --------------------------------\nTRANSLATION_FILE = '../src/translations/en.json'\nwith open(TRANSLATION_FILE, 'r', encoding='utf8') as f:\n trans = 
json.load(f)\n\n# add key for EditListingsPanel/Form\norig_keys = filter(\n lambda key: f'EditListing{ORIGINAL_ITEMNAME}' in key, list(trans.keys()))\norig_keys = list(orig_keys)\nprint(orig_keys)\nfor orig_key in orig_keys:\n tar_key = orig_key.replace(ORIGINAL_ITEMNAME, TARGET_ITEMNAME)\n print(tar_key)\n trans[tar_key] = trans[orig_key]\n\n# add key for tabLabel\norig_keys = filter(\n lambda key: f'tabLabel{ORIGINAL_ITEMNAME}' in key, list(trans.keys()))\norig_keys = list(orig_keys)\nprint(orig_keys)\nfor orig_key in orig_keys:\n tar_key = orig_key.replace(ORIGINAL_ITEMNAME, TARGET_ITEMNAME)\n print(tar_key)\n trans[tar_key] = trans[orig_key]\n\n\nwith open(TRANSLATION_FILE, 'w', encoding='utf8') as f:\n json.dump(trans, f, indent=2, ensure_ascii=False, sort_keys=True)\n\n\n# ----- util func ---------\ndef addLineToTaggedFile(filepath: str, tag: str, lineToAdd: str, addBeforeTab=True):\n with open(filepath, 'r') as f:\n lines = f.readlines()\n\n for i, line in enumerate(lines):\n if lineToAdd.split(\"\\n\")[0] + '\\n' == line: # if already added, no change\n break\n\n if tag in line:\n if addBeforeTab:\n new_lines = lines[:i] + [lineToAdd] + lines[i:]\n else:\n new_lines = lines[:i+1] + [lineToAdd] + lines[i+1:]\n\n with open(filepath, 'w') as f:\n f.write(\"\".join(new_lines))\n\n break\n else:\n raise Exception(\n f\"Error: Not Found '{tag}' in '{filepath}'\")\n\n\n# -------------------------------- add import to EditListingWizardTab --------------------------------\nWIZARD_TAB_FILEPATH = f\"../src/components/EditListingWizard/EditListingWizardTab.js\"\nTAG = \"[ADD_EDITLISTINGPANEL_HERE]\"\nLINE_TO_ADD = f\" {TARGET_PANELNAME},\\n\"\n\naddLineToTaggedFile(WIZARD_TAB_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add const(identifier) to EditListingWizardTab --------------------------------\nTAG = \"[ADD_EDITLISTINGIDENTIFIER_HERE]\"\nLINE_TO_ADD = f\"export const {TARGET_ITEMNAME.upper()} = '{TARGET_ITEMNAME.lower()}';\\n\"\n\naddLineToTaggedFile(WIZARD_TAB_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add supported_tab to EditListingWizardTab --------------------------------\nTAG = \"[ADD_SUPPORTEDTAB_HERE]\"\nLINE_TO_ADD = f\" {TARGET_ITEMNAME.upper()},\\n\"\n\naddLineToTaggedFile(WIZARD_TAB_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add tab_case to EditListingWizardTab --------------------------------\nTAG = \"[ADD_TABCASE_HERE]\"\nLINE_TO_ADD = f\"\"\" case {TARGET_ITEMNAME.upper()}: {{\n return (\n <{TARGET_PANELNAME}\n {{...panelProps({TARGET_ITEMNAME.upper()})}}\n submitButtonText={{createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}}\n onSubmit={{values => {{\n onCompleteEditListingWizardTab(tab, values);\n }}}}\n />\n );\n }}\n\"\"\"\n\naddLineToTaggedFile(WIZARD_TAB_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add const(identifier) to EditListingWizard --------------------------------\nWIZARD_FILEPATH = f\"../src/components/EditListingWizard/EditListingWizard.js\"\nTAG = \"[ADD_EDITLISTINGIDENTIFIER_HERE]\" # NOTE: コメントを分けて、重複しないようにする\nLINE_TO_ADD = f\" {TARGET_ITEMNAME.upper()}, // id added\\n\"\n\naddLineToTaggedFile(WIZARD_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add tab to EditListingWizard --------------------------------\nWIZARD_FILEPATH = f\"../src/components/EditListingWizard/EditListingWizard.js\"\nTAG = \"[ADD_TABS_HERE]\" # NOTE: コメントを分けて、重複しないようにする\nLINE_TO_ADD = f\" {TARGET_ITEMNAME.upper()}, // tab 
added\\n\"\n\naddLineToTaggedFile(WIZARD_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add tab_label to EditListingWizard --------------------------------\nTAG = \"[ADD_TABLABEL_HERE]\"\nLINE_TO_ADD = f\"\"\" }} else if (tab === {TARGET_ITEMNAME.upper()}) {{\n key = 'EditListingWizard.tabLabel{TARGET_ITEMNAME.capitalize()}';\n\"\"\"\n\naddLineToTaggedFile(WIZARD_FILEPATH, TAG, LINE_TO_ADD)\n\n# -------------------------------- add tab_completed to EditListingWizard --------------------------------\nTAG = \"[ADD_TABCOMPLETED_HERE]\"\nLINE_TO_ADD = f\"\"\" case {TARGET_ITEMNAME.upper()}:\n return !!(publicData && publicData.{ORIGINAL_ITEMNAME.lower()}) // TODO: revise;\n\"\"\"\n\naddLineToTaggedFile(WIZARD_FILEPATH, TAG, LINE_TO_ADD)\n\n"
},
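copyEditListingPanelAndForm.py keys every generated line off an [ADD_..._HERE] tag comment and skips the write when the line is already present, so re-running the generator is safe. A standalone sketch of that tag-anchored, idempotent insert (demo.js, the tag, and the inserted line are illustrative):

import pathlib

path = pathlib.Path('demo.js')
path.write_text('// [ADD_TABS_HERE]\n')

def add_line_before_tag(path, tag, line_to_add):
    lines = path.read_text().splitlines(keepends=True)
    if line_to_add in lines:        # already inserted once: do nothing
        return
    idx = next(n for n, line in enumerate(lines) if tag in line)
    lines.insert(idx, line_to_add)  # insert just before the tag line
    path.write_text(''.join(lines))

add_line_before_tag(path, '[ADD_TABS_HERE]', '  MYTAB, // tab added\n')
add_line_before_tag(path, '[ADD_TABS_HERE]', '  MYTAB, // tab added\n')  # no-op
print(path.read_text())  # the generated line appears exactly once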
{
"alpha_fraction": 0.600562572479248,
"alphanum_fraction": 0.600562572479248,
"avg_line_length": 27.440000534057617,
"blob_id": "10bd5fe073b06883a28fc604c433d5ba21d93e30",
"content_id": "92b2417ac164282536fd2739cc014045177874fb",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 711,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 25,
"path": "/src/components/FieldSelectCustom/FieldSelectCustom.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { required } from '../../util/validators';\nimport { FieldSelect } from '../../components';\n\nconst FieldSelectCustom = props => {\n const { classname, name, id, options, label, placeholder, validate } = props;\n const validate_ = required(validate);\n\n return options ? (\n <FieldSelect classname={classname} name={name} id={id} label={label} validate={validate_}>\n <option disabled value=\"\">\n {placeholder}\n </option>\n {options.map(option => {\n return (\n <option key={option.key} value={option.key}>\n {option.label}\n </option>\n );\n })}\n </FieldSelect>\n ) : null;\n};\n\nexport default FieldSelectCustom;\n"
},
{
"alpha_fraction": 0.6036036014556885,
"alphanum_fraction": 0.6041579842567444,
"avg_line_length": 28.569671630859375,
"blob_id": "36feb9c435667d686c458881990aa4e75075c3a6",
"content_id": "f73094201ea5328675cfee39be7085dc7fff3356",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 7215,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 244,
"path": "/src/forms/EditListingDetailinfoForm/EditListingDetailinfoForm.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { arrayOf, bool, func, shape, string } from 'prop-types';\nimport { compose } from 'redux';\nimport { Form as FinalForm } from 'react-final-form';\nimport { intlShape, injectIntl, FormattedMessage } from '../../util/reactIntl';\nimport classNames from 'classnames';\nimport { propTypes } from '../../util/types';\nimport { maxLength, required, composeValidators } from '../../util/validators';\nimport * as offtoData from '../../util/offtoData';\nimport { Form, Button, FieldTextInput, FieldSelectCustom } from '../../components';\n\nimport css from './EditListingDetailinfoForm.css';\n\nconst TITLE_MAX_LENGTH = 60;\n\nconst EditListingDetailinfoFormComponent = props => (\n <FinalForm\n {...props}\n render={formRenderProps => {\n const {\n categories,\n className,\n currentListing,\n disabled,\n ready,\n handleSubmit,\n intl,\n invalid,\n pristine,\n saveActionMsg,\n updated,\n updateInProgress,\n fetchErrors,\n } = formRenderProps;\n\n const titleMessage = intl.formatMessage({ id: 'EditListingDetailinfoForm.title' });\n const titlePlaceholderMessage = intl.formatMessage({\n id: 'EditListingDetailinfoForm.titlePlaceholder',\n });\n const titleRequiredMessage = intl.formatMessage({\n id: 'EditListingDetailinfoForm.titleRequired',\n });\n const maxLengthMessage = intl.formatMessage(\n { id: 'EditListingDetailinfoForm.maxLength' },\n {\n maxLength: TITLE_MAX_LENGTH,\n }\n );\n\n const descriptionMessage = intl.formatMessage({\n id: 'EditListingDetailinfoForm.description',\n });\n const descriptionPlaceholderMessage = intl.formatMessage({\n id: 'EditListingDetailinfoForm.descriptionPlaceholder',\n });\n const maxLength60Message = maxLength(maxLengthMessage, TITLE_MAX_LENGTH);\n const descriptionRequiredMessage = intl.formatMessage({\n id: 'EditListingDetailinfoForm.descriptionRequired',\n });\n\n const { updateListingError, createListingDraftError, showListingsError } = fetchErrors || {};\n const errorMessageUpdateListing = updateListingError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingDetailinfoForm.updateFailed\" />\n </p>\n ) : null;\n\n // This error happens only on first tab (of EditListingWizard)\n const errorMessageCreateListingDraft = createListingDraftError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingDetailinfoForm.createListingDraftError\" />\n </p>\n ) : null;\n\n const errorMessageShowListing = showListingsError ? 
(\n <p className={css.error}>\n <FormattedMessage id=\"EditListingDetailinfoForm.showListingFailed\" />\n </p>\n ) : null;\n\n const classes = classNames(css.root, className);\n const submitReady = (updated && pristine) || ready;\n const submitInProgress = updateInProgress;\n const submitDisabled = invalid || disabled || submitInProgress;\n\n // HTMLs to be displayed in this form\n const formDivs = [];\n\n const activityType =\n offtoData.ACTIVITYTYPE_TABLE[currentListing.attributes.publicData.activity];\n\n // brand\n formDivs.push(\n <FieldTextInput\n id=\"brand\"\n name=\"brand\"\n className={css.title}\n type=\"text\"\n label={'brand'}\n placeholder={'brand of your gear'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // length\n formDivs.push(\n <FieldTextInput\n id=\"length\"\n name=\"length\"\n className={css.title}\n type=\"text\"\n label={'length'}\n placeholder={'Id of this gear in your shop'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // radius\n formDivs.push(\n <FieldTextInput\n id=\"radius\"\n name=\"radius\"\n className={css.title}\n type=\"text\"\n label={'radius'}\n placeholder={'Id of this gear in your shop'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // widthHead\n formDivs.push(\n <FieldTextInput\n id=\"widthHead\"\n name=\"widthHead\"\n className={css.title}\n type=\"text\"\n label={'widthHead'}\n placeholder={'Id of this gear in your shop'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // widthWaist\n formDivs.push(\n <FieldTextInput\n id=\"widthWaist\"\n name=\"widthWaist\"\n className={css.title}\n type=\"text\"\n label={'widthWaist'}\n placeholder={'Id of this gear in your shop'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // widthTail\n formDivs.push(\n <FieldTextInput\n id=\"widthTail\"\n name=\"widthTail\"\n className={css.title}\n type=\"text\"\n label={'widthTail'}\n placeholder={'Id of this gear in your shop'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // binding\n formDivs.push(\n <FieldTextInput\n id=\"binding\"\n name=\"binding\"\n className={css.title}\n type=\"text\"\n label={'binding'}\n placeholder={'name of binding'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // modelYear\n formDivs.push(\n <FieldTextInput\n id=\"modelYear\"\n name=\"modelYear\"\n className={css.title}\n type=\"text\"\n label={'Model Year'}\n placeholder={'title of your gear'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n return (\n <Form className={classes} onSubmit={handleSubmit}>\n {errorMessageCreateListingDraft}\n {errorMessageUpdateListing}\n {errorMessageShowListing}\n\n {formDivs}\n\n <Button\n className={css.submitButton}\n type=\"submit\"\n inProgress={submitInProgress}\n disabled={submitDisabled}\n ready={submitReady}\n >\n {saveActionMsg}\n </Button>\n </Form>\n );\n }}\n />\n);\n\nEditListingDetailinfoFormComponent.defaultProps = { className: null, fetchErrors: null };\n\nEditListingDetailinfoFormComponent.propTypes = {\n className: string,\n currentListing: propTypes.currentListing,\n intl: intlShape.isRequired,\n onSubmit: func.isRequired,\n saveActionMsg: string.isRequired,\n disabled: bool.isRequired,\n ready: bool.isRequired,\n updated: bool.isRequired,\n updateInProgress: bool.isRequired,\n fetchErrors: shape({\n createListingDraftError: propTypes.error,\n showListingsError: propTypes.error,\n updateListingError: 
propTypes.error,\n }),\n categories: arrayOf(\n shape({\n key: string.isRequired,\n label: string.isRequired,\n })\n ),\n};\n\nexport default compose(injectIntl)(EditListingDetailinfoFormComponent);\n"
},
{
"alpha_fraction": 0.6497205495834351,
"alphanum_fraction": 0.6553100943565369,
"avg_line_length": 28.582677841186523,
"blob_id": "ba0fa4e28dd113166c28070509d8f65f758c81e0",
"content_id": "d7bb2344ef731175b9038f2ddd51cc77c4969c4b",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3757,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 127,
"path": "/src/containers/ManageAdditionalItemsPage/ManageAdditionalItemsPage.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport PropTypes from 'prop-types';\nimport { compose } from 'redux';\nimport { connect } from 'react-redux';\nimport { FormattedMessage, injectIntl, intlShape } from '../../util/reactIntl';\nimport { propTypes } from '../../util/types';\nimport { parse } from '../../util/urlHelpers';\nimport { isScrollingDisabled } from '../../ducks/UI.duck';\nimport {\n Page,\n UserNav,\n LayoutSingleColumn,\n LayoutWrapperTopbar,\n LayoutWrapperMain,\n LayoutWrapperFooter,\n Footer,\n} from '../../components';\nimport { ManageAdditionalItemsForm } from '../../forms';\nimport { TopbarContainer } from '..';\n\nimport { updateProfile } from './ManageAdditionalItemsPage.duck';\nimport css from './ManageAdditionalItemsPage.css';\n\nexport class ManageAdditionalItemsPageComponent extends Component {\n constructor(props) {\n super(props);\n }\n\n render() {\n const { currentUser, scrollingDisabled, onUpdateProfile, intl } = this.props;\n\n const handleSubmit = values => {};\n const onUpdate = values => {\n const additionalItems = values;\n const updatedValues = { publicData: { additionalItems } };\n\n onUpdateProfile(updatedValues);\n };\n\n const additionalItems =\n (currentUser && currentUser.attributes.profile.publicData.additionalItems) || [];\n\n const heading =\n additionalItems.length > 0 ? (\n <h1 className={css.title}>\n <FormattedMessage\n id=\"ManageAdditionalItemsPage.youHaveAdditionalItems\"\n values={{ count: additionalItems.length }}\n />\n </h1>\n ) : additionalItems.length === 0 ? (\n <h1 className={css.title}>\n <FormattedMessage id=\"ManageAdditionalItemsPage.noAdditionalItems\" />\n </h1>\n ) : null;\n\n const title = intl.formatMessage({ id: 'ManageAdditionalItemsPage.title' });\n\n const panelWidth = 62.5;\n // Render hints for responsive image\n const renderSizes = [\n `(max-width: 767px) 100vw`,\n `(max-width: 1920px) ${panelWidth / 2}vw`,\n `${panelWidth / 3}vw`,\n ].join(', ');\n\n return (\n <Page className={css.root} title={title} scrollingDisabled={scrollingDisabled}>\n <LayoutSingleColumn>\n <LayoutWrapperTopbar>\n <TopbarContainer currentPage=\"ManageAdditionalItemsPage\" />\n <UserNav selectedPageName=\"ManageAdditionalItemsPage\" currentUser={currentUser} />\n </LayoutWrapperTopbar>\n <LayoutWrapperMain>\n <div className={css.listingPanel}>{heading}</div>\n <div className={css.content}>\n <ManageAdditionalItemsForm\n additionalItems={additionalItems}\n onSubmit={handleSubmit}\n onUpdate={onUpdate}\n />\n </div>\n </LayoutWrapperMain>\n <LayoutWrapperFooter>\n <Footer />\n </LayoutWrapperFooter>\n </LayoutSingleColumn>\n </Page>\n );\n }\n}\n\nManageAdditionalItemsPageComponent.defaultProps = {\n currentUser: null,\n};\n\nconst { bool } = PropTypes;\nManageAdditionalItemsPageComponent.propTypes = {\n currentUser: propTypes.currentUser.isRequired,\n scrollingDisabled: bool.isRequired,\n\n // from injectIntl\n intl: intlShape.isRequired,\n};\n\nconst mapStateToProps = state => {\n const { currentUser } = state.user;\n\n return {\n currentUser,\n scrollingDisabled: isScrollingDisabled(state),\n };\n};\n\nconst mapDispatchToProps = dispatch => ({\n // onImageUpload: data => dispatch(uploadImage(data)),\n onUpdateProfile: data => dispatch(updateProfile(data)),\n});\nconst ManageAdditionalItemsPage = compose(\n connect(\n mapStateToProps,\n mapDispatchToProps\n ),\n injectIntl\n)(ManageAdditionalItemsPageComponent);\n\nexport default ManageAdditionalItemsPage;\n"
},
{
"alpha_fraction": 0.7070151567459106,
"alphanum_fraction": 0.7070151567459106,
"avg_line_length": 32.04545593261719,
"blob_id": "db730142a07a8a721f4bbb5b9df09aacc702185a",
"content_id": "b0bbd70953c326c6f781f7273fa2ad2a82fec6ad",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 2181,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 66,
"path": "/src/util/offtoPrice.ts",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import config from '../config';\nimport { types as sdkTypes } from './sdkLoader';\nimport { nightsBetween, daysBetween } from './dates';\nimport { LINE_ITEM_NIGHT, formatLineItemAdditionalItem } from './types';\n\nconst { Money } = sdkTypes;\n\nexport const customPricingParams = (params: any) => {\n const {\n bookingData,\n bookingDates,\n listing,\n selectedAdditionalItemIdQuantities,\n ...rest\n } = params;\n\n const { bookingStart, bookingEnd } = bookingDates;\n // const listingId = listing.id;\n\n // Convert picked date to date that will be converted on the API as\n // a noon of correct year-month-date combo in UTC\n // const bookingStartForAPI = dateFromLocalToAPI(bookingStart);\n // const bookingEndForAPI = dateFromLocalToAPI(bookingEnd);\n\n // Fetch speculated transaction for showing price in booking breakdown\n // NOTE: if unit type is line-item/units, quantity needs to be added.\n // The way to pass it to checkout page is through pageData.bookingData\n\n // const { bookingStart, bookingEnd, listing, ...rest } = params;\n const { amount, currency } = listing.attributes.price;\n\n const unitType = config.bookingUnitType;\n const isNightly = unitType === LINE_ITEM_NIGHT;\n\n const quantity = isNightly\n ? nightsBetween(bookingStart, bookingEnd)\n : daysBetween(bookingStart, bookingEnd);\n\n const lineItem_listing = {\n code: unitType,\n unitPrice: new Money(amount, currency),\n quantity,\n };\n\n const lineItems_additionalItems = selectedAdditionalItemIdQuantities\n ? selectedAdditionalItemIdQuantities.map((idQuantity: any) => ({\n code: formatLineItemAdditionalItem(idQuantity.id),\n // includeFor: ['customer', 'provider'],\n unitPrice: new Money(idQuantity.item.price.amount, idQuantity.item.price.currency),\n quantity: idQuantity.quantity,\n // reversal: false,\n }))\n : [];\n\n console.log('lineItem_listing', lineItem_listing);\n console.log('lineItems_additionalItems', lineItems_additionalItems);\n\n return {\n listingId: listing.id,\n bookingStart,\n bookingEnd,\n lineItems: [lineItem_listing, ...lineItems_additionalItems],\n selectedAdditionalItemIdQuantities,\n ...rest,\n };\n};\n"
},
{
"alpha_fraction": 0.674435555934906,
"alphanum_fraction": 0.6773488521575928,
"avg_line_length": 31.690475463867188,
"blob_id": "be9a61738d7269f7c39e221bfca6e31176d27c74",
"content_id": "6ce808ecaa501dbe27e864da19861808f93ea3e0",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1373,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 42,
"path": "/src/components/BookingFlow/BookingFlow.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { compose } from 'redux';\nimport { withRouter } from 'react-router-dom';\nimport { intlShape, injectIntl, FormattedMessage } from '../../util/reactIntl';\nimport { arrayOf, bool, func, node, oneOfType, shape, string } from 'prop-types';\nimport classNames from 'classnames';\nimport omit from 'lodash/omit';\nimport { propTypes, LISTING_STATE_CLOSED, LINE_ITEM_NIGHT, LINE_ITEM_DAY } from '../../util/types';\nimport { formatMoney } from '../../util/currency';\nimport { parse, stringify } from '../../util/urlHelpers';\nimport config from '../../config';\nimport { ModalInMobile, Button } from '..';\nimport { BookingDatesForm } from '../../forms';\n\nimport css from './BookingFlow.css';\n\n// This defines when ModalInMobile shows content as Modal\nconst MODAL_BREAKPOINT = 1023;\n\nconst BookingFlow = props => {\n const {\n rootClassName,\n } = props;\n return (\n <ul className={css.bookingFlowWrapper}>\n <li className={css.bookingFlowAccessories}>\n <FormattedMessage id=\"BookingFlow.accessories\" />\n </li>\n <li className={css.bookingFlowCustomerinfo} >\n <FormattedMessage id=\"BookingFlow.customerinfo\" />\n </li>\n <li className={css.bookingFlowPayment} >\n <FormattedMessage id=\"BookingFlow.payment\" />\n </li>\n </ul>\n );\n};\n\nexport default compose(\n withRouter,\n injectIntl\n)(BookingFlow);\n"
},
{
"alpha_fraction": 0.7976190447807312,
"alphanum_fraction": 0.7976190447807312,
"avg_line_length": 84,
"blob_id": "0b83097f17ebd013d1552a386e38272c3fb83c82",
"content_id": "8af0da4da53d57f27f69f0e4a8b854ed92fb4d74",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 1,
"path": "/misc/flex_cli/pushProcessEdn.sh",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "flex-cli process push --path process --process preauth-nightly-booking -m offto-test"
},
{
"alpha_fraction": 0.5860148668289185,
"alphanum_fraction": 0.593440592288971,
"avg_line_length": 25.491804122924805,
"blob_id": "24216727ba7b9baf124d0c40d792bf09437f012c",
"content_id": "8197be28b797d544bbe0ac97f77b264409db4818",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1616,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 61,
"path": "/src/components/SectionWhyOffto/SectionWhyOffto.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport PropTypes from 'prop-types';\nimport { FormattedMessage } from '../../util/reactIntl';\nimport classNames from 'classnames';\n\nimport { NamedLink } from '..';\n\nimport css from './SectionWhyOffto.css';\n\nconst SectionWhyOffto = props => {\n const { rootClassName, className } = props;\n\n const classes = classNames(rootClassName || css.root, className);\n return (\n <div className={classes}>\n <div className={css.title}>\n <FormattedMessage id=\"SectionWhyOffto.titleLineOne\" />\n </div>\n\n <div className={css.steps}>\n <div className={css.step}>\n <h2 className={css.stepTitle}>\n <FormattedMessage id=\"SectionWhyOffto.part1Title\" />\n </h2>\n <p>\n <FormattedMessage id=\"SectionWhyOffto.part1Text\" />\n </p>\n </div>\n\n <div className={css.step}>\n <h2 className={css.stepTitle}>\n <FormattedMessage id=\"SectionWhyOffto.part2Title\" />\n </h2>\n <p>\n <FormattedMessage id=\"SectionWhyOffto.part2Text\" />\n </p>\n </div>\n\n <div className={css.step}>\n <h2 className={css.stepTitle}>\n <FormattedMessage id=\"SectionWhyOffto.part3Title\" />\n </h2>\n <p>\n <FormattedMessage id=\"SectionWhyOffto.part3Text\" />\n </p>\n </div>\n </div>\n </div>\n );\n};\n\nSectionWhyOffto.defaultProps = { rootClassName: null, className: null };\n\nconst { string } = PropTypes;\n\nSectionWhyOffto.propTypes = {\n rootClassName: string,\n className: string,\n};\n\nexport default SectionWhyOffto;\n"
},
{
"alpha_fraction": 0.6235268115997314,
"alphanum_fraction": 0.6239715218544006,
"avg_line_length": 28.979999542236328,
"blob_id": "04b8c8c944c326c74186aae67f9356f3d357058a",
"content_id": "0ebc178f59dd677448795f71449fa6b79f8035fc",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4507,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 150,
"path": "/src/forms/EditListingAdditionalitemForm/EditListingAdditionalitemForm.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { arrayOf, bool, func, shape, string, array } from 'prop-types';\nimport { compose } from 'redux';\nimport { Form as FinalForm } from 'react-final-form';\nimport { OnChange } from 'react-final-form-listeners';\nimport arrayMutators from 'final-form-arrays';\nimport { intlShape, injectIntl, FormattedMessage } from '../../util/reactIntl';\nimport classNames from 'classnames';\nimport { propTypes } from '../../util/types';\nimport { maxLength, required, composeValidators } from '../../util/validators';\nimport * as offtoData from '../../util/offtoData';\nimport {\n Form,\n Button,\n FieldCheckboxGroup,\n FieldTextInput,\n FieldSelectCustom,\n} from '../../components';\n\nimport css from './EditListingAdditionalitemForm.css';\nimport { currentUserShowSuccess } from '../../ducks/user.duck';\n\nconst TITLE_MAX_LENGTH = 60;\n\nconst EditListingAdditionalitemFormComponent = props => (\n <FinalForm\n {...props}\n mutators={{ ...arrayMutators }}\n render={formRenderProps => {\n const {\n className,\n additionalItems,\n disabled,\n ready,\n handleSubmit,\n onChange,\n intl,\n invalid,\n pristine,\n saveActionMsg,\n updated,\n updateInProgress,\n fetchErrors,\n } = formRenderProps;\n\n\n const { updateListingError, createListingDraftError, showListingsError } = fetchErrors || {};\n const errorMessageUpdateListing = updateListingError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingAdditionalitemForm.updateFailed\" />\n </p>\n ) : null;\n\n // This error happens only on first tab (of EditListingWizard)\n const errorMessageCreateListingDraft = createListingDraftError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingAdditionalitemForm.createListingDraftError\" />\n </p>\n ) : null;\n\n const errorMessageShowListing = showListingsError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingAdditionalitemForm.showListingFailed\" />\n </p>\n ) : null;\n\n const classes = classNames(css.root, className);\n const submitReady = (updated && pristine) || ready;\n const submitInProgress = updateInProgress;\n const submitDisabled = invalid || disabled || submitInProgress;\n\n const additionalItemOptions = additionalItems\n ? additionalItems.map((item, index) => {\n return {\n key: item.id,\n label:\n item.price.currency === 'JPY'\n ? 
`${item.label} ${item.price.amount} 円`\n : `${item.label} ${item.price.amount} [${item.price.currency}]`,\n };\n })\n : [];\n\n // HTMLs to be displayed in this form\n const formDivs = [];\n\n // additional items\n formDivs.push(\n <FieldCheckboxGroup\n className={css.additionalItems}\n id={'additionalItemIds'}\n name={'additionalItemIds'}\n options={additionalItemOptions}\n />\n );\n\n return (\n <Form className={classes} onSubmit={handleSubmit}>\n {errorMessageCreateListingDraft}\n {errorMessageUpdateListing}\n {errorMessageShowListing}\n\n {formDivs}\n\n <Button\n className={css.submitButton}\n type=\"submit\"\n inProgress={submitInProgress}\n disabled={submitDisabled}\n ready={submitReady}\n >\n {saveActionMsg}\n </Button>\n <OnChange>\n {(value, previous) => {\n onChange && onChange(value, previous);\n }}\n </OnChange>\n </Form>\n );\n }}\n />\n);\n\nEditListingAdditionalitemFormComponent.defaultProps = { className: null, fetchErrors: null };\n\nEditListingAdditionalitemFormComponent.propTypes = {\n className: string,\n additionalItems: array.isRequired,\n intl: intlShape.isRequired,\n onSubmit: func.isRequired,\n saveActionMsg: string.isRequired,\n disabled: bool.isRequired,\n ready: bool.isRequired,\n updated: bool.isRequired,\n updateInProgress: bool.isRequired,\n fetchErrors: shape({\n createListingDraftError: propTypes.error,\n showListingsError: propTypes.error,\n updateListingError: propTypes.error,\n }),\n categories: arrayOf(\n shape({\n key: string.isRequired,\n label: string.isRequired,\n })\n ),\n};\n\nexport default compose(injectIntl)(EditListingAdditionalitemFormComponent);\n"
},
{
"alpha_fraction": 0.5500097870826721,
"alphanum_fraction": 0.550402045249939,
"avg_line_length": 30.18654441833496,
"blob_id": "449473508f3ad8474d51a7d422c39aa077c0e42d",
"content_id": "28e6c8044903954662972f04812dbc60596f3179",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 10212,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 327,
"path": "/src/forms/EditListingBasicinfoForm/EditListingBasicinfoForm.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { arrayOf, bool, func, shape, string } from 'prop-types';\nimport { compose } from 'redux';\nimport { Form as FinalForm } from 'react-final-form';\nimport { intlShape, injectIntl, FormattedMessage } from '../../util/reactIntl';\nimport classNames from 'classnames';\nimport { propTypes } from '../../util/types';\nimport { maxLength, required, composeValidators } from '../../util/validators';\nimport * as offtoData from '../../util/offtoData';\nimport { Form, Button, FieldTextInput, FieldSelectCustom } from '../../components';\n\nimport css from './EditListingBasicinfoForm.css';\n\nconst TITLE_MAX_LENGTH = 60;\n\nconst EditListingBasicinfoFormComponent = props => (\n <FinalForm\n {...props}\n render={formRenderProps => {\n const {\n categories,\n className,\n currentListing,\n disabled,\n ready,\n handleSubmit,\n intl,\n invalid,\n pristine,\n saveActionMsg,\n updated,\n updateInProgress,\n fetchErrors,\n } = formRenderProps;\n\n const titleMessage = intl.formatMessage({ id: 'EditListingBasicinfoForm.title' });\n const titlePlaceholderMessage = intl.formatMessage({\n id: 'EditListingBasicinfoForm.titlePlaceholder',\n });\n const titleRequiredMessage = intl.formatMessage({\n id: 'EditListingBasicinfoForm.titleRequired',\n });\n const maxLengthMessage = intl.formatMessage(\n { id: 'EditListingBasicinfoForm.maxLength' },\n {\n maxLength: TITLE_MAX_LENGTH,\n }\n );\n\n const descriptionMessage = intl.formatMessage({\n id: 'EditListingBasicinfoForm.description',\n });\n const descriptionPlaceholderMessage = intl.formatMessage({\n id: 'EditListingBasicinfoForm.descriptionPlaceholder',\n });\n const maxLength60Message = maxLength(maxLengthMessage, TITLE_MAX_LENGTH);\n const descriptionRequiredMessage = intl.formatMessage({\n id: 'EditListingBasicinfoForm.descriptionRequired',\n });\n\n const { updateListingError, createListingDraftError, showListingsError } = fetchErrors || {};\n const errorMessageUpdateListing = updateListingError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingBasicinfoForm.updateFailed\" />\n </p>\n ) : null;\n\n // This error happens only on first tab (of EditListingWizard)\n const errorMessageCreateListingDraft = createListingDraftError ? (\n <p className={css.error}>\n <FormattedMessage id=\"EditListingBasicinfoForm.createListingDraftError\" />\n </p>\n ) : null;\n\n const errorMessageShowListing = showListingsError ? 
(\n <p className={css.error}>\n <FormattedMessage id=\"EditListingBasicinfoForm.showListingFailed\" />\n </p>\n ) : null;\n\n const classes = classNames(css.root, className);\n const submitReady = (updated && pristine) || ready;\n const submitInProgress = updateInProgress;\n const submitDisabled = invalid || disabled || submitInProgress;\n\n // HTMLs to be displayed in this form\n const formDivs = [];\n\n const activityType =\n offtoData.ACTIVITYTYPE_TABLE[currentListing.attributes.publicData.activity];\n\n // title\n formDivs.push(\n <FieldTextInput\n id=\"title\"\n name=\"title\"\n className={css.title}\n type=\"text\"\n label={'Gear Name'}\n placeholder={'title of your gear'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // gearId\n formDivs.push(\n <FieldTextInput\n id=\"gearId\"\n name=\"gearId\"\n className={css.title}\n type=\"text\"\n label={'Gear Id'}\n placeholder={'Id of this gear in your shop'}\n validate={composeValidators(required('title is required'))}\n />\n );\n\n // activityType\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n id=\"activityType\"\n name=\"activityType\" // values の key\n label={'activityType'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(activityType).map(activityTypeEnum => {\n return {\n key: activityType[activityTypeEnum],\n label: activityType[activityTypeEnum],\n };\n })}\n />\n </div>\n );\n\n // size\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n classname={css.selectCustom}\n id=\"size\"\n name=\"size\" // values の key\n label={'size'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Size).map(size => {\n return {\n key: offtoData.Size[size],\n label: offtoData.Size[size],\n };\n })}\n />\n </div>\n );\n\n // skill\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n classname={css.selectCustom}\n id=\"skill\"\n name=\"skill\" // values の key\n label={'skill'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Skill).map(skill => {\n return {\n key: offtoData.Skill[skill],\n label: offtoData.Skill[skill],\n };\n })}\n />\n </div>\n );\n\n // age\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n classname={css.selectCustom}\n id=\"age\"\n name=\"age\" // values の key\n label={'age'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Age).map(age => {\n return {\n key: offtoData.Age[age],\n label: offtoData.Age[age],\n };\n })}\n />\n </div>\n );\n\n // gender\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n classname={css.selectCustom}\n id=\"gender\"\n name=\"gender\" // values の key\n label={'gender'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Gender).map(gender => {\n return {\n key: offtoData.Gender[gender],\n label: offtoData.Gender[gender],\n };\n })}\n />\n </div>\n );\n\n // color\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n classname={css.selectCustom}\n id=\"color\"\n 
name=\"color\" // values の key\n label={'color'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Color).map(color => {\n return {\n key: offtoData.Color[color],\n label: offtoData.Color[color],\n };\n })}\n />\n </div>\n );\n\n // condition\n formDivs.push(\n <div className={css.selectCustom}>\n <FieldSelectCustom\n classname={css.selectCustom}\n id=\"condition\"\n name=\"condition\" // values の key\n label={'condition'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Condition).map(condition => {\n return {\n key: offtoData.Condition[condition],\n label: offtoData.Condition[condition],\n };\n })}\n />\n </div>\n );\n\n // description\n formDivs.push(\n <div className={css.title}>\n <FieldTextInput\n id=\"description\"\n name=\"description\"\n className={css.title}\n type=\"textarea\"\n label={'description'}\n placeholder={'explain your gear'}\n validate={composeValidators(required('description is required'))}\n />\n </div>\n );\n\n return (\n <Form className={classes} onSubmit={handleSubmit}>\n {errorMessageCreateListingDraft}\n {errorMessageUpdateListing}\n {errorMessageShowListing}\n\n {formDivs}\n\n <Button\n className={css.submitButton}\n type=\"submit\"\n inProgress={submitInProgress}\n disabled={submitDisabled}\n ready={submitReady}\n >\n {saveActionMsg}\n </Button>\n </Form>\n );\n }}\n />\n);\n\nEditListingBasicinfoFormComponent.defaultProps = { className: null, fetchErrors: null };\n\nEditListingBasicinfoFormComponent.propTypes = {\n className: string,\n currentListing: propTypes.currentListing,\n intl: intlShape.isRequired,\n onSubmit: func.isRequired,\n saveActionMsg: string.isRequired,\n disabled: bool.isRequired,\n ready: bool.isRequired,\n updated: bool.isRequired,\n updateInProgress: bool.isRequired,\n fetchErrors: shape({\n createListingDraftError: propTypes.error,\n showListingsError: propTypes.error,\n updateListingError: propTypes.error,\n }),\n categories: arrayOf(\n shape({\n key: string.isRequired,\n label: string.isRequired,\n })\n ),\n};\n\nexport default compose(injectIntl)(EditListingBasicinfoFormComponent);\n"
},
{
"alpha_fraction": 0.3677777647972107,
"alphanum_fraction": 0.3977777659893036,
"avg_line_length": 22.657894134521484,
"blob_id": "533a869ffc20c888b5400cd56f98fec2e33801c7",
"content_id": "0b8bfc03fe7e229495fff629e9aaaafe84ed23b0",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 900,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 38,
"path": "/misc/offtoDataEnums.py",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "\n# ----------------------------------- color -----------------------------------\ncolors = [\n 'BLACK',\n 'GRAY',\n 'WHITE',\n 'RED',\n 'ORANGE',\n 'YELLOW',\n 'GREEN',\n 'BLUE',\n 'PURPLE',\n 'PINK',\n 'LIGHT_BLUE',\n 'YELLOW_GREEN',\n 'SILVER',\n 'GOLD',\n]\n\n\nfor color in colors:\n # print(f\"{color.upper()} = '{color.capitalize()}', \")\n print(f\"{color.upper()} = '{color.lower()}', \")\n\n# ----------------------------------- width cm -----------------------------------\nwid_start = 50 # 5cm\nwid_end = 200 # 20cm\n \nfor wid in range(wid_start, wid_end, 5):\n print(f'W_{str(wid).zfill(3)} = \"{str(wid/10).zfill(4)}\",')\nprint()\n\n# ----------------------------------- width m -----------------------------------\nwid_start = 10 # 1m\nwid_end = 200 # 20m\n \nfor wid in range(wid_start, wid_end, 10):\n print(f'W_{str(wid).zfill(3)} = \"{str(wid/10).zfill(4)}\",')\nprint()\n"
},
{
"alpha_fraction": 0.6597222089767456,
"alphanum_fraction": 0.6605017185211182,
"avg_line_length": 31.07272720336914,
"blob_id": "0031c07a238caa2274e721a10871f860b3abec36",
"content_id": "f70e7af837431c8442246ded7421dfe7443a5cb9",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 14112,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 440,
"path": "/src/components/EditListingWizard/EditListingWizardTab.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import { types as sdkTypes } from '../../util/sdkLoader';\nimport React from 'react';\nimport PropTypes from 'prop-types';\nimport { intlShape } from '../../util/reactIntl';\nimport routeConfiguration from '../../routeConfiguration';\nimport {\n LISTING_PAGE_PARAM_TYPE_DRAFT,\n LISTING_PAGE_PARAM_TYPE_NEW,\n LISTING_PAGE_PARAM_TYPES,\n} from '../../util/urlHelpers';\nimport { ensureListing } from '../../util/data';\nimport { createResourceLocatorString } from '../../util/routes';\nimport {\n EditListingActivityPanel,\n EditListingAvailabilityPanel,\n EditListingDescriptionPanel,\n EditListingFeaturesPanel,\n EditListingLocationPanel,\n EditListingPhotosPanel,\n EditListingPoliciesPanel,\n EditListingPricingPanel,\n EditListingRentalstylePanel,\n EditListingBasicinfoPanel,\n EditListingDetailinfoPanel,\n EditListingAdditionalitemPanel,\n // [ADD_EDITLISTINGPANEL_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py\n} from '../../components';\n\nimport css from './EditListingWizard.css';\nimport { stringifyDateToISO8601 } from '../../util/dates';\nimport { defaultValueWithEnUSD } from '../FieldCurrencyInput/FieldCurrencyInput.example';\n\nconst { LatLng } = sdkTypes;\n\n// All chars must be lower case.\n// EditListingWizard.tabLabel{*} in src/translation/--.json must be the same\n// with first charactor uppercased (captalized).\n// (related in src/components/EditListingWizard/EditListingWizardTab.js > createNextButtonText())\nexport const AVAILABILITY = 'availability';\nexport const DESCRIPTION = 'description';\nexport const FEATURES = 'features';\nexport const POLICY = 'policy';\nexport const LOCATION = 'location';\nexport const PRICING = 'pricing';\nexport const PHOTOS = 'photos';\nexport const ACTIVITY = 'activity';\nexport const RENTALSTYLE = 'rentalstyle';\nexport const BASICINFO = 'basicinfo';\nexport const DETAILINFO = 'detailinfo';\nexport const ADDITIONALITEM = 'additionalitem';\n// [ADD_EDITLISTINGIDENTIFIER_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py\n\n// EditListingWizardTab component supports these tabs\nexport const SUPPORTED_TABS = [\n DESCRIPTION,\n FEATURES,\n POLICY,\n LOCATION,\n PRICING,\n AVAILABILITY,\n PHOTOS,\n ACTIVITY,\n RENTALSTYLE,\n BASICINFO,\n DETAILINFO,\n ADDITIONALITEM,\n // [ADD_SUPPORTEDTAB_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py\n];\n\nconst pathParamsToNextTab = (params, tab, marketplaceTabs) => {\n const nextTabIndex = marketplaceTabs.findIndex(s => s === tab) + 1;\n const nextTab =\n nextTabIndex < marketplaceTabs.length\n ? 
marketplaceTabs[nextTabIndex]\n : marketplaceTabs[marketplaceTabs.length - 1];\n return { ...params, tab: nextTab };\n};\n\n// When user has update draft listing, he should be redirected to next EditListingWizardTab\nconst redirectAfterDraftUpdate = (listingId, params, tab, marketplaceTabs, history) => {\n const currentPathParams = {\n ...params,\n type: LISTING_PAGE_PARAM_TYPE_DRAFT,\n id: listingId,\n };\n const routes = routeConfiguration();\n\n // Replace current \"new\" path to \"draft\" path.\n // Browser's back button should lead to editing current draft instead of creating a new one.\n if (params.type === LISTING_PAGE_PARAM_TYPE_NEW) {\n const draftURI = createResourceLocatorString('EditListingPage', routes, currentPathParams, {});\n history.replace(draftURI);\n }\n\n // Redirect to next tab\n const nextPathParams = pathParamsToNextTab(currentPathParams, tab, marketplaceTabs);\n const to = createResourceLocatorString('EditListingPage', routes, nextPathParams, {});\n history.push(to);\n};\n\nconst EditListingWizardTab = props => {\n const {\n tab,\n marketplaceTabs,\n params,\n errors,\n fetchInProgress,\n newListingPublished,\n history,\n images,\n availability,\n listing,\n currentUser,\n handleCreateFlowTabScrolling,\n handlePublishListing,\n onUpdateListing,\n onCreateListingDraft,\n onImageUpload,\n onUpdateImageOrder,\n onRemoveImage,\n onChange,\n updatedTab,\n updateInProgress,\n isLastTab,\n intl,\n } = props;\n\n const { type } = params;\n const isNewURI = type === LISTING_PAGE_PARAM_TYPE_NEW;\n const isDraftURI = type === LISTING_PAGE_PARAM_TYPE_DRAFT;\n const isNewListingFlow = isNewURI || isDraftURI;\n\n const currentListing = ensureListing(listing);\n const imageIds = images => {\n return images ? images.map(img => img.imageId || img.id) : null;\n };\n\n const onCompleteEditListingWizardTab = (tab, updateValues, currentUser) => {\n const defaultValues = {\n title: '(no title)',\n geolocation:\n currentUser &&\n currentUser.attributes.profile.publicData.geolocation &&\n currentUser.attributes.profile.publicData.geolocation.selectedPlace &&\n currentUser.attributes.profile.publicData.geolocation.selectedPlace.origin &&\n new LatLng(\n currentUser.attributes.profile.publicData.geolocation.selectedPlace.origin.lat,\n currentUser.attributes.profile.publicData.geolocation.selectedPlace.origin.lng\n ),\n };\n\n updateValues = {\n ...defaultValues,\n ...updateValues,\n };\n\n const { images: updatedImages, ...otherValues } = updateValues;\n const imageProperty =\n typeof updatedImages !== 'undefined' ? { images: imageIds(updatedImages) } : {};\n const updateValuesWithImages = { ...otherValues, ...imageProperty };\n\n if (isNewListingFlow) {\n const onUpsertListingDraft = isNewURI\n ? (tab, updateValues) => onCreateListingDraft(updateValues)\n : onUpdateListing;\n\n const upsertValues = isNewURI\n ? 
updateValuesWithImages\n : { ...updateValuesWithImages, id: currentListing.id };\n\n onUpsertListingDraft(tab, upsertValues)\n .then(r => {\n if (tab !== marketplaceTabs[marketplaceTabs.length - 1]) {\n // Create listing flow: smooth scrolling polyfill to scroll to correct tab\n handleCreateFlowTabScrolling(false);\n\n // After successful saving of draft data, user should be redirected to next tab\n redirectAfterDraftUpdate(r.data.data.id.uuid, params, tab, marketplaceTabs, history);\n } else {\n handlePublishListing(currentListing.id);\n }\n })\n .catch(e => {\n // No need for extra actions\n });\n } else {\n onUpdateListing(tab, { ...updateValuesWithImages, id: currentListing.id });\n }\n };\n\n const panelProps = tab => {\n return {\n className: css.panel,\n errors,\n listing,\n currentUser,\n onChange,\n panelUpdated: updatedTab === tab,\n updateInProgress,\n // newListingPublished and fetchInProgress are flags for the last wizard tab\n ready: newListingPublished,\n disabled: fetchInProgress,\n };\n };\n\n const createNextButtonText = (tab, marketplaceTabs, isNewListingFlow, isLastTab) => {\n const capitalizeFirstLetter = str => {\n return str && str.length > 0 ? str.charAt(0).toUpperCase() + str.slice(1) : '';\n };\n const tabLabel = intl.formatMessage({\n id: `EditListingWizard.tabLabel${capitalizeFirstLetter(tab)}`,\n });\n\n if (isNewListingFlow) {\n if (!isLastTab) {\n const nextTab = marketplaceTabs[marketplaceTabs.indexOf(tab) + 1];\n const nextTabLabel = intl.formatMessage({\n id: `EditListingWizard.tabLabel${capitalizeFirstLetter(nextTab)}`,\n });\n\n // In creating a new listing, and editing not last tab, \"Next {nextTabLebel}\"\n return intl.formatMessage(\n {\n id: 'EditListingWizard.saveNewNotLastTab',\n },\n {\n nextTabLabel,\n }\n );\n } else {\n // In creating a new listing, and editing the last tab, \"Publish listing\"\n return intl.formatMessage({ id: 'EditListingWizard.saveNewLastTab' });\n }\n } else {\n // In creating a already-exist listing, \"Save {tabLabel}\"\n return intl.formatMessage({ id: 'EditListingWizard.saveEditTab' }, { tabLabel: tabLabel });\n }\n };\n\n switch (tab) {\n case DESCRIPTION: {\n return (\n <EditListingDescriptionPanel\n {...panelProps(DESCRIPTION)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case FEATURES: {\n return (\n <EditListingFeaturesPanel\n {...panelProps(FEATURES)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case POLICY: {\n return (\n <EditListingPoliciesPanel\n {...panelProps(POLICY)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case LOCATION: {\n return (\n <EditListingLocationPanel\n {...panelProps(LOCATION)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case PRICING: {\n return (\n <EditListingPricingPanel\n {...panelProps(PRICING)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n 
}\n case AVAILABILITY: {\n return (\n <EditListingAvailabilityPanel\n {...panelProps(AVAILABILITY)}\n availability={availability}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case PHOTOS: {\n return (\n <EditListingPhotosPanel\n {...panelProps(PHOTOS)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n images={images}\n onImageUpload={onImageUpload}\n onRemoveImage={onRemoveImage}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n onUpdateImageOrder={onUpdateImageOrder}\n />\n );\n }\n case ACTIVITY: {\n return (\n <EditListingActivityPanel\n {...panelProps(ACTIVITY)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case RENTALSTYLE: {\n return (\n <EditListingRentalstylePanel\n {...panelProps(RENTALSTYLE)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case BASICINFO: {\n return (\n <EditListingBasicinfoPanel\n {...panelProps(BASICINFO)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case DETAILINFO: {\n return (\n <EditListingDetailinfoPanel\n {...panelProps(DETAILINFO)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n case ADDITIONALITEM: {\n return (\n <EditListingAdditionalitemPanel\n {...panelProps(ADDITIONALITEM)}\n submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}\n onSubmit={values => {\n onCompleteEditListingWizardTab(tab, values, currentUser);\n }}\n />\n );\n }\n // [ADD_TABCASE_HERE] NOTE: Do not delete this line. 
Used by misc/copyEditLisingPanelAndForm.py\n default:\n return null;\n }\n};\n\nEditListingWizardTab.defaultProps = {\n listing: null,\n updatedTab: null,\n};\n\nconst { array, bool, func, object, oneOf, shape, string } = PropTypes;\n\nEditListingWizardTab.propTypes = {\n params: shape({\n id: string.isRequired,\n slug: string.isRequired,\n type: oneOf(LISTING_PAGE_PARAM_TYPES).isRequired,\n tab: oneOf(SUPPORTED_TABS).isRequired,\n }).isRequired,\n errors: shape({\n createListingDraftError: object,\n publishListingError: object,\n updateListingError: object,\n showListingsError: object,\n uploadImageError: object,\n }).isRequired,\n fetchInProgress: bool.isRequired,\n newListingPublished: bool.isRequired,\n history: shape({\n push: func.isRequired,\n replace: func.isRequired,\n }).isRequired,\n images: array.isRequired,\n availability: object.isRequired,\n\n // We cannot use propTypes.listing since the listing might be a draft.\n listing: shape({\n attributes: shape({\n publicData: object,\n description: string,\n geolocation: object,\n pricing: object,\n title: string,\n }),\n images: array,\n }),\n\n handleCreateFlowTabScrolling: func.isRequired,\n handlePublishListing: func.isRequired,\n onUpdateListing: func.isRequired,\n onCreateListingDraft: func.isRequired,\n onImageUpload: func.isRequired,\n onUpdateImageOrder: func.isRequired,\n onRemoveImage: func.isRequired,\n onChange: func.isRequired,\n updatedTab: string,\n updateInProgress: bool.isRequired,\n\n intl: intlShape.isRequired,\n};\n\nexport default EditListingWizardTab;\n"
},
{
"alpha_fraction": 0.7090112566947937,
"alphanum_fraction": 0.7090112566947937,
"avg_line_length": 32.64210510253906,
"blob_id": "203c8f00b79d3527d3fd2dcbcb4d254e97bb3535",
"content_id": "5a9a7d4488684850752ce46e95f841a77173726a",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3196,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 95,
"path": "/src/components/SearchMapShopLabel/SearchMapShopLabel.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport PropTypes from 'prop-types';\nimport { injectIntl, intlShape } from '../../util/reactIntl';\nimport classNames from 'classnames';\nimport { propTypes } from '../../util/types';\nimport { formatMoney } from '../../util/currency';\nimport { ensureListing, ensureUser } from '../../util/data';\nimport config from '../../config';\n\nimport css from './SearchMapShopLabel.css';\n\nconst SUPPORTED_CATEGORY_DISPLAYS = {\n Smoke: 'SMOKE',\n Electric: 'ELECTRIC',\n Camping: 'CAMPING',\n surfing: 'SURFING',\n};\nconst OTHER_CATEGORY_DISPLAY = 'OTHERS';\n\nconst ensureShop = shop => {\n return {\n author: ensureUser(shop.author),\n listings: shop.listings.map(listing => {\n return ensureListing(listing);\n }),\n };\n};\n\nclass SearchMapShopLabel extends Component {\n // overwrite Component method\n shouldComponentUpdate(nextProps) {\n const currentProps = this.props;\n\n const currentShop = ensureShop(currentProps.shop);\n const nextShop = ensureShop(nextProps.shop);\n console.log('currentShop', currentShop);\n console.log('nextShop', nextShop);\n\n const isSameShop = currentShop.author.id.uuid === nextShop.author.id.uuid;\n const hasSameListings = currentShop.listings === nextShop.listings;\n const hasSameActiveStatus = currentProps.isActive === nextProps.isActive;\n const hasSameRefreshToken =\n currentProps.mapComponentRefreshToken === nextProps.mapComponentRefreshToken;\n\n return !(isSameShop && hasSameListings && hasSameActiveStatus && hasSameRefreshToken);\n }\n\n render() {\n const { className, rootClassName, intl, shop, onListingClicked, isActive } = this.props;\n const currentShop = ensureShop(shop);\n const category = shop.author.attributes.profile.publicData.Category;\n\n // Create formatted price if currency is known or alternatively show just the unknown currency.\n // const formattedPrice =\n // price && price.currency === config.currency ? formatMoney(intl, price) : price.currency;\n console.log('category', category);\n const formattedCategory =\n (category && SUPPORTED_CATEGORY_DISPLAYS[category]) || OTHER_CATEGORY_DISPLAY;\n\n const classes = classNames(rootClassName || css.root, className);\n const shopLabelClasses = classNames(css.shopLabel, { [css.shopLabelActive]: isActive });\n const caretClasses = classNames(css.caret, { [css.caretActive]: isActive });\n\n return (\n <button className={classes} onClick={() => onListingClicked(currentShop)}>\n <div className={css.caretShadow} />\n <div className={shopLabelClasses}>{formattedCategory}</div>\n <div className={caretClasses} />\n </button>\n );\n }\n}\n\nSearchMapShopLabel.defaultProps = {\n className: null,\n rootClassName: null,\n};\n\nconst { func, string, bool, shape, arrayOf } = PropTypes;\n\nSearchMapShopLabel.propTypes = {\n className: string,\n rootClassName: string,\n shop: shape({\n author: propTypes.user.isRequired,\n listings: arrayOf(propTypes.listing.isRequired).isRequired,\n }).isRequired,\n onListingClicked: func.isRequired,\n isActive: bool.isRequired,\n\n // from injectIntl\n intl: intlShape.isRequired,\n};\n\nexport default injectIntl(SearchMapShopLabel);\n"
},
{
"alpha_fraction": 0.5283825993537903,
"alphanum_fraction": 0.5299182534217834,
"avg_line_length": 35.908905029296875,
"blob_id": "61a2d338c5faf6a20a0f9214788a750bdfee760f",
"content_id": "a71f86406b52e307116d90f21e02670d484acd22",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 18241,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 494,
"path": "/src/forms/ProfileSettingsShopForm/ProfileSettingsShopForm.js",
"repo_name": "yamahiro-offto/offto-sharetribe-flex-web",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { bool, string } from 'prop-types';\nimport { compose } from 'redux';\nimport { FormattedMessage, injectIntl, intlShape } from '../../util/reactIntl';\nimport { Field, Form as FinalForm } from 'react-final-form';\nimport isEqual from 'lodash/isEqual';\nimport classNames from 'classnames';\nimport { ensureCurrentUser } from '../../util/data';\nimport { propTypes } from '../../util/types';\nimport * as validators from '../../util/validators';\nimport {\n autocompleteSearchRequired,\n autocompletePlaceSelected,\n composeValidators,\n} from '../../util/validators';\nimport { isUploadImageOverLimitError } from '../../util/errors';\nimport * as offtoData from '../../util/offtoData';\nimport {\n Form,\n Avatar,\n Button,\n ImageFromFile,\n IconSpinner,\n FieldTextInput,\n FieldSelectCustom,\n LocationAutocompleteInputField,\n} from '../../components';\n\nimport css from './ProfileSettingsShopForm.css';\n\nconst identity = v => v;\n\nconst ACCEPT_IMAGES = 'image/*';\nconst UPLOAD_CHANGE_DELAY = 2000; // Show spinner so that browser has time to load img srcset\n\nclass ProfileSettingsShopFormComponent extends Component {\n constructor(props) {\n super(props);\n\n this.uploadDelayTimeoutId = null;\n this.state = { uploadDelay: false };\n this.submittedValues = {};\n }\n\n componentDidUpdate(prevProps) {\n // Upload delay is additional time window where Avatar is added to the DOM,\n // but not yet visible (time to load image URL from srcset)\n if (prevProps.uploadInProgress && !this.props.uploadInProgress) {\n this.setState({ uploadDelay: true });\n this.uploadDelayTimeoutId = window.setTimeout(() => {\n this.setState({ uploadDelay: false });\n }, UPLOAD_CHANGE_DELAY);\n }\n }\n\n componentWillUnmount() {\n window.clearTimeout(this.uploadDelayTimeoutId);\n }\n\n render() {\n return (\n <FinalForm\n {...this.props}\n render={fieldRenderProps => {\n const {\n className,\n currentUser,\n handleSubmit,\n intl,\n invalid,\n onImageUpload,\n pristine,\n profileImage,\n rootClassName,\n updateInProgress,\n updateProfileError,\n uploadImageError,\n uploadInProgress,\n form,\n values,\n } = fieldRenderProps;\n\n const user = ensureCurrentUser(currentUser);\n\n // First name\n const firstNameLabel = intl.formatMessage({\n id: 'ProfileSettingsShopForm.firstNameLabel',\n });\n const firstNamePlaceholder = intl.formatMessage({\n id: 'ProfileSettingsShopForm.firstNamePlaceholder',\n });\n const firstNameRequiredMessage = intl.formatMessage({\n id: 'ProfileSettingsShopForm.firstNameRequired',\n });\n const firstNameRequired = validators.required(firstNameRequiredMessage);\n\n // Last name\n const lastNameLabel = intl.formatMessage({\n id: 'ProfileSettingsShopForm.lastNameLabel',\n });\n const lastNamePlaceholder = intl.formatMessage({\n id: 'ProfileSettingsShopForm.lastNamePlaceholder',\n });\n const lastNameRequiredMessage = intl.formatMessage({\n id: 'ProfileSettingsShopForm.lastNameRequired',\n });\n const lastNameRequired = validators.required(lastNameRequiredMessage);\n\n // Bio\n const bioLabel = intl.formatMessage({\n id: 'ProfileSettingsShopForm.bioLabel',\n });\n const bioPlaceholder = intl.formatMessage({\n id: 'ProfileSettingsShopForm.bioPlaceholder',\n });\n\n // location\n const titleRequiredMessage = intl.formatMessage({\n id: 'EditListingLocationForm.address',\n });\n const addressPlaceholderMessage = intl.formatMessage({\n id: 'EditListingLocationForm.addressPlaceholder',\n });\n const addressRequiredMessage = intl.formatMessage({\n id: 
'EditListingLocationForm.addressRequired',\n });\n const addressNotRecognizedMessage = intl.formatMessage({\n id: 'EditListingLocationForm.addressNotRecognized',\n });\n\n const optionalText = intl.formatMessage({\n id: 'EditListingLocationForm.optionalText',\n });\n\n const buildingMessage = intl.formatMessage(\n { id: 'EditListingLocationForm.building' },\n { optionalText: optionalText }\n );\n const buildingPlaceholderMessage = intl.formatMessage({\n id: 'EditListingLocationForm.buildingPlaceholder',\n });\n\n const uploadingOverlay =\n uploadInProgress || this.state.uploadDelay ? (\n <div className={css.uploadingImageOverlay}>\n <IconSpinner />\n </div>\n ) : null;\n\n const hasUploadError = !!uploadImageError && !uploadInProgress;\n const errorClasses = classNames({ [css.avatarUploadError]: hasUploadError });\n const transientUserProfileImage = profileImage.uploadedImage || user.profileImage;\n const transientUser = { ...user, profileImage: transientUserProfileImage };\n\n // Ensure that file exists if imageFromFile is used\n const fileExists = !!profileImage.file;\n const fileUploadInProgress = uploadInProgress && fileExists;\n const delayAfterUpload = profileImage.imageId && this.state.uploadDelay;\n const imageFromFile =\n fileExists && (fileUploadInProgress || delayAfterUpload) ? (\n <ImageFromFile\n id={profileImage.id}\n className={errorClasses}\n rootClassName={css.uploadingImage}\n aspectRatioClassName={css.squareAspectRatio}\n file={profileImage.file}\n >\n {uploadingOverlay}\n </ImageFromFile>\n ) : null;\n\n // Avatar is rendered in hidden during the upload delay\n // Upload delay smoothes image change process:\n // responsive img has time to load srcset stuff before it is shown to user.\n const avatarClasses = classNames(errorClasses, css.avatar, {\n [css.avatarInvisible]: this.state.uploadDelay,\n });\n const avatarComponent =\n !fileUploadInProgress && profileImage.imageId ? (\n <Avatar\n className={avatarClasses}\n renderSizes=\"(max-width: 767px) 96px, 240px\"\n user={transientUser}\n disableProfileLink\n />\n ) : null;\n\n const chooseAvatarLabel =\n profileImage.imageId || fileUploadInProgress ? (\n <div className={css.avatarContainer}>\n {imageFromFile}\n {avatarComponent}\n <div className={css.changeAvatar}>\n <FormattedMessage id=\"ProfileSettingsShopForm.changeAvatar\" />\n </div>\n </div>\n ) : (\n <div className={css.avatarPlaceholder}>\n <div className={css.avatarPlaceholderText}>\n <FormattedMessage id=\"ProfileSettingsShopForm.addYourProfilePicture\" />\n </div>\n <div className={css.avatarPlaceholderTextMobile}>\n <FormattedMessage id=\"ProfileSettingsShopForm.addYourProfilePictureMobile\" />\n </div>\n </div>\n );\n\n const submitError = updateProfileError ? 
(\n <div className={css.error}>\n <FormattedMessage id=\"ProfileSettingsShopForm.updateProfileFailed\" />\n </div>\n ) : null;\n\n const classes = classNames(rootClassName || css.root, className);\n const submitInProgress = updateInProgress;\n const submittedOnce = Object.keys(this.submittedValues).length > 0;\n const pristineSinceLastSubmit = submittedOnce && isEqual(values, this.submittedValues);\n const submitDisabled =\n invalid || pristine || pristineSinceLastSubmit || uploadInProgress || submitInProgress;\n\n const formDivs = {};\n formDivs.profileImage = (\n <div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n <FormattedMessage id=\"ProfileSettingsShopForm.yourProfilePicture\" />\n </h3>\n <Field\n accept={ACCEPT_IMAGES}\n id=\"profileImage\"\n name=\"profileImage\"\n label={chooseAvatarLabel}\n type=\"file\"\n form={null}\n uploadImageError={uploadImageError}\n disabled={uploadInProgress}\n >\n {fieldProps => {\n const { accept, id, input, label, disabled, uploadImageError } = fieldProps;\n const { name, type } = input;\n const onChange = e => {\n const file = e.target.files[0];\n form.change(`profileImage`, file);\n form.blur(`profileImage`);\n if (file != null) {\n const tempId = `${file.name}_${Date.now()}`;\n onImageUpload({ id: tempId, file });\n }\n };\n\n let error = null;\n\n if (isUploadImageOverLimitError(uploadImageError)) {\n error = (\n <div className={css.error}>\n <FormattedMessage id=\"ProfileSettingsShopForm.imageUploadFailedFileTooLarge\" />\n </div>\n );\n } else if (uploadImageError) {\n error = (\n <div className={css.error}>\n <FormattedMessage id=\"ProfileSettingsShopForm.imageUploadFailed\" />\n </div>\n );\n }\n\n return (\n <div className={css.uploadAvatarWrapper}>\n <label className={css.label} htmlFor={id}>\n {label}\n </label>\n <input\n accept={accept}\n id={id}\n name={name}\n className={css.uploadAvatarInput}\n disabled={disabled}\n onChange={onChange}\n type={type}\n />\n {error}\n </div>\n );\n }}\n </Field>\n <div className={css.tip}>\n <FormattedMessage id=\"ProfileSettingsShopForm.tip\" />\n </div>\n <div className={css.fileInfo}>\n <FormattedMessage id=\"ProfileSettingsShopForm.fileInfo\" />\n </div>\n </div>\n );\n formDivs.name = (\n <div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n <FormattedMessage id=\"ProfileSettingsShopForm.yourName\" />\n </h3>\n <div className={css.nameContainer}>\n <FieldTextInput\n className={css.firstName}\n type=\"text\"\n id=\"firstName\"\n name=\"firstName\"\n label={firstNameLabel}\n placeholder={firstNamePlaceholder}\n validate={firstNameRequired}\n />\n <FieldTextInput\n className={css.lastName}\n type=\"text\"\n id=\"lastName\"\n name=\"lastName\"\n label={lastNameLabel}\n placeholder={lastNamePlaceholder}\n validate={lastNameRequired}\n />\n </div>\n </div>\n );\n formDivs.shopName = (\n <div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n {/* <FormattedMessage id=\"ProfileSettingsShopForm.bioHeading\" /> */}\n {'Your shop name'}\n </h3>\n <FieldTextInput\n type=\"text\"\n id=\"displayName\"\n name=\"displayName\"\n label={'shop name'}\n placeholder={'shop name here..'}\n />\n </div>\n );\n formDivs.intro = (\n <div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n <FormattedMessage id=\"ProfileSettingsShopForm.bioHeading\" />\n </h3>\n <FieldTextInput\n type=\"textarea\"\n id=\"bio\"\n name=\"bio\"\n label={'your shop introduction'}\n placeholder={'shop name here..'}\n />\n </div>\n );\n formDivs.userType = (\n 
<div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n {/* <FormattedMessage id=\"ProfileSettingsShopForm.bioHeading\" /> */}\n {'shop type'}\n </h3>\n <FieldSelectCustom\n id=\"publicData.type\"\n name=\"publicData.type\" // values の key\n label={'publicData type'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.UserType).map(usertype => {\n return {\n key: offtoData.UserType[usertype],\n label: offtoData.UserType[usertype],\n };\n })}\n />\n <p className={css.bioInfo}>\n <FormattedMessage id=\"ProfileSettingsShopForm.bioInfo\" />\n </p>\n </div>\n );\n formDivs.activity = (\n <div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n {/* <FormattedMessage id=\"ProfileSettingsShopForm.bioHeading\" /> */}\n {'Yout Shop activity'}\n </h3>\n <FieldSelectCustom\n id=\"publicData.activity\"\n name=\"publicData.activity\" // values の key\n label={'activity'}\n placeholder={'placeholder'}\n validate=\"\"\n // options={[{ key: 'customer', label: 'customer' }, { key: 'shop', label: 'shop' }]}\n options={Object.keys(offtoData.Activity).map(activity => {\n return {\n key: offtoData.Activity[activity],\n label: offtoData.Activity[activity],\n };\n })}\n />\n <p className={css.bioInfo}>\n <FormattedMessage id=\"ProfileSettingsShopForm.bioInfo\" />\n </p>\n </div>\n );\n formDivs.location = (\n <div className={css.sectionContainer}>\n <h3 className={css.sectionTitle}>\n <FormattedMessage id=\"ProfileSettingsShopForm.locationHeading\" />\n </h3>\n <LocationAutocompleteInputField\n className={css.locationAddress}\n inputClassName={css.locationAutocompleteInput}\n iconClassName={css.locationAutocompleteInputIcon}\n predictionsClassName={css.predictionsRoot}\n validClassName={css.validLocation}\n name=\"geolocation\"\n meta={{ touched: true, valid: true }}\n label={titleRequiredMessage}\n placeholder={addressPlaceholderMessage}\n useDefaultPredictions={false}\n format={identity}\n valueFromForm={values.geolocation}\n validate={composeValidators(\n autocompleteSearchRequired(addressRequiredMessage),\n autocompletePlaceSelected(addressNotRecognizedMessage)\n )}\n />\n <FieldTextInput\n className={css.building}\n type=\"text\"\n name=\"building\"\n id=\"building\"\n label={buildingMessage}\n placeholder={buildingPlaceholderMessage}\n />\n </div>\n );\n\n return (\n <Form\n className={classes}\n onSubmit={e => {\n this.submittedValues = values;\n handleSubmit(e);\n }}\n >\n {formDivs.profileImage}\n {/* {formDivs.name} */}\n {formDivs.shopName}\n {formDivs.intro}\n {/* {formDivs.userType} */}\n {formDivs.activity}\n {formDivs.location}\n {/* (余白) */}\n <div className={css.lastSection} />\n {submitError}\n <Button\n className={css.submitButton}\n type=\"submit\"\n inProgress={submitInProgress}\n disabled={submitDisabled}\n ready={pristineSinceLastSubmit}\n >\n <FormattedMessage id=\"ProfileSettingsShopForm.saveChanges\" />\n </Button>\n </Form>\n );\n }}\n />\n );\n }\n}\n\nProfileSettingsShopFormComponent.defaultProps = {\n rootClassName: null,\n className: null,\n uploadImageError: null,\n updateProfileError: null,\n updateProfileReady: false,\n};\n\nProfileSettingsShopFormComponent.propTypes = {\n rootClassName: string,\n className: string,\n\n uploadImageError: propTypes.error,\n uploadInProgress: bool.isRequired,\n updateInProgress: bool.isRequired,\n updateProfileError: propTypes.error,\n updateProfileReady: bool,\n\n // from injectIntl\n intl: 
intlShape.isRequired,\n};\n\nconst ProfileSettingsShopForm = compose(injectIntl)(ProfileSettingsShopFormComponent);\n\nProfileSettingsShopForm.displayName = 'ProfileSettingsShopForm';\n\nexport default ProfileSettingsShopForm;\n"
}
] | 19 |
erwenzhang/FreeLunchTest | https://github.com/erwenzhang/FreeLunchTest | d67beb25b776e9b2e1a72acdbd74ff169d9a162f | 731c80b6f4a4506aedf224768e4a2676320a765d | aed346b42e447bc2d41d94ae00601d5fabc54b16 | refs/heads/master | 2021-01-17T17:55:25.839957 | 2015-12-06T00:19:45 | 2015-12-06T00:19:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6432678699493408,
"alphanum_fraction": 0.6509343385696411,
"avg_line_length": 30.854961395263672,
"blob_id": "6275431db718820cbd930b8dbe6cd04937ed532f",
"content_id": "f2acf69789763444c32456a8f44a30bfea5ed142",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4174,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 131,
"path": "/main.py",
"repo_name": "erwenzhang/FreeLunchTest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nfrom __future__ import division\nimport webapp2\nimport cgi\nimport urllib\nimport json\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import blobstore\nfrom google.appengine.ext.webapp import blobstore_handlers\nfrom google.appengine.api import images\n\n\nadministrator1 = users.User(\"[email protected]\")\nadministrator2 = users.User(\"[email protected]\")\nadministrator1ID = administrator1.user_id()\nadministrator2ID = administrator2.user_id()\n\nclass Event(ndb.Model):\n name = ndb.StringProperty(required= True)\n description = ndb.StringProperty()\n coverurl=ndb.StringProperty(default=None)\n\n authorID = ndb.StringProperty(default=\"administrator\")\n author_name = ndb.StringProperty(default=\"administrator\")\n\n loc = ndb.GeoPtProperty(required=True,default=ndb.GeoPt(0,0))\n date = ndb.DateTimeProperty(required=True)\n\n linkage = ndb.StringProperty(default=None)\n\n\nclass Crowdworker(ndb.Model):\n ID = ndb.StringProperty()\n name = ndb.StringProperty()\n rated_times = ndb.IntegerProperty(default=0)\n score = ndb.IntegerProperty()\n\n\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n self.response.write('Hello world!')\n\n\nclass ViewAllEvents(webapp2.RequestHandler):\n def get(self):\n events = Event.query().fetch()\n locations = []\n dates = []\n names = []\n\n for event in events:\n locations.append(event.loc)\n dates.append(event.date)\n names.append(event.name)\n\n dictPassed = {'dates':dates, 'names':names,'locations':locations}\n jsonObj = json.dumps(dictPassed, sort_keys=True,indent=4, separators=(',', ': '))\n self.response.write(jsonObj)\n\n\n\nclass ViewOneEvent(webapp2.RequestHandler):\n def get(self):\n event_name = self.request.get(\"event_name\")\n the_event = ndb.gql(\"SELECT * FROM Event WHERE name = :1\",event_name).get()\n print event_name\n author = ndb.gql(\"SELECT * FROM Crowdworker WHERE ID = :1\",the_event.authorID).get()\n if author.ID != administrator1ID and author.ID != administrator2ID:\n ratings = str(author.score/author.rated_times)\n author_name = author.name\n print \"ratings: \"+ratings\n else:\n ratings = None\n author_name = None\n\n dictPassed = {'date':the_event.date,\n 'location':the_event.loc,\n 'description':the_event.description,\n 'coverUrl':the_event.coverUrl,\n 'linkage':the_event.linkage,\n 'ratings':ratings,\n 'author_name':author_name\n }\n jsonObj = json.dumps(dictPassed, sort_keys=True,indent=4, separators=(',', ': '))\n self.response.write(jsonObj)\n\n\nclass GiveFeedback(webapp2.RequestHandler):\n def get(self):\n feedback = self.request.get('feedback')\n author_name = self.request.get('author_name')\n author = ndb.gql(\"SELECT * FROM Crowdworker WHERE name = :1\",author_name).get()\n author.rated_times +=1\n if feedback == \"Yes\":\n author.score +=5\n\n author.put()\n\n\n\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n # 
('/Mapview',MapView),\n # ('/Calendarview',CalendarView),\n # ('/Addevent',AddEvent),\n ('/ViewOneEvent',ViewOneEvent),\n ('/ViewAllEvents',ViewAllEvents),\n # ('/ViewOneWorker',ViewOneWorker),\n # ('/ViewAllWorkers',ViewAllWorkers),\n ('/GiveFeedback',GiveFeedback),\n # ('/DeleteEvent',DeleteEvent)\n], debug=True)\n\n"
}
] | 1 |
RSGInc/popsampler | https://github.com/RSGInc/popsampler | ae9d6bc2141cb35bebab6e6f6dd3718fb31381ff | 5cc683a0d8646e982fb72d3a529a5bddb2b6a2b9 | b0a932d10bce25d007b73a88d1fe62b5107062b3 | refs/heads/master | 2020-03-20T11:53:57.970153 | 2018-06-15T22:59:47 | 2018-06-15T22:59:47 | 137,415,409 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.692224383354187,
"alphanum_fraction": 0.7032704949378967,
"avg_line_length": 43.81553268432617,
"blob_id": "abfe9c912fba637116bfb58bfe7d78f671c8237d",
"content_id": "26b4c3c9ccf584dc5d6407c54a2ac90ece56974a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4617,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 103,
"path": "/popsampler/__init__.py",
"repo_name": "RSGInc/popsampler",
"src_encoding": "UTF-8",
"text": "\nimport pandas as pd\nimport numpy as np\n\ndef read_tables(zoneSampleRateFileName, hhFileName, perFileName):\n \n sampleRates = pd.read_csv(zoneSampleRateFileName)\n hhTable = pd.read_csv(hhFileName)\n perTable = pd.read_csv(perFileName)\n return(sampleRates, hhTable, perTable)\n\ndef write_tables(hhOutFileName, perOutFileName, households, persons):\n \n households.to_csv(hhOutFileName, index=False)\n persons.to_csv(perOutFileName, index=False)\n \ndef sample_hhs(group, hhZoneField, hhExpFacField):\n \n #sample using the zone sample rate with replacement and a stable group seed\n seed = int(group[hhZoneField].min()*1000 + group.hhincbin.min()*100 + group.hhsizebin.min()*10 + group.hhworkerbin.min())\n sample = group.sample(frac=group.sample_rate.min(), replace=True, random_state=seed)\n\n if len(sample)==0:\n print('sample is empty')\n sample = group\n else:\n #set hh expansion factor based on actual sample size since sampling is lumpy\n sample[hhExpFacField] = 1.0 / (len(sample)*1.0/len(group))\n\n print(hhZoneField + \" %i hhincbin %s hhsizebin %s hhworkerbin %s sample rate %.2f effective rate %.2f\" % (group[hhZoneField].min(), \n group.hhincbin.min(), group.hhsizebin.min(), group.hhworkerbin.min(), group.sample_rate.min(), 1.0 / sample[hhExpFacField].min()))\n \n return(sample)\n\ndef run(zoneSampleRateFileName, hhFileName, hhOutFileName, perFileName, perOutFileName, hhZoneField, \n zoneField, useIncomeBins, useSizeBins, useWorkerBins, incomeField, sizeField, workersField,\n incomeBin1Max, incomeBin2Max, incomeBin3Max, hhExpFacField, hhHhIdField, perHhIdField):\n\n print(\"Synthetic Population Spatial Sampler\")\n print(\"zoneSampleRateFileName: \" + zoneSampleRateFileName)\n print(\"hhFileName: \" + hhFileName)\n print(\"hhOutFileName: \" + hhOutFileName)\n print(\"perFileName: \" + perFileName)\n print(\"perOutFileName: \" + perOutFileName)\n print(\"hhZoneField: \" + hhZoneField)\n print(\"zoneField: \" + zoneField)\n print(\"useIncomeBins: \" + str(useIncomeBins))\n print(\"useSizeBins: \" + str(useSizeBins))\n print(\"useWorkerBins: \" + str(useWorkerBins))\n print(\"incomeField: \" + incomeField)\n print(\"sizeField: \" + sizeField)\n print(\"workersField: \" + workersField)\n print(\"incomeBin1Max: \" + str(incomeBin1Max))\n print(\"incomeBin2Max: \" + str(incomeBin2Max))\n print(\"incomeBin3Max: \" + str(incomeBin3Max))\n print(\"hhExpFacField: \" + hhExpFacField)\n print(\"hhHhIdField: \" + hhHhIdField)\n print(\"perHhIdField: \" + perHhIdField)\n \n #get tables\n sampleRates, households, persons = read_tables(zoneSampleRateFileName, hhFileName, perFileName)\n\n #join sample rate by home zone\n households = pd.merge(households, sampleRates, left_on=hhZoneField, right_on=zoneField)\n\n #bin hhs by control fields\n if useIncomeBins:\n incbins = [-1, incomeBin1Max, incomeBin2Max, incomeBin3Max, households[incomeField].max()+1]\n households['hhincbin'] = pd.cut(households[incomeField], incbins, labels=False)\n else:\n households['hhincbin'] = 0\n if useSizeBins:\n sizebins = [-1, 1, 2, 3, households[sizeField].max()+1]\n households['hhsizebin'] = pd.cut(households[sizeField], sizebins, labels=False)\n else: \n households['hhsizebin'] = 0 \n if useWorkerBins:\n workerbins = [-1, 0, 1, 2, households[workersField].max()+1]\n households['hhworkerbin'] = pd.cut(households[workersField], workerbins, labels=False)\n else:\n households['hhworkerbin'] = 0\n\n #group hhs by zone, control fields and sample and reset index\n hhsGrouped = 
households.groupby([hhZoneField,\"hhincbin\",\"hhsizebin\",\"hhworkerbin\"])\n new_households = hhsGrouped.apply(sample_hhs, hhZoneField=hhZoneField, hhExpFacField=hhExpFacField)\n new_households = new_households.reset_index(drop=True)\n \n #update ids and expand persons\n new_households['hhno_new'] = range(1,len(new_households)+1)\n new_persons = pd.merge(persons, new_households[[hhHhIdField,\"hhno_new\"]], left_on=perHhIdField, right_on=hhHhIdField)\n new_households[hhHhIdField] = new_households['hhno_new'].astype(np.int32)\n new_persons[perHhIdField] = new_persons['hhno_new'].astype(np.int32)\n\n #delete added fields\n del new_households[zoneField]\n del new_households['hhno_new']\n del new_households['sample_rate']\n del new_households['hhincbin']\n del new_households['hhsizebin']\n del new_households['hhworkerbin']\n del new_persons['hhno_new']\n \n #write result files\n write_tables(hhOutFileName, perOutFileName, new_households, new_persons)\n"
},
{
"alpha_fraction": 0.6447661519050598,
"alphanum_fraction": 0.6681514382362366,
"avg_line_length": 32.22222137451172,
"blob_id": "3f8fcf56159128eef63b9308d459dfc74847c5e7",
"content_id": "5b056aaca065b630e0580a239cd84a6913bd460c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 898,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 27,
"path": "/test/test_popsampler.py",
"repo_name": "RSGInc/popsampler",
"src_encoding": "UTF-8",
"text": "\nimport popsampler\n\nzone_sample_rate_file = \"zone_sample_rate.csv\"\nhh_file = \"households.csv\"\nhh_out_file = \"households_out.csv\"\nper_file = \"persons.csv\"\nper_out_file = \"persons_out.csv\"\nhh_zone_field = \"hhtaz\"\nzone_field = \"zone_id\"\nuse_income_bins = True\nuse_size_bins = True\nuse_worker_bins = False\nincome_field = \"hhincome\"\nsize_field = \"hhsize\"\nworkers_field = \"hwkrs\"\nincome_bin_1_max = 33333\nincome_bin_2_max = 66666\nincome_bin_3_max = 99999\nhh_exp_fac_field = \"hhexpfac\"\nhh_hh_id_field = \"hhno\"\nper_hh_id_field = \"hhno\"\n\npopsampler.run(zone_sample_rate_file, hh_file, hh_out_file, per_file, \n per_out_file, hh_zone_field, zone_field, use_income_bins, \n use_size_bins, use_worker_bins, income_field, size_field, \n workers_field, income_bin_1_max, income_bin_2_max, income_bin_3_max,\n hh_exp_fac_field, hh_hh_id_field, per_hh_id_field)\n"
},
{
"alpha_fraction": 0.5925372838973999,
"alphanum_fraction": 0.6134328246116638,
"avg_line_length": 25.799999237060547,
"blob_id": "b886ff4929041ea945bdb5bac5b01dce2e5a3521",
"content_id": "9cdd939eed0b073aad66f4ddaa475291cd0a2650",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 25,
"path": "/setup.py",
"repo_name": "RSGInc/popsampler",
"src_encoding": "UTF-8",
"text": "from ez_setup import use_setuptools\nuse_setuptools() # nopep8\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='popsampler',\n version='0.1',\n description='Synthetic Population Spatial Sampler',\n author='contributing authors',\n author_email='[email protected]',\n license='BSD-3',\n url='https://github.com/RSGInc/popsampler',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python :: 2.7',\n 'License :: OSI Approved :: BSD License'\n ],\n long_description=\"\",\n packages=find_packages(exclude=['*.tests']),\n install_requires=[\n 'numpy >= 1.8.0',\n 'pandas >= 0.18.0',\n ]\n)\n"
},
{
"alpha_fraction": 0.6886395215988159,
"alphanum_fraction": 0.7040673494338989,
"avg_line_length": 27.479999542236328,
"blob_id": "3c3dca24e84a88d4b54e34cad373f07934d15a3b",
"content_id": "9c580e38f710ef37700bc41ae2f90780834b40c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 25,
"path": "/README.md",
"repo_name": "RSGInc/popsampler",
"src_encoding": "UTF-8",
"text": "## Synthetic Population Spatial Sampler\n\n - Run a population synthesizer as usual\n - Specify a sample rate for each zone\n - Assign HHs into bins by income, size, workers by zone\n - Sample HHs by bin to match zone level sample rates\n - Set the expansion weight of each record\n - Run travel model as usual\n - Average trip matrices before assignment\n\n - For example: \n - Oversample study area zones @ 400%\n - Replicate a synthetic household four times\n - Set the weight to 0.25\n - Undersample zones outside study area @ 25%\n - Select 1 in 4 synthetic households \n - Set the weight to 4\n\n## Example\n\n``test\\test_popsampler.py``\n\n## See Also\n\nhttps://github.com/RSGInc/populationsim\n\n"
}
] | 4 |
erosminer/CAFE_fig | https://github.com/erosminer/CAFE_fig | 8a86918b1913cea95d0a98dd91e530fa74c402e2 | 572493c78b5a7ce7bb08c856214965f34c25a7e4 | b6dd8fc7f849a5599448710dc5d4200f81b085ab | refs/heads/master | 2022-05-22T04:22:16.838089 | 2022-04-12T01:15:43 | 2022-04-12T01:15:43 | 478,616,157 | 0 | 0 | null | 2022-04-06T15:20:28 | 2022-03-18T12:50:46 | 2022-04-11T12:55:40 | null | [
{
"alpha_fraction": 0.5278300642967224,
"alphanum_fraction": 0.5344835519790649,
"avg_line_length": 40.74904251098633,
"blob_id": "f3f43bdc720b70e902c5d6aa0852b489bd812c04",
"content_id": "9f49595eb63cb44c77de468c5345275c8528753f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21793,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 522,
"path": "/CAFE_fig.py",
"repo_name": "erosminer/CAFE_fig",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\n\nfrom __future__ import print_function\nimport PyQt5\nfrom PyQt5 import QtGui\nimport ete3\nimport argparse\nimport re\nimport os\nimport shutil\nimport copy\nimport math\nfrom base64 import b16encode\n\n\n# Python 2 compatability:\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ndef is_valid_format(line):\n # check if a text line is in the valid format to denote a CAFE family\n values = line.strip().split('\\t')\n if len(values) != 4:\n return False\n # the family-wide p-value should be here\n try:\n float(values[2])\n except ValueError:\n return False\n # the trees should start/end with parentheses\n fam_tree, pvalue_tree = values[1], values[3]\n if not fam_tree.startswith('('):\n return False\n if not pvalue_tree.startswith('(') or not pvalue_tree.endswith(')'):\n return False\n return True\n\n\ndef get_pvalue_asterisks(node):\n try:\n p = float(node.pvalue)\n except (TypeError, AttributeError):\n # TypeError occurs when CAFE did not compute the pvalue because the whole\n # family did not experience significant size changes (p = None).\n # AttributeError occurs when the node has no pvalue attribute (e.g.\n # because it is the root node of the tree).\n return ''\n if p <= 0.0001:\n return '***'\n if p <= 0.001:\n return '**'\n if p <= 0.01:\n return '*'\n elif p <= 0.05:\n return '.'\n else:\n return ''\n\ndef to_rgb(v_abs, min_v, max_v):\n if min_v == max_v:\n hex_color = \"#A9A9A9\"\n else:\n v = (v_abs - min_v) / (max_v - min_v) # scaled to a value between 0 and 1\n red = round(255 * v)\n blue = round(255 * (1 - v))\n rgb_triplet = (red, 0, blue)\n hex_color = '#' + b16encode(bytes(rgb_triplet)).decode()\n return hex_color\n\n\nclass CAFE_fig():\n def __init__(self, report_cafe, families, clades, pb, pf,\n dump, gfx_output_format, count_all_expansions):\n self.graphics_options = {\n '+': '#4dac26', # expansion\n '=': '#696969', # unchanged (remain)\n '-': '#d01c8b', # contraction (decrease)\n 'pixels_per_mya': 1.0, # pixels per million years (tree width)\n 'opacity': 1.0, # opacity of node circles\n 'scale': 1.0, # size scale factor of node circles\n }\n self.count_all_expansions = count_all_expansions\n self.branch_p_cutoff = pb\n self.family_p_cutoff = pf\n self.report_path = report_cafe\n self.prepare_pdf_dump(dump, gfx_format=gfx_output_format)\n self.parse_tree()\n if clades:\n self.get_clades_of_interest(clades)\n if families:\n self.families_of_interest = set(families)\n return\n\n def prepare_pdf_dump(self, dir_path, gfx_format='pdf'):\n '''\n create a directory to dump the figures (trees) to.\n '''\n self.gfx_format = '.' + gfx_format.lstrip('.')\n if self.gfx_format not in ('.svg', '.pdf', '.png'):\n raise Exception('graphics output format must be one of [svg|pdf|png]')\n if dir_path:\n if os.path.isdir(dir_path):\n answer = ''\n while answer.lower() not in ('y', 'n', 'yes', 'no'):\n answer = input('The directory \"{}\" already exists. Overwrite it '\n 'and delete all its contents? '\n '(y/n)? 
'.format(dir_path))\n if answer.lower() in ('n', 'no'):\n exit('bye!')\n else:\n shutil.rmtree(dir_path)\n os.mkdir(dir_path)\n self.out_dir = os.path.abspath(dir_path)\n fam_dir = os.path.join(dir_path, 'families')\n os.mkdir(fam_dir)\n self.out_dir_families = os.path.abspath(fam_dir)\n self.dump = True\n else:\n self.dump = False\n return\n\n def parse_tree(self):\n '''\n read the first few lines of the CAFE output file to extract the\n phylogeny, the output format/node order and the average expansion\n of all nodes.\n '''\n print('Parsing CAFE report...', end='\\r')\n self.multi_lambda = False # boolean that indicates whether the user ran CAFE\n # with one lambda or multiple lambda values, will be toggled if lambas found\n with open(self.report_path, 'r') as report:\n for line in report:\n if line.startswith('Tree:'):\n # parse the general phylogeny including branch lengths\n self.parse_phylo_tree(line)\n if line.startswith('Lambda:'):\n self.parse_lambdas(line)\n if line.startswith('Lambda tree:'):\n # add the information of the lambda groups to the tree\n self.parse_lambda_tree(line)\n self.multi_lambda = True # user ran CAFE with more than one lambda\n if line.startswith('# IDs of nodes:'):\n # add CAFE's numerical ID's to the tree\n self.parse_node_num_tree(line)\n if line.startswith('# Output format for: '):\n # find the output format that CAFE used to average expansion etc.\n self.cafe_node_id_order = [int(i) for i in re.findall(r'\\d+', line)]\n if line.startswith('Average Expansion'):\n self.parse_fam_size_summary_tree(line, 'avg_expansion')\n if line.startswith('Expansion') or line.startswith('nExpansion'):\n self.parse_fam_size_summary_tree(line, 'expansion')\n if line.startswith('Remain') or line.startswith('nRemain'):\n self.parse_fam_size_summary_tree(line, 'remain')\n if line.startswith('Decrease') or line.startswith('nDecrease'):\n self.parse_fam_size_summary_tree(line, 'decrease')\n if line.startswith('\\'ID\\''):\n break # end of header lines\n\n # count the number of significant expansions\n # and significant contractions per node:\n for family in self:\n if family.pvalue > self.family_p_cutoff:\n continue # insignificant family\n family.get_tree_with_famsizes()\n for node, fam_tree_node in zip(\n self.tree.traverse(),\n family.tree.traverse()\n ):\n if not fam_tree_node.event:\n continue\n if fam_tree_node.event == '+':\n node.sig_expansions += 1\n elif fam_tree_node.event == '-':\n node.sig_contractions += 1\n print('Parsing CAFE report... done!')\n return\n\n def parse_fam_size_summary_tree(self, line, node_attr_name):\n '''\n parses a line that denotes node features (e.g. 
average expansion)\n and adds the feature as a class attribute to the corresponding node.\n '''\n node_fam_sizes = [float(size) for size in re.findall(r'[\\d\\.-]+', line)]\n for node_id, node_size in zip(self.cafe_node_id_order, node_fam_sizes):\n nodes = self.tree.search_nodes(id=node_id)\n assert len(nodes) == 1\n node = nodes[0]\n setattr(node, node_attr_name, node_size)\n return\n\n def parse_phylo_tree(self, line):\n '''\n parse the general phylogeny including branch lengths\n '''\n newick = line[5:].strip() + ';'\n self.tree = ete3.Tree(newick)\n for node in self.tree.traverse():\n node.sig_expansions = 0\n node.sig_contractions = 0\n return\n\n def parse_lambdas(self, line):\n self.lambdas = {}\n for i, lambda_str in enumerate(line.split()[1:]):\n self.lambdas[i + 1] = float(lambda_str)\n self.lambda_colors = {}\n max_l = math.log(max(self.lambdas.values()))\n min_l = math.log(min(self.lambdas.values()))\n for i, lambda_ in self.lambdas.items():\n self.lambda_colors[i] = to_rgb(math.log(lambda_), min_l, max_l)\n return\n\n def parse_lambda_tree(self, line):\n '''\n find out in which lambda group each node is and add this information\n as a node attribute.\n '''\n lambda_nwk = line[12:].strip() + ';'\n lambda_tree = ete3.Tree(lambda_nwk)\n for node, lambda_node in zip(\n self.tree.traverse(),\n lambda_tree.traverse()\n ):\n if lambda_node.name:\n # ete3 parser calls this info \"name\" for leaves\n node.lambda_group = int(lambda_node.name)\n else:\n # ete3 parser calls this info \"support\" for internal nodes\n node.lambda_group = int(lambda_node.support)\n return\n\n def parse_node_num_tree(self, line):\n '''\n parses the numerical ID that CAFE assigned to each node, and adds\n this ID as a node class feature.\n '''\n num_nwk = line[15:].strip() + ';'\n num_tree = ete3.Tree(num_nwk.replace('<', ' ').replace('>', ''))\n for node, num_node in zip(\n self.tree.traverse(),\n num_tree.traverse()\n ):\n if num_node.name:\n node.id = int(num_node.name.split()[-1])\n else:\n node.id = int(num_node.support)\n return\n\n def summary_tree(self):\n '''\n show a tree that visualizes the total number of expansions and\n contractions across the whole phylogeny for allgene families.\n '''\n def fam_size_piechart_layout(node):\n '''\n the PieChart layout function, defined in local scope so it\n can access class attributes (graphics options).\n '''\n if not node.is_root():\n if self.count_all_expansions:\n n_exp = node.expansion\n n_con = node.decrease\n else:\n n_exp = node.sig_expansions\n n_con = node.sig_expansions\n\n if hasattr(self, 'lambda_colors'):\n circle_color = self.lambda_colors[node.lambda_group]\n else:\n circle_color = \"blue\"\n\n # add a text that shows expansions & contractions, e.g. 
+10/-20\n exp_cnt_txt = ete3.TextFace(\n '+{} -{}\\n'.format(int(n_exp), int(n_con)), fsize=6,\n fgcolor=circle_color\n )\n pos = 'aligned' if node.is_leaf() else 'float'\n # add a circle that shows the average expansion\n ete3.faces.add_face_to_node(exp_cnt_txt, node, 1, position=pos)\n\n # add average expansion info:\n scale_factor = 1 + node.avg_expansion\n avg_exp = '{:+}'.format(round(node.avg_expansion, 2))\n circle = ete3.CircleFace(radius=9 * scale_factor,\n color=circle_color,\n label={'text': avg_exp, 'color': 'white',\n 'fontsize': 3 + (2.25*scale_factor)})\n circle.opacity = self.graphics_options['opacity']\n ete3.faces.add_face_to_node(circle, node, 2, position='float')\n nstyle = ete3.NodeStyle()\n nstyle['size'] = 0\n node.set_style(nstyle)\n return\n t = self.tree\n ts = ete3.TreeStyle()\n header = 'Family expansions and contractions'\n if hasattr(self, \"lambdas\"):\n header += ('\\nmin lambda: {} '\n '(blue)\\nmax lambda: {} (red)').format(\n min(self.lambdas.values()), max(self.lambdas.values()))\n ts.title.add_face(ete3.TextFace(header, fsize=8), column=0)\n ts.scale = self.graphics_options['pixels_per_mya'] # pixels per million years\n ts.layout_fn = fam_size_piechart_layout\n self.show_or_dump_tree(tree_obj=t, tree_style=ts, fname='summary')\n return\n\n def get_clades_of_interest(self, clades_of_interest):\n '''\n parses the user-specified \"--clades\" parameter\n '''\n self.clades_of_interest = set()\n for c in clades_of_interest:\n name, species_str = c.split('=')\n species = species_str.split(',')\n if len(species) == 1:\n search_results = self.tree.search_nodes(name=species[0])\n assert len(search_results) == 1\n node = search_results[0]\n elif len(species) > 1:\n node = self.tree.get_common_ancestor(species)\n else:\n raise Exception('invalid --clades param')\n self.clades_of_interest.add(\n (name, node.id)\n )\n return\n\n def __iter__(self):\n with open(self.report_path, 'r') as report:\n for line in report:\n if is_valid_format(line):\n yield Family(line, self)\n\n def show_fam_size_tree(self, family):\n def fam_size_layout(node):\n try:\n node_color = self.graphics_options[node.event]\n except:\n node_color = self.graphics_options['=']\n relative_size = 8 * (node.fam_size / max(family.fam_sizes))\n cf = ete3.CircleFace(\n radius=relative_size * self.graphics_options['scale'],\n color=node_color,\n style='circle'\n )\n cf.opacity = self.graphics_options['opacity']\n node.add_face(cf, column=10, position='float')\n # add the family size number and asterisks to the figure\n famsize_str = '{}{}\\n'.format(\n get_pvalue_asterisks(node),\n node.fam_size,\n )\n tf = ete3.TextFace(famsize_str, fsize=4, fgcolor=node_color)\n node.add_face(tf, column=1, position='float')\n # remove the silly default blue node dot\n nstyle = ete3.NodeStyle()\n nstyle['size'] = 0\n node.set_style(nstyle)\n return\n\n t = family.tree\n # write a quick summary of family name, pvalues and what happened\n if hasattr(self, 'clades_of_interest'):\n for clade_name, node_id in self.clades_of_interest:\n search_results = t.search_nodes(id=node_id)\n assert len(search_results) == 1\n clade_node = search_results[0]\n clade_event = getattr(clade_node, 'event', '=')\n clade_pvalue = getattr(clade_node, 'pvalue', '')\n tsv_header = 'family\\tfamily_pvalue\\tclade\\tevent\\tclade_pvalue'\n tsv_line = '{}\\t{}\\t{}\\t{}\\t{}'.format(\n family.name, family.pvalue, clade_name, clade_event, clade_pvalue\n )\n if self.dump:\n outf_name = '{}_summary.tsv'.format(clade_name)\n outf_path = 
os.path.join(self.out_dir, outf_name)\n if not os.path.isfile(outf_path):\n with open(outf_path, 'a') as outf:\n outf.write(tsv_header + '\\n')\n with open(outf_path, 'a') as outf:\n outf.write(tsv_line + '\\n')\n print('\\n' + tsv_header)\n print(tsv_line)\n\n ts = ete3.TreeStyle()\n ts.layout_fn = fam_size_layout\n header = 'Evolution of the gene family \"{}\" (p={})'.format(\n family.name,\n family.pvalue,\n )\n ts.title.add_face(ete3.TextFace(header, fsize=8), column=0)\n ts.scale = self.graphics_options['pixels_per_mya'] # pixels per million years\n self.show_or_dump_tree(tree_obj=t, tree_style=ts,\n fname=family.name, is_family=True)\n return\n\n def show_or_dump_tree(self, tree_obj, tree_style, fname, is_family=False):\n '''\n show the tree in a window, or write it to a PDF file if the user used --dump\n '''\n if self.dump:\n if is_family:\n out_dir = self.out_dir_families\n else:\n out_dir = self.out_dir\n out_path = os.path.join(out_dir, fname + self.gfx_format)\n print('\\tWriting', os.path.relpath(out_path))\n tree_obj.render(out_path, tree_style=tree_style)\n else:\n tree_obj.show(tree_style=tree_style)\n return\n\n\nclass Family():\n def __init__(self, txtline, cafe_fig_instance):\n values = txtline.strip().split()\n self.name = values[0]\n self.nwk_famsize_str = values[1].replace(')_', ')') + ';'\n self.pvalue = float(values[2])\n self.branch_pvalue_str = values[3]\n self.c = cafe_fig_instance\n return\n\n def get_tree_with_famsizes(self):\n self.fam_sizes = []\n size_tree = ete3.Tree(self.nwk_famsize_str)\n self.tree = copy.deepcopy(self.c.tree)\n # parse family sizes:\n for node, size_tree_node in zip(\n self.tree.traverse(),\n size_tree.traverse()\n ):\n if size_tree_node.is_leaf():\n node.fam_size = int(size_tree_node.name.split('_')[1])\n else:\n node.fam_size = int(size_tree_node.support)\n self.fam_sizes.append(node.fam_size)\n node.event = None\n # parse family pvalues:\n node_pvalues = re.findall(r'[\\d\\.]+|-', self.branch_pvalue_str)\n for node_id, node_size in zip(self.c.cafe_node_id_order, node_pvalues):\n nodes = self.tree.search_nodes(id=node_id)\n assert len(nodes) == 1\n node = nodes[0]\n if node_size == '-' or self.pvalue > self.c.family_p_cutoff:\n node.pvalue = None\n else:\n node.pvalue = float(node_size)\n if node.pvalue <= self.c.branch_p_cutoff:\n if node.fam_size > node.up.fam_size:\n node.event = '+'\n elif node.fam_size < node.up.fam_size:\n node.event = '-'\n return\n\n\ndef main(report_cafe, families, clades, pb, pf, dump, gfx_output_format, count_all_expansions):\n # parse initial information (phylogeny and CAFE output formats)\n c = CAFE_fig(report_cafe, families, clades, pb, pf, dump, gfx_output_format, count_all_expansions)\n\n # show a tree that shows how many total expansions/contractions\n # occured at each node\n c.summary_tree()\n\n # show a tree for each gene family, unless the user specified a filter\n # rule (specific families, or families that changed in a specific clade)\n for family in c:\n if hasattr(c, 'families_of_interest'):\n if family.name not in c.families_of_interest:\n continue # skip family since the user didn't specifically select it\n family.get_tree_with_famsizes() # prepare to plot\n else:\n if family.pvalue > c.family_p_cutoff:\n continue # skip family since it's not significant\n family.get_tree_with_famsizes()\n if hasattr(c, 'clades_of_interest'):\n for __, node_id in c.clades_of_interest:\n p_value = family.tree.search_nodes(id=node_id)[0].pvalue\n if p_value <= c.branch_p_cutoff:\n break\n else: # loop wasnt 
broken = no significant event found\n continue\n c.show_fam_size_tree(family)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Parses a CAFE output file (.cafe) and plots a summary tree '\n 'that shows the average expansion/contraction across the phylogeny; a tree '\n 'that shows which clades evolved under the same lambda (if available); and '\n 'a gene family evolution tree for each user-specified gene family.')\n parser.add_argument('report_cafe', help='the file report.cafe (or similar name)')\n parser.add_argument('-f', '--families', help='only show families with these IDs',\n nargs='+')\n parser.add_argument('-c', '--clades', help='only show families that are '\n 'expanded/contracted at this clade. Format: [clade]='\n '[leaf],[leaf] where clade is the name of the last '\n 'common ancestor of the two leaves, e.g.: Isoptera=zne,mna',\n nargs='+')\n parser.add_argument('-pb', help='branch p-value cutoff (default: 0.05)',\n default=0.05, type=float)\n parser.add_argument('-pf', help='family p-value cutoff (default: 0.05)',\n default=0.05, type=float)\n parser.add_argument('-d', '--dump', help='don\\'t open trees in a window, write '\n 'them to files in the specified directory instead (default: '\n 'off)', default=None)\n parser.add_argument('-g', '--gfx_output_format', default='.pdf', help='output '\n 'format for the tree figures when using --dump [svg|pdf|png]'\n ' (default: pdf)')\n parser.add_argument('--count_all_expansions', action='store_true', help='count '\n 'and write down the number of *all* expansions and contrac'\n 'tions (default: only count significant expansions/contrac'\n 'tions)')\n args = parser.parse_args()\n if args.families:\n if args.clades:\n print('\\n########\\nWarning! \"--families\" overrides \"--clades\".\\n########\\n')\n main(**vars(args))\n"
},
{
"alpha_fraction": 0.6765885353088379,
"alphanum_fraction": 0.6881992816925049,
"avg_line_length": 47.32653045654297,
"blob_id": "1bad3a702841934b2b285f231f8b4e331cf731a2",
"content_id": "3541afc97390fc4030168a148c4f1a95c23eade2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4737,
"license_type": "no_license",
"max_line_length": 520,
"num_lines": 98,
"path": "/README.md",
"repo_name": "erosminer/CAFE_fig",
"src_encoding": "UTF-8",
"text": "CAFE_fig, a visualization tool for CAFE.\n=========\n\n[CAFE](https://hahnlab.github.io/CAFE/) (Computational Analysis of gene Family\nEvolution) is a software that provides a statistical foundation for evolutionary\ninferences about changes in gene family size.\n\nThe visualization and interpretation of CAFE results usually requires custom\nscripts. Here, I provide such a custom script.\n\nCAFE_fig takes a .cafe output file and produces:\n- a summary tree that shows the average expansion/contraction of families across the phylogeny\n- a tree that denotes which branches evolve under which lambda (if a model with multiple lambdas was used)\n- a tree for each family of interest, i.e. families that the user specified by ID or families that showed significant change at a user-specified clade of interest\n\n\n\nRequirements\n------------\n\n\nCAFE_fig requires Python3.4+ and ETE3:\nInstall ETE3 with\n\n`pip3 install 'ete3==3.0.0b35'`\n\nIt's important that you use ETE3 version 3.0.0b35 since it appears that the latest ETE3 version causes problems that are beyond my control (see issue [#1](https://github.com/LKremer/CAFE_fig/issues/1)). This ETE3 version runs well with PyQt4 but not PyQt5, so if you're experiencing issues it's worth a try to switch to PyQt4. \n\n\nUsage\n------------\n\n```\nusage: CAFE_fig.py [-h] [-f FAMILIES [FAMILIES ...]] [-c CLADES [CLADES ...]]\n [-pb PB] [-pf PF] [-d DUMP] [-g GFX_OUTPUT_FORMAT]\n [--count_all_expansions]\n report_cafe\n\nParses a CAFE output file (.cafe) and plots a summary tree that shows the\naverage expansion/contraction across the phylogeny; a tree that shows which\nclades evolved under the same lambda (if available); and a gene family\nevolution tree for each user-specified gene family.\n\npositional arguments:\n report_cafe the file report.cafe (or similar name)\n\noptional arguments:\n -h, --help show this help message and exit\n -f FAMILIES [FAMILIES ...], --families FAMILIES [FAMILIES ...]\n only show families with these IDs\n -c CLADES [CLADES ...], --clades CLADES [CLADES ...]\n only show families that are expanded/contracted at\n this clade. Format: [clade]=[leaf],[leaf] where clade\n is the name of the last common ancestor of the two\n leaves, e.g.: Isoptera=zne,mna\n -pb PB branch p-value cutoff (default: 0.05)\n -pf PF family p-value cutoff (default: 0.05)\n -d DUMP, --dump DUMP don't open trees in a window, write them to files in\n the specified directory instead (default: off)\n -g GFX_OUTPUT_FORMAT, --gfx_output_format GFX_OUTPUT_FORMAT\n output format for the tree figures when using --dump\n [svg|pdf|png] (default: pdf)\n --count_all_expansions\n count and write down the number of *all* expansions\n and contractions (default: only count significant\n expansions/contractions)\n```\n\n\nExample outputs\n------------\n\nSummary tree that shows the average expansion/contraction (radius of node circles),\nthe numbers of expanded/contracted families (+/-), and the estimated gene gain/loss\nrates (blue: low rate; red: high rate).\n\n\nExample output for a specific gene family. 
Numbers and node sizes represent the family size at each node.\nSignificant expansions are shown in green, significant contractions in magenta.\n\n\n\nExample usage\n------------\n\nTo recreate the plots shown above, use this command:\n\n`python3 ./CAFE_fig.py example_result.cafe -c Isoptera=zne,mna -pb 0.05 -pf 0.05 --dump test/ -g .pdf --count_all_expansions`\n\nThis reads \"example_result.cafe\" and dumps all figures in PDF format to the directory \"test/\". The summary tree (\"summary.pdf\") will show the whole phylogeny and the number of expansions and contractions (including insignificant ones!) as shown below. Further family-wise trees will be created and dumped in the directory \"test/families\". These trees will only be created for families that showed a significant (p<=0.05) expansion/contraction at the node \"Isoptera\", which is the last common ancestor of \"zne\" and \"mna\".\n\nSignificant contractions are marked in magenta, significant expansions are marked in green (p<=0.001 = \\*\\*\\*, p<=0.01 = \\*\\*, p<=0.05 = \\*).\n\nKnown issues\n------------\n\nThe error message `module 'ete3' has no attribute 'TreeStyle'` is caused by a known problem with ete3 that is beyond my control.\n[Check this link for possible solutions!](https://github.com/etetoolkit/ete/issues/354). \n"
}
] | 2 |
Cowhitewhite/spider | https://github.com/Cowhitewhite/spider | 87ed91629101fd7646daf0b72e3ca918d26701c6 | 412054b2a47be462e613d1545386a85bfc0881cd | d7430640ec4c2f14971af491dc5d49cceff148a9 | refs/heads/master | 2023-02-24T03:37:39.426980 | 2021-02-05T09:55:31 | 2021-02-05T09:55:31 | 332,417,873 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5398170351982117,
"alphanum_fraction": 0.5804811716079712,
"avg_line_length": 32.15730285644531,
"blob_id": "537d8ed7633efd615b76519e2c725ed1f41de0dd",
"content_id": "6882feb8961658ae0f3288fbf2ad7ca5f0ab5a99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3337,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 89,
"path": "/51job.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "# coding = utf-8\n\nfrom bs4 import BeautifulSoup # 网页解析,获取数据\nimport re # 正则表达式,进行文字匹配\nimport urllib.request, urllib.error # 制定url,获取网页数据\nimport xlwt # 进行excel操作\nimport json\n\n\ndef main(city, search):\n # 090200代表城市 000000代表区域 00行业领域 1代表页数 9发布日期类型 99代表薪酬范围(01-99 )\n baseUrl = \"https://search.51job.com/list/\" + city + \",000000,0000,00,9,99,\" + search + \",2,{}.html\"\n # baseUrl = \"https://search.51job.com/list/090200,000000,0000,00,9,99,java,2,1.html\"\n datalist = getData(baseUrl)\n print(datalist)\n savePath = \".\\\\51job.xls\"\n saveData(datalist, savePath)\n\n\n# 影片详情\nsearchInfo = re.compile(r'window.__SEARCH_RESULT__\\s=\\s(.*)(?=<\\/script>)') # 正则表达式对象规则\n\n\n# 爬取网页\ndef getData(baseUrl):\n datalist = []\n diclist = []\n for i in range(1, 69): # 调用获取页面信息函数10次\n url = baseUrl.format(i)\n html = askUrl(url) # 保存获取到的网页源码\n # 逐一解析数据\n soup = BeautifulSoup(html, \"html.parser\")\n for item in soup.find_all('script', type='text/javascript'): # 查找符合要求字符串,形成列表\n item = str(item)\n info = re.findall(searchInfo, item)\n if len(info) != 0:\n diclist.append(info[0])\n for data in diclist:\n # 转换为json\n jsonStr = json.dumps(data)\n # 转换为字典\n result = json.loads(json.loads(jsonStr))\n for r in result['engine_search_result']:\n value = [str(r['job_href']), str(r['job_name']), str(r['company_name']), str(r['providesalary_text']),\n str(r['workarea_text']), '', str(r['jobwelf_list']), str(r['companytype_text']), str(r['companysize_text']),\n str(r['attribute_text']),str(r['workyear'])]\n datalist.append(value)\n return datalist\n\n\n# 保存数据\ndef saveData(datalist, path):\n print(\"save.....\")\n workbook = xlwt.Workbook(encoding=\"utf-8\", style_compression=0) # 创建workbook对象\n sheet = workbook.add_sheet(sheetname=\"job\", cell_overwrite_ok=True) # 创建sheet对象\n row = (\"工作链接\", \"工作名称\", \"公司名称\", \"薪资\", \"区域\", \"学历\", \"详情\", \"公司类型\", \"规模\", \"工作清单\", \"工作年限\")\n for i in range(0, len(row)):\n sheet.write(0, i, row[i])\n for i in range(0, len(datalist)):\n print(\"第%d条\" % (i + 1))\n data = datalist[i]\n for j in range(0, 11):\n sheet.write(i + 1, j, data[j])\n workbook.save(path)\n\n\n# 得到指定一个URL的网页内容\ndef askUrl(url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"\n }\n req = urllib.request.Request(url, headers=headers)\n html = \"\"\n try:\n response = urllib.request.urlopen(req)\n html = response.read().decode('gbk')\n print(url)\n except urllib.error.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n return html\n\n\nif __name__ == \"__main__\":\n # 调用函数\n main(\"090200\", \"java\")\n print(\"complete!!!\")\n"
},
{
"alpha_fraction": 0.528384268283844,
"alphanum_fraction": 0.5644105076789856,
"avg_line_length": 38.869564056396484,
"blob_id": "9732361d6e83f4188c436c7889fce64642459949",
"content_id": "6b04b18cc605d4eca70b7ff21e1cbeceae6964d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 916,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 23,
"path": "/city.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "from lxml import etree\nimport requests\n\nif __name__ == '__main__':\n url = 'https://www.aqistudy.cn/historydata/'\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"}\n html = requests.get(url=url, headers=headers).text\n tree = etree.HTML(html)\n # hot_list = tree.xpath('//div[@class = \"bottom\"]/ul/li')\n # all_citys = []\n # for li in hot_list:\n # name = li.xpath('./a/text()')[0]\n # all_citys.append(name)\n # all_list = tree.xpath('//div[@class = \"bottom\"]/ul/div[2]/li')\n # for i in all_list:\n # name_ = i.xpath('./a/text()')[0]\n # all_citys.append(name_)\n # print(all_citys)\n citys = tree.xpath('//div[@class = \"bottom\"]/ul/li | //div[@class = \"bottom\"]/ul/div[2]/li')\n for city in citys:\n name = city.xpath('./a/text()')[0]\n print(name)"
},
{
"alpha_fraction": 0.6656716465950012,
"alphanum_fraction": 0.6656716465950012,
"avg_line_length": 18.764705657958984,
"blob_id": "1ad40dad02c5ac653b6cff16c7fbc806ea291226",
"content_id": "4f83c6a79812dc3b61e29b0b67e0fd24ed877672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 517,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/test/testRe.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "import re\n\n# re库正则表达式\n\n# pat = re.compile(\"AA\") # 此处AA是正则表达式,用来验证其他的字符串\n# m = pat.search(\"ASAABRAAAA\") # search字符串是被校验的内容\n\n# 没有模式对象\n# m = re.search(\"asd\",\"Aasd\")\n# print(m)\n# print(re.findall(\"[A-Z]\",\"ASDaDDDa\"))\n\n# sub函数\nprint(re.sub(\"a\",\"A\",\"aaaAAAAAvs\")) # 找到a用A替换,在第三个字符串中查找\"A\"\n\n# 建议在正则表达式中,被比较的字符串前面加上r,不用担心转义字符的问题\nprint(r\"\\aa\\'\")"
},
{
"alpha_fraction": 0.6184649467468262,
"alphanum_fraction": 0.6312569379806519,
"avg_line_length": 21.734176635742188,
"blob_id": "790d10dae5400825d6e31702710800d15f362274",
"content_id": "4d82fe38f91d07651b7e816787f49cad22203dcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2174,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 79,
"path": "/test/testBs4.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "'''\n BeautifulSoup4将复杂html文档转换成一个复杂的树形结构,每个节点都是一个python对象,大致分为\n - Tag\n - NavigableString\n - BeautifulSoup\n - Comment\n'''\nfrom bs4 import BeautifulSoup\nimport re\n\nreName = re.compile(\"\")\n\nfile = open(\"./test.html\",\"rb\")\nhtml = file.read().decode(\"utf-8\")\nbs = BeautifulSoup(html,\"html.parser\")\nprint(bs.findAll('dd')[0].text)\nprint(bs.findAll('dd')[6].find('a').get('href'))\n# print(bs.title)\n# print(bs.a)\n# print(bs.head)\n# print(type(bs.head))\n# print(bs.title.string)\n# print(type(bs.title.string))\n# print(bs.a.attrs)\n# print(bs.name)\n# print(bs.a.string)\n\n\n# 1.Tag 标签及其内容:拿到它所找到的第一个内容\n# 2.NavigableString 标签里边的内容(字符串)\n# 3.BeautifulSoup 表示整个文档\n# 3.Comment 是一个特殊的NavigableString,输出的内容不包含注释符号\n\n# --------------------------------------------\n\n# 文档的遍历\n# print(bs.head.contents[1])\n# for i in bs.head.contents:\n# print(i)\n\n# 文档的搜索\n# 1. find_all() 字符转过滤:会查找与字符串完全匹配的内容\n# t_list = bs.find_all(\"a\")\n# for i in t_list:\n# print(i)\n\n# search() 正则表达式搜索\n# t_list = bs.find_all(re.compile(\"a\"))\n# 方法: 传入一个函数(方法),根据函数的要求搜索\n# def name_is_exists(tag):\n# return tag.has_attr(\"name\")\n# t_list = bs.find_all(name_is_exists)\n# for item in t_list:\n# print(item)\n\n# 2. kwargs参数\n# t_list = bs.find_all(id=\"head\")\n# t_list = bs.find_all(class_=True)\n# for item in t_list:\n# print(item)\n\n# 3. text参数\n# t_list = bs.find_all(text=\"hao123\")\n# t_list = bs.find_all(text=[\"hao123\",\"地图\",\"贴吧\"])\n# t_list = bs.find_all(text= re.compile(\"\\d\"))\n# for item in t_list:\n# print(item)\n\n# 3. limit参数\n# t_list = bs.find_all(text=\"a\",limit=3)\n\n\n# CSS选择器\n# print(bs.select(\"title\")) # 通过标签来查找\n# print(bs.select(\".mnav\")) # 通过class雷鸣查找\n# print(bs.select(\"#u1\")) # 通过id查找\n# print(bs.select(\"a[class = 'bri']\")) # 通过属性查找\n# print(bs.select(\"head > title\")) # 通过子标签查找\n# print(bs.select(\".mnav ~ .bri\")[0].get_text())\n\n\n"
},
{
"alpha_fraction": 0.5414688587188721,
"alphanum_fraction": 0.6132365465164185,
"avg_line_length": 33.766990661621094,
"blob_id": "284f000b88301fe41dc6f498965e904db984cde2",
"content_id": "25a8d7f34365905bf7e3b73e7278d2d7e84d0f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3651,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 103,
"path": "/btdownload.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "import requests\nimport threading\nimport xlwt\nfrom bs4 import BeautifulSoup\nimport json\nimport time\n\ndatalit = []\nuser_agent_list = [\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0\",\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15\",\n ]\n\ndef download_page(url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"}\n r = requests.get(url, headers=headers)\n r.encoding = 'utf-8'\n return r.text\n\n\ndef get_bt(html):\n soup = BeautifulSoup(html, 'html.parser')\n bt_list = soup.find('div', class_='body threadlist').find_all('table')\n for table in bt_list:\n info = table.find('a', class_='subject_link thread-new')\n if info is not None:\n link = info.get('href')\n open_info(link)\n\n\ndef open_info(link):\n html = download_page(link)\n time.sleep(2)\n soup = BeautifulSoup(html, 'html.parser')\n if soup.find('div', class_='attachlist') is not None:\n td_list = soup.find('div', class_='attachlist').findAll('td')\n for td in td_list:\n ajax = td.find('a', class_='ajaxdialog')\n if ajax is not None:\n ajax_url = ajax.get('href')\n print(ajax_url)\n open_result(ajax_url)\n\n\n# 此处需要调用接口\ndef open_result(url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"\n }\n rsp = requests.get(url, headers=headers)\n dic = json.loads(rsp.content.decode('utf-8'))\n html = str(dic['message']['body'])\n result = BeautifulSoup(html, 'html.parser')\n dds = result.findAll('dd')\n data = []\n data.append(str(dds[0].text))\n data.append(str(dds[6].find('a').get('href')))\n datalit.append(data)\n time.sleep(2)\n\n\ndef writeExcel(datalist):\n print(\"save.....\")\n workbook = xlwt.Workbook(encoding=\"utf-8\", style_compression=0) # 创建workbook对象\n sheet = workbook.add_sheet(sheetname=\"movie\", cell_overwrite_ok=True) # 创建sheet对象\n row = (\"电影名称\", \"BT下载地址\")\n for i in range(0, len(row)):\n sheet.write(0, i, row[i])\n for i in range(0, len(datalist)):\n print(\"第%d条\" % (i + 1))\n data = datalist[i]\n for j in range(0, 2):\n sheet.write(i + 1, j, data[j])\n workbook.save(\"test.xls\")\n\n\ndef execute(url, datalist):\n page_html = download_page(url)\n get_bt(page_html)\n writeExcel(datalist)\n\n\n# 主函数\ndef main():\n queue = [i for i in range(1, 10)]\n threads = []\n while len(queue) > 0:\n cur_page = queue.pop(0)\n url = 'http://www.33btjia.com/forum-index-fid-1-page-{}.htm'.format(cur_page)\n print(url)\n execute(url, datalit)\n print('{}正在下载{}页'.format(threading.current_thread().name, cur_page))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5751634240150452,
"alphanum_fraction": 0.6427015066146851,
"avg_line_length": 37.33333206176758,
"blob_id": "fc70f297ffbfa57e0e11645cf2c2fdb765988783",
"content_id": "1be44075b6f65453782f120f3daafea644ba3df5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 12,
"path": "/58.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "from lxml import etree\nimport requests\n\nif __name__ == '__main__':\n url = 'https://cd.58.com/ershoufang/'\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"}\n html = requests.get(url, headers).text\n #数据解析\n tree = etree.HTML(html)\n titles = tree.xpath('//h3[@class=\"property-content-title-name\"]/text()')\n for title in titles:\n print(title)"
},
{
"alpha_fraction": 0.4816414713859558,
"alphanum_fraction": 0.49460044503211975,
"avg_line_length": 37.66666793823242,
"blob_id": "d1efa0a808a8f255b1c8c29c878f6e0249123559",
"content_id": "3e85e820415bbcd8b61e9054cedfe5847acff050",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 12,
"path": "/test/xpathtest.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "from lxml import etree\n\nif __name__ == '__main__':\n tree = etree.parse('test/baidu.html')\n # r = tree.xpath('/html/body/div')\n # r = tree.xpath('/html//div')\n # r = tree.xpath('//div')\n # r = tree.xpath('//div[@id = \"u1\"]') # 属性定位\n # r = tree.xpath('//div[@id = \"u1\"]/a[@class = \"bri\"]') # 属性定位\n # r = tree.xpath('//div[@id = \"u1\"]/a[7]/text()')[0] # 获取标签中的文本\n r = tree.xpath('//div[@id = \"u1\"]/a[@class = \"bri\"]/@href') # 取属性\n print(r)"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6298543810844421,
"avg_line_length": 20.710525512695312,
"blob_id": "6bb375f90a7372555684857ab15487030f95d2fe",
"content_id": "108f2f644f8842d59035515b488ced497a0a7e5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 38,
"path": "/test/testwordcloud.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "import jieba # 分词\nfrom matplotlib import pyplot # 绘图,数据可视化\nfrom wordcloud import WordCloud # 词云\nfrom PIL import Image # 图片处理\nimport numpy # 矩阵运算\nimport sqlite3\n\n# 数据准备\nconn = sqlite3.connect(\"../movie.db\")\ncur = conn.cursor()\nsql = \"select introduction from movie\"\ndata = cur.execute(sql)\ntext = \"\"\nfor i in data:\n text += i[0]\ncur.close()\nconn.close()\n\n# 分词\ncut = jieba.cut(text)\nstring = ' '.join(cut)\nprint(len(string))\n\nimg = Image.open(r'tree.jpg')\nimg_array = numpy.array(img) # 图片转换为数组\nwc = WordCloud(\n background_color='white',\n mask=img_array,\n font_path='simkai.ttf'\n)\nwc.generate_from_text(string)\n\n# 绘制图片\nfig = pyplot.figure(1)\npyplot.imshow(wc)\npyplot.axis('off') # 是否显示坐标轴\n# pyplot.show() # 显示生成的词云\npyplot.savefig(\".\\word.jpg\")"
},
{
"alpha_fraction": 0.6404494643211365,
"alphanum_fraction": 0.6719101071357727,
"avg_line_length": 26.875,
"blob_id": "b97b2deea859b88c04065b5818c320a0c1e36209",
"content_id": "2554751a05d6856f5c89b79eef31685d2e1e2b12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 16,
"path": "/test/testXlwt.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "import xlwt\n\n'''\nworkbook = xlwt.Workbook(encoding=\"utf-8\") #创建workbook对象\nsheet = workbook.add_sheet(sheetname=\"sheet1\") #创建sheet对象\nsheet.write(0,0,\"hello\")\nworkbook.save(\"student.xls\")\n'''\nworkbook = xlwt.Workbook(encoding=\"utf-8\") #创建workbook对象\nsheet = workbook.add_sheet(sheetname=\"sheet1\") #创建sheet对象\n\nfor i in range(0,9):\n for j in range(0,i+1):\n sheet.write(i,j,\"%d * %d = %d\"%(i+1,j+1,(i+1)*(j+1)))\n\nworkbook.save(\"student.xls\")"
},
{
"alpha_fraction": 0.5736263990402222,
"alphanum_fraction": 0.5901098847389221,
"avg_line_length": 18.80434799194336,
"blob_id": "221eb92715ecde780a499b1e5b3151aacd220c7e",
"content_id": "bdf3f132b87061e5c8c911d853de3716b334d826",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 46,
"path": "/test/testSql.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\n\n# conn = sqlite3.connect(\"test.db\") # 打开或创建数据库文件\n# print(\"open database....\")\n# c = conn.cursor()\n# sql = '''\n# create table test\n# (id int primary key not null,\n# name text not null,\n# age int not null,\n# address char(500),\n# salary real);\n# '''\n# c.execute(sql)\n# conn.commit()\n# conn.close()\n# print(\"table complete....\")\n\n\n\n# 插入数据\n# conn = sqlite3.connect(\"test.db\") # 打开或创建数据库文件\n# print(\"open database....\")\n# c = conn.cursor()\n# sql = '''\n# insert into test(id,name,age,address,salary) values\n# (1,\"张三\",\"123\",\"test\",8000);\n# '''\n# c.execute(sql)\n# conn.commit()\n# conn.close()\n# print(\"table complete....\")\n\n# 查询数据\nconn = sqlite3.connect(\"test.db\") # 打开或创建数据库文件\nprint(\"open database....\")\nc = conn.cursor()\nsql = '''\n select * from test\n'''\ncursor = c.execute(sql)\nfor row in cursor:\n print(row)\nconn.close()\nprint(\"table complete....\")"
},
{
"alpha_fraction": 0.6448979377746582,
"alphanum_fraction": 0.6891156435012817,
"avg_line_length": 36.71794891357422,
"blob_id": "222abdf88b28ea76467f8e2acccccda4e943c7b4",
"content_id": "93519840d4a62750375a5f3f227c35ebd1c033d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1506,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 39,
"path": "/test/testUrllib.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "import urllib.request,urllib.parse,urllib.error\n\n# 获取一个get请求\n# response = urllib.request.urlopen(\"http://www.baidu.com\")\n# print(response.read().decode('utf-8'))\n\n# 获取一个post请求\n# data = bytes(urllib.parse.urlencode({\"username\" : \"tomas\"}), encoding = \"utf-8\")\n# response = urllib.request.urlopen(\"http://httpbin.org/post\",data = data)\n# print(response.read().decode(\"utf-8\"))\n\n# 超时处理\n# try:\n# response = urllib.request.urlopen(\"http://httpbin.org/get\",timeout= 0.1)\n# print(response.read().decode(\"utf-8\"))\n# except urllib.error.URLError as e:\n# print(e,'time out!!!')\n\n# response = urllib.request.urlopen(\"http://www.baidu.com\")\n# print(response.getheaders())\n\n# 封装header\n# url = \"http://httpbin.org/post\"\n# data = bytes(urllib.parse.urlencode({\"username\" : \"tomas\"}), encoding = \"utf-8\")\n# headers = {\n# \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"\n# }\n# method = \"POST\"\n# request = urllib.request.Request(url = url, data=data,headers= headers,method = method)\n# response = urllib.request.urlopen(request)\n# print(response.read().decode(\"utf-8\"))\n\nurl = \"https://www.douban.com\"\nheaders = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"\n}\nrequest = urllib.request.Request(url = url, headers= headers)\nresponse = urllib.request.urlopen(request)\nprint(response.read().decode(\"utf-8\"))"
},
{
"alpha_fraction": 0.5154769420623779,
"alphanum_fraction": 0.5306380391120911,
"avg_line_length": 28.314815521240234,
"blob_id": "6b28d351d8305177f7bf6b9d37cd596811e46d46",
"content_id": "b22a3a1e02e0034432af47a0a015cba3cc9430b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5227,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 162,
"path": "/douban.py",
"repo_name": "Cowhitewhite/spider",
"src_encoding": "UTF-8",
"text": "# coding = utf-8\n\nfrom bs4 import BeautifulSoup # 网页解析,获取数据\nimport re # 正则表达式,进行文字匹配\nimport urllib.request, urllib.error # 制定url,获取网页数据\nimport xlwt # 进行excel操作\nimport sqlite3 # 进行SQLite数据库操作\n\n\ndef main():\n baseUrl = \"https://movie.douban.com/top250?start=\"\n dataList = getData(baseUrl)\n # savePath = \".\\\\top250.xls\"\n # saveData(dataList, savePath)\n dbPath = \"movie.db\"\n saveDb(dataList, dbPath)\n\n\n# 影片详情\nfindlink = re.compile(r'<a href=\"(.*?)\">') # 正则表达式对象规则\n# 影片图片链接\nfindImgSrc = re.compile(r'<img.*src=\"(.*?)\"', re.S) # re.S 让换行符包含在字符中\n# 影片片面\nfindTitle = re.compile(r'<span class=\"title\">(.*)</span>')\n# 影片评分\nfindScore = re.compile(r'<span class=\"rating_num\" property=\"v:average\">(.*)</span>')\n# 评价人数\nfindReview = re.compile(r'<span>(\\d*)人评价</span>')\n# 概况\nfindInfo = re.compile(r'<span class=\"inq\">(.*)</span>')\n# 影片相关类容\nfindBd = re.compile(r'<p class=\"\">(.*?)</p>', re.S)\n\n\n# 爬取网页\ndef getData(baseUrl):\n dataList = []\n for i in range(0, 10): # 调用获取页面信息函数10次\n url = baseUrl + str(i * 25)\n html = askUrl(url) # 保存获取到的网页源码\n # 逐一解析数据\n soup = BeautifulSoup(html, \"html.parser\")\n for item in soup.find_all(\"div\", class_=\"item\"): # 查找符合要求字符串,形成列表\n data = [] # 电影信息\n item = str(item)\n # 影片详情\n link = re.findall(findlink, item)[0]\n data.append(link)\n # 影片图片链接\n imgSrc = re.findall(findImgSrc, item)[0]\n data.append(imgSrc)\n # 影片标题\n titles = re.findall(findTitle, item)\n if (len(titles) == 2):\n cnTitle = titles[0]\n data.append(cnTitle)\n enTitle = titles[1].replace(\"/\", \"\")\n data.append(enTitle)\n else:\n data.append(titles[0])\n data.append(\" \")\n # 评价分数\n score = re.findall(findScore, item)[0]\n data.append(score)\n # 评价人数\n review = re.findall(findReview, item)[0]\n data.append(review)\n # 概况\n infos = re.findall(findInfo, item)\n if len(infos) != 0:\n info = infos[0].replace(\"。\", \"\")\n data.append(info)\n else:\n data.append(\" \")\n # 影片相关类容\n bd = re.findall(findBd, item)[0]\n bd = re.sub('<br(\\s+)?/>(\\s+)?', \"\", bd) # 去掉<br/>\n bd = re.sub(\"/\", \"\", bd)\n data.append(bd.strip()) # 去掉前后空格\n\n dataList.append(data)\n return dataList\n\n\n# 保存数据\ndef saveData(datalist, path):\n print(\"save.....\")\n workbook = xlwt.Workbook(encoding=\"utf-8\", style_compression=0) # 创建workbook对象\n sheet = workbook.add_sheet(sheetname=\"top250\", cell_overwrite_ok=True) # 创建sheet对象\n row = (\"详情链接\", \"图片链接\", \"影片中文名\", \"影片外文名\", \"评分\", \"评价数\", \"概况\", \"相关信息\")\n for i in range(0, len(row)):\n sheet.write(0, i, row[i])\n for i in range(0, len(datalist)):\n print(\"第%d条\" % (i + 1))\n data = datalist[i]\n for j in range(0, 8):\n sheet.write(i + 1, j, data[j])\n workbook.save(path)\n\n\n# 得到指定一个URL的网页内容\ndef askUrl(url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"\n }\n req = urllib.request.Request(url, headers=headers)\n html = \"\"\n try:\n response = urllib.request.urlopen(req)\n html = response.read().decode('utf-8')\n # print(html)\n except urllib.error.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n return html\n\n\ndef saveDb(datalist, dbpath):\n # init_db(dbpath)\n conn = sqlite3.connect(dbpath)\n cursor = conn.cursor()\n for data in datalist:\n for index in range(len(data)):\n if index == 4 or index == 5:\n continue\n data[index] = '\"' + data[index] + '\"'\n sql = '''\n insert into 
movie(info_link,pic_link,cn_name,en_name,score,rated,introduction,info)\n values (%s)'''%\",\".join(data)\n print(sql)\n cursor.execute(sql)\n conn.commit()\n cursor.close()\n conn.close()\n\ndef init_db(dbpath):\n sql = '''\n create table movie(\n id integer primary key autoincrement,\n info_link text,\n pic_link text,\n cn_name varchar,\n en_name varchar ,\n score numeric,\n rated numeric,\n introduction text,\n info text\n );\n '''\n conn = sqlite3.connect(dbpath)\n cursor = conn.cursor()\n cursor.execute(sql)\n conn.commit()\n conn.close()\n\n\nif __name__ == \"__main__\":\n # 调用函数\n main()\n print(\"complete!!!\")\n"
}
] | 12 |
JHelar/LabTask2 | https://github.com/JHelar/LabTask2 | 2942c5926d96721006b5b2d1e48afc493cbfa5a5 | e44bef378dd9c061b85934b647e2d059a8caa2d7 | fef3dc91676056b76277456fe0c9df07da6ab62f | refs/heads/master | 2021-05-26T20:53:32.506790 | 2013-12-20T14:30:04 | 2013-12-20T14:30:04 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6629572510719299,
"alphanum_fraction": 0.6696548461914062,
"avg_line_length": 36.7470817565918,
"blob_id": "bcf428046e0acfd115229de00c5147327a6c2b0a",
"content_id": "5b2264fef4af4b23a70540329899731e79d02dd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9705,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 257,
"path": "/LabTask2/LabTask2.py",
"repo_name": "JHelar/LabTask2",
"src_encoding": "UTF-8",
"text": "import operator\n\n#Class Person, general class for a person with it's constraints in form of strings.\nclass Person:\n name = \"\"\n status = \"\"\n smoker = \"\"\n manyVisitors = \"\"\n potentialRoomKey = list()\n\n def __init__(self,name,status,smoker,manyVisitors):\n self.name = name\n self.status = status\n self.smoker = smoker\n self.manyVisitors = manyVisitors\n\n#Class Room, class for a room with a size of how many can be in that room.\nclass Rooms:\n amount = 0\n currentAmount = 0\n contains = []\n roomKey = \"\"\n\n def __init__(self,amount,roomKey):\n self.contains = []\n self.amount = amount\n self.roomKey = roomKey\n\n#Class Office, hold the list of people and officerooms, reads in people and rooms.\nclass Office:\n persons = list()\n unAssignedPeople = list()\n originalPeopleList = list()\n officeRooms = dict()\n def __init__(self):\n self.officeRooms['T13'] = Rooms(1,'T13')\n self.officeRooms['T14'] = Rooms(1,'T14')\n self.officeRooms['T15'] = Rooms(1,'T15')\n self.officeRooms['T16'] = Rooms(1,'T16')\n self.officeRooms['T11'] = Rooms(2,'T11')\n self.officeRooms['T12'] = Rooms(2,'T12')\n self.officeRooms['T10'] = Rooms(3,'T10')\n self.officeRooms['T17'] = Rooms(3,'T17')\n self.officeRooms['T18'] = Rooms(3,'T18')\n self.setPeople()\n\n def setPeople(self):\n self.persons.append(Person(\"E\",\"researcher\",\"smoker\",\"many visitors\"))\n self.persons.append(Person(\"F\",\"researcher\",\"non-smoker\",\"few visitors\"))\n self.persons.append(Person(\"G\",\"researcher\",\"non-smoker\",\"few visitors\"))\n self.persons.append(Person(\"H\",\"PhD Student\",\"non-smoker\",\"many visitors\"))\n self.persons.append(Person(\"I\",\"PhD Student\",\"smoker\",\"few visitors\"))\n self.persons.append(Person(\"J\",\"PhD Student\",\"smoker\",\"few visitors\"))\n self.persons.append(Person(\"K\",\"PhD Student\",\"non-smoker\",\"few visitors\"))\n self.persons.append(Person(\"B\",\"professor\",\"non-smoker\",\"many visitors\"))\n self.persons.append(Person(\"D\",\"professor\",\"smoker\",\"many visitors\"))\n self.persons.append(Person(\"A\",\"head\",\"non-smoker\",\"many visitors\"))\n self.persons.append(Person(\"C\",\"professor\",\"non-smoker\",\"few visitors\"))\n self.originalPeopleList = list(self.persons)\n self.unAssignedPeople = list(self.persons)\n\n def refresh(self):\n self.persons = list(self.originalPeopleList)\n self.unAssignedPeople = list(self.originalPeopleList)\n\n#Class Constraints, has the office object in it and all the constraint functions that must be followed. 
\nclass Constraints:\n\n office = Office()\n\n def refresh(self):\n self.office.refresh()\n\n def checkConstraints(self,one,room):\n if not self.FullRoom(room):\n return False\n if not self.Smoker(one,room):\n return False\n if not self.Visitors(one,room):\n return False\n if not self.CheckStatus(one,room):\n return False\n return True\n\n def Smoker(self,one,room):\n for another in room.contains:\n if one.smoker != another.smoker:\n return False\n return True\n \n def Visitors(self,one,room):\n for another in room.contains:\n if one.manyVisitors == \"many visitors\" and another.manyVisitors == \"many visitors\":\n return False\n return True\n\n def FullRoom(self,room):\n if room.currentAmount == room.amount:\n return False\n for another in room.contains:\n if another.status == \"head\" or another.status == \"professor\":\n return False\n else:\n return True\n\n def CheckStatus(self,one,room):\n if one.status == \"head\" or one.status == \"professor\":\n if room.contains == []:\n if room.amount == 2 or room.amount == 3:\n return True\n else:\n return False\n else:\n return True\n#The first backtracking search that will call the recursive function that is not using any heuristics, returns either an assignment list or a \"None\" value\ndef BackTrackingSearch(csp):\n assignment = dict()\n return ReckursiveBacktracking(assignment,csp)\n\n#Recursive backtracking search, tries to assign all the people in to the rooms following all the constraints from the constraints class\ndef ReckursiveBacktracking(assignment,csp):\n if csp.office.persons == []:\n return assignment\n person = csp.office.persons.pop()\n for room in roomList(person,assignment,csp):\n assign(person,room,assignment,csp)\n global count\n count = count + 1\n result = ReckursiveBacktracking(assignment,csp)\n if result != None:\n return result\n unAssign(person,room,assignment,csp)\n csp.office.persons.insert(0,person)\n if len(csp.office.persons) < len(csp.office.unAssignedPeople):\n csp.office.unAssignedPeople = list()\n for p in csp.office.persons:\n csp.office.unAssignedPeople.append(p)\n return None\n\n#Function that returns a list of rooms a given person can be in following the constraints given by the constraints class\ndef roomList(person,assignment,csp):\n returnKeys = list()\n for room in csp.office.officeRooms.keys():\n if csp.checkConstraints(person,csp.office.officeRooms[room]):\n returnKeys.append(room)\n return returnKeys\n\n#Function assigns a person to the assignment list.\ndef assign(person,room,assignment,csp):\n personToAppend = csp.office.officeRooms[room]\n personToAppend.contains.append(person)\n personToAppend.currentAmount += 1\n assignment[person] = room \n\n#Function un assignes a person from the assignment list\ndef unAssign(person,room,assignment,csp):\n roomToDeletePersonFrom = assignment[person]\n csp.office.officeRooms[roomToDeletePersonFrom].contains.remove(person)\n csp.office.officeRooms[roomToDeletePersonFrom].currentAmount -= 1\n del assignment[person]\n\n#Function that returns the leas constrainging value in form of a room that will be the least constraining for the other un assigned people\ndef LeastConstrainingVal(person,csp,assignment):\n testAssignment = dict(assignment)\n testCsp = csp\n bestRoom = 0\n if person.potentialRoomKey == []:\n return None\n elif csp.office.persons == []:\n return person.potentialRoomKey.pop()\n for roomKey in person.potentialRoomKey:\n possibleRoomCount = 0\n assign(person,roomKey,testAssignment,testCsp)\n for affectedPerson in testCsp.office.persons:\n 
tempRoomKeys = roomList(affectedPerson,testAssignment,testCsp)\n possibleRoomCount += len(tempRoomKeys)\n if possibleRoomCount > bestRoom:\n bestRoom = possibleRoomCount\n roomToReturn = roomKey\n unAssign(person,roomKey,testAssignment,testCsp) \n if bestRoom == 0:\n return None\n person.potentialRoomKey.remove(roomToReturn)\n return roomToReturn\n\n#Function returns the person that is the most constrained of all current people in the people list\ndef MostConstrainedVariable(csp):\n count = 1000\n findPotential(csp)\n for person in csp.office.persons:\n if len(person.potentialRoomKey) < count:\n count = len(person.potentialRoomKey)\n personToReturn = person\n csp.office.persons.remove(personToReturn)\n return personToReturn\n\n#Function sets what rooms people in the people list can be in, following the constraints\ndef findPotential(csp):\n for p in csp.office.persons:\n p.potentialRoomKey = list(roomList(p,{},csp))\n\n#Backtracking search with heuristic, calls the recursive function that uses the heuristics, returns an assignment or a \"None\" value.\ndef BacktrackingWithH(csp):\n assignment = dict()\n return ReckursiveBacktrackingWithH(assignment,csp)\n\n#Recursive backtracking that uses heuristics.\ndef ReckursiveBacktrackingWithH(assignment,csp):\n if csp.office.persons == []:\n return assignment\n person = MostConstrainedVariable(csp)\n room = LeastConstrainingVal(person,csp,assignment)\n while room != None:\n assign(person,room,assignment,csp)\n global count\n count = count + 1\n result = ReckursiveBacktrackingWithH(assignment,csp)\n if result != None:\n return result\n unAssign(person,room,assignment,csp)\n room = LeastConstrainingVal(person,csp,assignment)\n csp.office.persons.insert(0,person)\n return None\n#Main program\ncsp = Constraints()\ncount = 0 \nassignments = BackTrackingSearch(csp)\n\nif assignments != None:\n for a in assignments.keys():\n print(\"Person:\",a.name,\"constraints:\",a.status,a.smoker,a.manyVisitors,\"room:\",assignments[a])\n del a\n assignments.clear()\nelse:\n print(\"Couldn't assign everybody\")\n print(\"People that couldn't be assigned a room:\")\n for person in csp.office.unAssignedPeople:\n print(person.name,person.status,person.smoker,person.manyVisitors)\nprint(\"It took\",count,\"recursions in order for the assignment to finish\")\n\ndummy = input(\"Press enter to do the assignment with heuristic search!\\n\")\ncount = 0\n\nfor room in csp.office.officeRooms.values():\n room.contains.clear()\n room.currentAmount = 0\n\ncsp.refresh()\nassignments = BacktrackingWithH(csp)\nprint('Heuristic backtracking:')\nif assignments != None:\n for a in assignments.keys():\n print(\"Person:\",a.name,\"constraints:\",a.status,a.smoker,a.manyVisitors,\"room:\",assignments[a])\n assignments.clear()\nelse:\n print(\"Couldn't assign everybody!\")\nprint(\"It took\",count,\"recursions in order for the assignment to finish\")\n\n \n"
}
] | 1 |
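LabTask2 hand-rolls the two classic CSP ordering heuristics: MostConstrainedVariable picks the person with the fewest legal rooms (MRV), and LeastConstrainingVal scores each room by how many options it leaves the others (LCV). A generic sketch of the same two ideas, stripped of the office-specific bookkeeping; `domains` and `count_options` are illustrative names, not from the repo:

def most_constrained_variable(domains):
    # MRV: branch on the variable with the fewest remaining legal values,
    # so dead ends are detected as early as possible.
    return min(domains, key=lambda var: len(domains[var]))

def order_values(var, domains, count_options):
    # LCV: try first the value that leaves the most options open for the
    # other variables (LabTask2 computes this count with trial
    # assign/unAssign calls on a copy of the assignment).
    return sorted(domains[var],
                  key=lambda val: count_options(var, val, domains),
                  reverse=True)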
akash250899/SIFT_Algorithm | https://github.com/akash250899/SIFT_Algorithm | 8145c6d4c666af4b75e5c6e9e3cb0a576ce0a196 | 65786ee1dffce89b1c04e79cb7376285a61ebaca | b8ee9aef71c245fd1554d2c15a4bc0adf7bf3309 | refs/heads/main | 2023-04-02T15:43:14.825855 | 2021-04-06T16:06:32 | 2021-04-06T16:06:32 | 355,239,733 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6818950772285461,
"alphanum_fraction": 0.7309644818305969,
"avg_line_length": 22.13725471496582,
"blob_id": "f78898e8866336377e06785edfbed99ee64a7d0b",
"content_id": "57cf3383bf94d631185a6d147e99de23f26e4a54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 51,
"path": "/detect_obj.py",
"repo_name": "akash250899/SIFT_Algorithm",
"src_encoding": "UTF-8",
"text": "import cv2 \nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\n# read images\nimg1 = cv2.imread('biscuits_packets_train_sift.jpg') \nimg2 = cv2.imread('lays_test_sift.jpg') \n\n \n#img1 = cv2.cvtColor(img1, cv2.COLOR_BGR5552GRAY)\n#img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n#sift\n#Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D.\nsift = cv2.xfeatures2d.SIFT_create()\n\n\n\nys = 200 + np.random.randn(100)\nx = [x for x in range(len(ys))]\n\nplt.plot(x, ys, '-')\nplt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)\n\nplt.title(\"Sample Visualization\")\nplt.show()\n\n\nkeypoints_1, descriptors_1 = sift.detectAndCompute(img1,None)\nkeypoints_2, descriptors_2 = sift.detectAndCompute(img2,None)\n\n#Printing the keypoint\n\nprint(len(keypoints_1), len(keypoints_2))\n\n\n#feature matching\n#Brute force Matcher\nbf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)\n\n\nmatches = bf.match(descriptors_1,descriptors_2)\nprint(len(matches))\n\nmatches = sorted(matches, key = lambda x:x.distance)\n\n\nimg3 = cv2.drawMatches(img1, keypoints_1, img2, keypoints_2, matches[:50], img2, flags=2)\nplt.imshow(img3),plt.show()\n\n\n"
},
{
"alpha_fraction": 0.6436781883239746,
"alphanum_fraction": 0.6754789352416992,
"avg_line_length": 30.071428298950195,
"blob_id": "3d0850fa0cbcd0c8fa98ca968c8de87832c26bf0",
"content_id": "51b907f00f75a8582b7241e74da862edc1942c62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2610,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 84,
"path": "/object_detection.py",
"repo_name": "akash250899/SIFT_Algorithm",
"src_encoding": "UTF-8",
"text": "import time \nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Threshold \nMIN_MATCH_COUNT=30\n\n# Initiate SIFT detector\nsift=cv2.xfeatures2d.SIFT_create()\n\n\n\n# Create the Flann Matcher object\nFLANN_INDEX_KDITREE=0\nflannParam=dict(algorithm=FLANN_INDEX_KDITREE,tree=5)\nflann=cv2.FlannBasedMatcher(flannParam,{})\n\n\n\ntrain_img= cv2.imread(\"../res/obama1.jpg\",0) # train image\nkp1,desc1= sift.detectAndCompute(train_img,None) # find the keypoints and descriptors with SIFT\ntrain_img_kp= cv2.drawKeypoints(train_img,kp1,None,(255,0,0),4) # draw keypoints of the train image\nplt.imshow(train_img_kp) # show the train image keypoints\nplt.title('Train Image Keypoints')\nplt.show()\n\n\n\n# start capturing video\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) # turn the frame captured into grayscale\n kp2, desc2 = sift.detectAndCompute(gray,None) # find the keypoints and descriptors with SIFT of the frame captured\n \n # Obtain matches using K-Nearest Neighbor Method\n # the result 'matches' is the number of similar matches found in both images\n matches=flann.knnMatch(desc2,desc1,k=2)\n\n\n # store all the good matches as per Lowe's ratio test.\n goodMatch=[]\n for m,n in matches:\n if(m.distance<0.75*n.distance):\n goodMatch.append(m)\n\n\n # If enough matches are found, we extract the locations of matched keypoints \n # in both the images.\n # They are passed to find the perpective transformation.\n # Once we get this 3x3 transformation matrix, we use it to transform the corners \n # of query image to corresponding points in train image. Then we draw it.\n\n if(len(goodMatch)>MIN_MATCH_COUNT):\n tp=[] # src_pts\n qp=[] # dst_pts\n for m in goodMatch:\n tp.append(kp1[m.trainIdx].pt)\n qp.append(kp2[m.queryIdx].pt)\n tp,qp=np.float32((tp,qp))\n\n H,status=cv2.findHomography(tp,qp,cv2.RANSAC,3.0)\n\n\n h,w = train_img.shape\n train_outline= np.float32([[[0,0],[0,h-1],[w-1,h-1],[w-1,0]]])\n query_outline = cv2.perspectiveTransform(train_outline,H)\n\n cv2.polylines(frame,[np.int32(query_outline)],True,(0,255,0),5)\n cv2.putText(frame,'Object Found',(50,50), cv2.FONT_HERSHEY_COMPLEX, 2 ,(0,255,0), 2)\n print(\"Match Found-\")\n print(len(goodMatch),MIN_MATCH_COUNT)\n\n else:\n print(\"Not Enough match found-\")\n print(len(goodMatch),MIN_MATCH_COUNT)\n cv2.imshow('result',frame)\n\n if cv2.waitKey(1) == 13:\n break\ncap.release()\ncv2.destroyAllWindows()\n"
}
] | 2 |
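Both files in this repo revolve around SIFT matching: detect_obj.py uses a brute-force matcher with crossCheck on still images, while object_detection.py runs a FLANN k-NN match plus Lowe's ratio test per video frame. A minimal still-image sketch of that ratio test, isolated from the video loop; the file paths are placeholders, and cv2.SIFT_create() is the modern spelling of the cv2.xfeatures2d.SIFT_create() used in the repo:

import cv2

img1 = cv2.imread('train.jpg', 0)   # placeholder paths
img2 = cv2.imread('query.jpg', 0)

sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# NORM_L2 suits SIFT's float descriptors; crossCheck must stay False
# when asking knnMatch for k=2 neighbours.
bf = cv2.BFMatcher(cv2.NORM_L2)
good = [m for m, n in bf.knnMatch(des1, des2, k=2)
        if m.distance < 0.75 * n.distance]   # Lowe's ratio test
print(len(good), 'matches survive the ratio test')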
wangzan18/serverless-image-resizing | https://github.com/wangzan18/serverless-image-resizing | 6cda2b75ae0daa85e326bdf729950537a4bdba4e | 00e3f24f27a2dc2c06e5d8a4554a146893c8c57b | 36f3c2884e5f4d6b5bad9e848c0baed1772f9b2f | refs/heads/master | 2022-11-30T06:26:42.418797 | 2020-08-14T06:56:28 | 2020-08-14T06:56:28 | 287,457,428 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5995575189590454,
"alphanum_fraction": 0.6091445684432983,
"avg_line_length": 23.214284896850586,
"blob_id": "7f8897cf5d7807735139293ff231ea493dddbb7e",
"content_id": "b5f9b16ec27b7ed9a67ddca1ad9f99e8f935ff5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1356,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 56,
"path": "/handler.py",
"repo_name": "wangzan18/serverless-image-resizing",
"src_encoding": "UTF-8",
"text": "import datetime\nimport json\nimport os\nfrom io import BytesIO\nimport boto3\nimport PIL\nfrom PIL import Image\n\n\ndef resized_image_url(resized_key, bucket, region):\n return \"http://{bucket}.s3.{region}.amazonaws.com/{resized_key}\".format(bucket=bucket, region=region, resized_key=resized_key\n )\n\ndef resize_image(bucket_name, key, size):\n size_split = size.split('x')\n s3 = boto3.resource('s3')\n obj = s3.Object(\n bucket_name=bucket_name,\n key=key,\n )\n obj_body = obj.get()['Body'].read()\n\n img = Image.open(BytesIO(obj_body))\n img = img.resize(\n (int(size_split[0]), int(size_split[1])), PIL.Image.ANTIALIAS\n )\n buffer = BytesIO()\n img.save(buffer, 'JPEG')\n buffer.seek(0)\n\n resized_key=\"{size}_{key}\".format(size=size, key=key)\n obj = s3.Object(\n bucket_name=bucket_name,\n key=resized_key,\n )\n obj.put(Body=buffer, ContentType='image/jpeg')\n\n return resized_image_url(\n resized_key, bucket_name, os.environ[\"AWS_REGION\"]\n )\n\ndef call(event, context):\n key = event[\"pathParameters\"][\"image\"]\n size = event[\"pathParameters\"][\"size\"]\n\n result_url = resize_image(os.environ[\"BUCKET\"], key, size)\n\n response = {\n \"statusCode\": 301,\n \"body\": \"\",\n \"headers\": {\n \"location\": result_url\n }\n }\n\n return response\n"
},
{
"alpha_fraction": 0.7425876259803772,
"alphanum_fraction": 0.7574123740196228,
"avg_line_length": 27.538461685180664,
"blob_id": "5d3e40d4be1aca2d3775a0814161c9ff4ffc1e99",
"content_id": "c7128dc7417b08a7717046a313265af077f98af7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 26,
"path": "/README.md",
"repo_name": "wangzan18/serverless-image-resizing",
"src_encoding": "UTF-8",
"text": "# Dynamic image resizing with Python and Serverless framework\n\nIn this example, we set up a dynamic image resizing solution with AWS S3 and a Serverless framework function written in Python. We use [Pillow](https://pillow.readthedocs.io/en/stable/) for image resizing.\n\n## Pre-requisites\n\nIn order to deploy the function, you will need the following:\n\n- API credentials for AWS with persmissions to manage S3, IAM and API Gateway\n- If not on Linux - Docker installed locally \n\n## Deploying the Serverless project\n\n1. Make a .env file with your AWS credentials, as in .env.example\n2. Deploy the Serverless project:\n\n```\nsource .env\nsls deploy\n```\n\n## Example usage\n\n```\nhttps://XXX.execute-api.eu-west-1.amazonaws.com/dev/100x100/test.jpg\n```\n"
}
] | 2 |
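handler.py is written for Lambda behind API Gateway, but its call() signature makes a local smoke test straightforward. A hedged sketch, assuming valid AWS credentials in the environment, an existing bucket (the name below is a placeholder), and a test.jpg already uploaded to it:

import os
import handler

os.environ['BUCKET'] = 'my-images-bucket'   # placeholder; must exist in S3
os.environ['AWS_REGION'] = 'eu-west-1'

event = {'pathParameters': {'image': 'test.jpg', 'size': '100x100'}}
response = handler.call(event, None)        # the handler ignores the context argument
print(response['statusCode'], response['headers']['location'])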
zunhai/hive | https://github.com/zunhai/hive | c45d1cfccaca3ab1778b8b7145cb9520649b0026 | 9fd3085dd460bbe3f6f0c6ed865a1b5d7ddad475 | 72da095d63752590b271809736d75d2fa26892c1 | refs/heads/master | 2021-04-26T23:42:34.789085 | 2018-03-03T13:12:33 | 2018-03-03T13:12:33 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5569800734519958,
"alphanum_fraction": 0.5968660712242126,
"avg_line_length": 32.47618865966797,
"blob_id": "5359020f7ba6039ff1420ee6b40a4700686388b9",
"content_id": "73b6d60c70208c0364aebb6d95b4fa89c30a6495",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 702,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 21,
"path": "/scrapySpider/bilibili/bilibili/spiders/bb.py",
"repo_name": "zunhai/hive",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nclass BbSpider(scrapy.Spider):\n name = 'bb'\n start_urls = ['https://api.bilibili.com/x/v2/reply?pn=1&type=1&oid=20141782']\n\n def parse(self, response):\n response = response.text\n response = json.loads(response)\n page = int(response['data'][\"page\"][\"count\"])//20 + 1\n for i in range(5):\n url = 'https://api.bilibili.com/x/v2/reply?pn='+ str(i) +'&type=1&oid=20141782'\n yield scrapy.Request(url=url, callback=self.parse2)\n\n def parse2(self , response):\n response = response.text\n response = json.loads(response)\n mid = len(response['data']['replies'])\n print(mid)"
}
] | 1 |
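bb.py computes the reply page count and walks the pages, but only prints the reply counts. A sketch of the same spider yielding structured items instead; the member/content key names are assumptions about the Bilibili reply JSON, not verified against the API:

import json
import scrapy

class RepliesSpider(scrapy.Spider):
    name = 'replies'
    start_urls = ['https://api.bilibili.com/x/v2/reply?pn=1&type=1&oid=20141782']

    def parse(self, response):
        data = json.loads(response.text)
        pages = data['data']['page']['count'] // 20 + 1   # 20 replies per page
        for pn in range(1, pages + 1):
            url = f'https://api.bilibili.com/x/v2/reply?pn={pn}&type=1&oid=20141782'
            yield scrapy.Request(url, callback=self.parse_page)

    def parse_page(self, response):
        for reply in json.loads(response.text)['data']['replies']:
            # Field names below are assumed, not confirmed
            yield {'member': reply['member']['uname'],
                   'message': reply['content']['message']}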