repo_name (stringlengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
SPAMLab/data_sharing | [
"a450e5cbd1e625866d6407b7fb8826fd4cdbaf1e"
] | [
"Landslide_Segmentation_with_Deep_Learning_Evaluating_Model_Generalization_in_Rainfall-Induced_Landslides_in_Brazil/preprocessing.py"
] | [
"import rasterio as rio\nimport geopandas as gpd\nfrom shapely.ops import cascaded_union\nfrom shapely.geometry import Polygon\nfrom rasterio.features import rasterize\nimport numpy as np\nimport os\nfrom glob import glob\nimport earthpy.spatial as es\nfrom rasterio.mask import mask\nfrom rasterio.plot import reshape_as_image, reshape_as_raster\nfrom shapely.geometry import box\nimport pandas as pd\nfrom utils import open_tif_image, create_new_folder_if_it_doesnt_exist\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\nimport albumentations as A\n\n\nclass Augmentation:\n\n def __init__(self, imagesPath, masksPath, size):\n\n\n self.imagePath = imagesPath\n self.maskPath = masksPath\n self.imageNames = self.get_tif_images_names_from_a_folder()\n self.size = size\n def get_tif_images_names_from_a_folder(self):\n imagesPath = glob(f\"{self.imagePath}/*.tif\")\n imageNames = [x.split(\"/\")[-1] for x in imagesPath]\n return imageNames\n\n @staticmethod\n def save_raster(data, meta, savePath, saveName):\n with rio.open(f\"{savePath}/{saveName}.tif\", 'w', **meta) as dst:\n dst.write(data)\n\n def augment_images(self):\n for image in self.imageNames:\n # open image\n img = rio.open(f\"{self.imagePath}/{image}\").read()\n img_meta = rio.open(f\"{self.imagePath}/{image}\").meta\n img = reshape_as_image(img)\n mask = rio.open(f\"{self.maskPath}/{image}\").read()\n mask_meta = rio.open(f\"{self.maskPath}/{image}\").meta\n mask = reshape_as_image(mask)\n\n transform = A.Compose([\n A.HorizontalFlip(p=1),\n ])\n\n transformed = transform(image=img, mask=mask)\n self.save_raster(reshape_as_raster(transformed[\"image\"]), img_meta, self.imagePath, saveName=f\"{image}_hflip\")\n self.save_raster(reshape_as_raster(transformed[\"mask\"]), mask_meta, self.maskPath, saveName=f\"{image}_hflip\")\n\n transform = A.Compose([\n A.VerticalFlip(p=1),\n ])\n\n transformed = transform(image=img, mask=mask)\n self.save_raster(reshape_as_raster(transformed[\"image\"]), img_meta, self.imagePath,\n saveName=f\"{image}_Vflip\")\n self.save_raster(reshape_as_raster(transformed[\"mask\"]), mask_meta, self.maskPath,\n saveName=f\"{image}_Vflip\")\n\n # transform = A.Compose([\n # A.RandomBrightnessContrast(p=1),\n # ])\n #\n # transformed = transform(image=img, mask=mask)\n # self.save_raster(reshape_as_raster(transformed[\"image\"]), img_meta, self.imagePath,\n # saveName=f\"{image}_rb\")\n # self.save_raster(reshape_as_raster(transformed[\"mask\"]), mask_meta, self.maskPath,\n # saveName=f\"{image}_rb\")\n\n transform = A.Compose([A.RandomSizedCrop(min_max_height=(15, 30), height=self.size, width=self.size, p=1)])\n\n transformed = transform(image=img, mask=mask)\n\n self.save_raster(reshape_as_raster(transformed[\"image\"]), img_meta, self.imagePath,\n saveName=f\"{image}_crop\")\n self.save_raster(reshape_as_raster(transformed[\"mask\"]), mask_meta, self.maskPath,\n saveName=f\"{image}_crop\")\n\n\n\n transform = A.Compose([\n A.ChannelShuffle(p=1),\n ])\n\n transformed = transform(image=img, mask=mask)\n self.save_raster(reshape_as_raster(transformed[\"image\"]), img_meta, self.imagePath,\n saveName=f\"{image}_cs\")\n self.save_raster(reshape_as_raster(transformed[\"mask\"]), mask_meta, self.maskPath,\n saveName=f\"{image}_cs\")\n\n\nclass Shapefile:\n def __init__(self, shapefilePath):\n self.shapefilePath = shapefilePath\n\n def open_shapefile(self):\n return gpd.read_file(self.shapefilePath)\n\n def reproject_Shapefile(self, epsg):\n shapefile = self.open_shapefile()\n shapefile.crs = 
epsg\n return shapefile\n\n def buffer(self, distance):\n shapefile = self.open_shapefile()\n buffer = shapefile.buffer(distance=distance)\n return buffer\n\n\n\n\nclass Raster:\n def __init__(self, rasterPath):\n self.rasterPath = rasterPath\n\n def open_raster(self):\n with rio.open(self.rasterPath) as image:\n return image\n\n @staticmethod\n def save_raster(data, meta, savePath, saveName):\n create_new_folder_if_it_doesnt_exist(savePath)\n with rio.open(f\"{savePath}/{saveName}.tif\", 'w', **meta) as dst:\n dst.write(np.expand_dims(data, axis=0))\n\n def calculate_ndvi(self, savePath, saveName):\n with rio.open(self.rasterPath) as image:\n imageArray = image.read()\n meta = image.meta\n meta.update(count=6)\n meta.update(dtype=\"float64\")\n tImage = imageArray.transpose((1, 2, 0))\n ndvi = (tImage[:, :, 4].astype(float) - tImage[:, :, 2].astype(float)) / (tImage[:, :, 4] + tImage[:, :, 2])\n print(imageArray.shape, ndvi.shape)\n print(ndvi.max)\n ndvi = ndvi * (2**16)\n print(ndvi.max())\n ndvi = np.expand_dims(ndvi, axis=0)\n stackImage = np.vstack([imageArray, ndvi])\n create_new_folder_if_it_doesnt_exist(savePath)\n with rio.open(f\"{savePath}/{saveName}.tif\", 'w', **meta) as dst:\n dst.write(stackImage)\n\n def get_rgb(self, savePath):\n with rio.open(self.rasterPath) as image:\n imageArray = image.read()\n meta = image.meta\n meta.update(count=3)\n create_new_folder_if_it_doesnt_exist(savePath)\n\n with rio.open(f\"{savePath}/tile_0.tif\", \"w\", **meta) as dst:\n dst.write(imageArray[0:3, :, :])\n\n def stack_new_band(self, newBandPath, savePath, name):\n with rio.open(self.rasterPath) as image:\n imageArray = image.read()\n print(imageArray.shape)\n meta = image.meta\n meta.update(count=meta[\"count\"] + 1)\n meta.update(dtype=\"float64\")\n with rio.open(newBandPath) as newBand:\n terrain = (newBand.read())\n print(terrain.shape)\n terrain = terrain / terrain.max()\n terrain = np.where(terrain == terrain.min(), 0, terrain)\n terrain = terrain * 2 ** 16\n\n stackImage = np.vstack([imageArray, terrain])\n\n # Read each layer and write it to stack\n with rio.open(f\"{savePath}/{name}_terrain.tif\", 'w', **meta) as dst:\n dst.write(stackImage)\n\n\n\n\n\n def reproject_raster(self, epsg):\n saveName = self.rasterPath.split(\"/\")[-1].split(\".\")[0]\n with rio.open(self.rasterPath) as src:\n transform, width, height = calculate_default_transform(src.crs, epsg, src.width, src.height, *src.bounds)\n kwargs = src.meta.copy()\n kwargs.update({\n 'crs': epsg,\n 'transform': transform,\n 'width': width,\n 'height': height})\n with rio.open(f'{saveName}_epsg_{epsg.split(\":\")[-1]}.tif', 'w', **kwargs) as dst:\n for i in range(1, src.count + 1):\n reproject(\n source=rio.band(src, i),\n destination=rio.band(dst, i),\n src_transform=src.transform,\n src_crs=src.crs,\n dst_transform=transform,\n dst_crs=epsg,\n resampling=Resampling.nearest)\n print(\"image reprojected!\")\n\n\n def get_meta_crs_and_bounds(self):\n image = self.open_raster()\n meta = image.meta\n crs = image.crs\n xmax = image.bounds[2]\n xmin = image.bounds[0]\n ymax = image.bounds[3]\n ymin = image.bounds[1]\n\n return meta, crs, xmax, xmin, ymax, ymin\n\n def get_transform(self):\n image = self.open_raster()\n transform = image.transform\n return transform\n\n def get_metadata(self):\n image = self.open_raster()\n metadata = image.meta\n return metadata\n\n #TODO save raster\n def save_raster(self):\n pass\n\n\nclass Sampling:\n def __init__(self, raster: Raster, xSize, ySize, savePath):\n\n self.savePath = savePath\n 
self.ySize = ySize\n self.xSize = xSize\n self.meta, self.crs, self.xmax, self.xmin, self.ymax, self.ymin = raster.get_meta_crs_and_bounds()\n self.xPixelSize = self.meta[\"transform\"][0]\n self.yPixelSize = self.meta[\"transform\"][4]\n self.width = self.xPixelSize * self.xSize\n self.height = self.yPixelSize * self.ySize * -1\n\n\n\n def generate_regular_sampling_polygons(self, overlap=0, save=False):\n\n \"\"\"\n # Code adapted from Keras-spatial library - https://pypi.org/project/keras-spatial/\n Generate regular grid over extent.\n Args:\n overlap (float): percentage of patch overlap (optional)\n save (bool) : True if want to save geojson in a folder.\n Returns:\n geopandas.GeoDataFrame\n \"\"\"\n\n # Define the pixel size and multiply by the xsize to get xsize in number of pixels\n\n numx = int((self.xmax - self.xmin) // (self.width - self.width * overlap))\n numy = int((self.ymax - self.ymin) // (self.height - self.height * overlap))\n\n x = np.linspace(self.xmin, self.xmax - self.width, num=numx)\n y = np.linspace(self.ymin, self.ymax - self.height, num=numy)\n X, Y = np.meshgrid(x, y)\n polys = [box(x, y, x + self.width, y + self.height) for x, y in np.nditer([X, Y])]\n\n gdf = gpd.GeoDataFrame({'geometry': polys})\n gdf.crs = self.crs\n if save:\n if not os.path.exists(self.savePath):\n os.makedirs(self.savePath)\n gdf.to_file(f\"{self.savePath}/regular_grid_size_{self.xSize}.geojson\", driver=\"GeoJSON\")\n else:\n return gdf\n\n def generate_random_sampling_polygons(self, numberOfPolygons, save=False):\n\n \"\"\"\n Generate random grid over extent.\n Args:\n numberOfPolygons (int): number of patches\n save (bool): if true save the dataframe\n Returns:\n :obj:`geopandas.GeoDataFrame`:\n \"\"\"\n\n x = np.random.rand(numberOfPolygons) * (self.xmax - self.xmin - self.width) + self.xmin\n y = np.random.rand(numberOfPolygons) * (self.ymax - self.ymin - self.height) + self.ymin\n polys = [box(x, y, x + self.width, y + self.height) for x, y in np.nditer([x, y])]\n\n gdf = gpd.GeoDataFrame({'geometry': polys})\n gdf.crs = self.crs\n gdf = self.remove_duplicated_polygons(gdf)\n if save:\n create_new_folder_if_it_doesnt_exist(savePath=self.savePath)\n gdf.to_file(f\"{self.savePath}/random_grid_size_{self.xSize}.geojson\", driver=\"GeoJSON\")\n else:\n return gdf\n\n\n def generate_tiles_from_points(self, shapefile, save=False, size=None):\n\n \"\"\"Function that converts the points to a specified size square.\n\n Parameters\n\n imagePath (string) = Path to the .tif image.\n\n shapefilePath (string) = path to the shapefile or geojson.\n\n outputPath (string) = path to output resulting geojson.\n\n outputName (string) = path to output resulting geojson.\n\n size (int) = size (in pixels) of the output.\n\n :return if save == False, return GeoDataFrame\n\n \"\"\"\n\n # Get the pixel size\n print(f\"Pixel size - X : {self.xPixelSize}, Y: {-self.yPixelSize}\")\n pixelSizeX = self.xPixelSize\n # load the shapefile\n if type(shapefile) == Shapefile:\n df = shapefile.open_shapefile()\n else:\n df = shapefile\n if size:\n bufferSize = (size / 2 - 0.5) * pixelSizeX\n else:\n bufferSize = (self.xSize / 2 - 0.5) * pixelSizeX\n buffer = df.buffer(bufferSize, cap_style=3)\n if save:\n create_new_folder_if_it_doesnt_exist(self.savePath)\n buffer.to_file(f\"{self.savePath}/{self.xSize}_tiles_created_from_points.geojson\", driver=\"GeoJSON\")\n print(\"Data Saved!\")\n else:\n return buffer\n\n def select_only_the_tiles_that_intersect_polygons(self, samplingPolygons: Shapefile, featurePolygons: 
Shapefile, save=False):\n saveName = samplingPolygons.shapefilePath.split(\"/\")[-1].split(\".\")[0]\n samplingPolygons = samplingPolygons.open_shapefile()\n featurePolygon = featurePolygons.open_shapefile()\n featurePolygon = self.remove_duplicated_polygons(featurePolygon)\n s_join = gpd.sjoin(samplingPolygons, featurePolygon, how=\"inner\")\n s_join = self.remove_duplicated_polygons(s_join)\n\n if save:\n create_new_folder_if_it_doesnt_exist(self.savePath)\n s_join.to_file(f\"{self.savePath}/s_join_{saveName}.geojson\", driver=\"GeoJSON\")\n else:\n return s_join\n\n @staticmethod\n def remove_duplicated_polygons(geopandasDataFrame):\n try:\n if \"index_right\" and \"id\" in geopandasDataFrame.keys():\n geopandasDataFrame = geopandasDataFrame.drop([\"index_right\", \"id\"], axis=1)\n else:\n geopandasDataFrame = geopandasDataFrame.drop([\"index_left\", \"index_right\", \"id\"], axis=1)\n except:\n geopandasDataFrame = geopandasDataFrame\n return geopandasDataFrame.drop_duplicates()\n\n def generate_random_points(self):\n xLongitude = np.random.uniform(self.xmin, self.xmax, 1)\n yLatitude = np.random.uniform(self.ymin, self.ymax, 1)\n return xLongitude, yLatitude\n\n @staticmethod\n def geodataframe_is_empty(geoDataFrame):\n return len(geoDataFrame) == 0\n\n def create_geodataframe_from_x_y_and_add_crs(self,x,y):\n gdf = gpd.GeoSeries(gpd.points_from_xy(x, y))\n gdf.crs = self.crs\n return gdf\n\n def buffer_point(self,geoSeries):\n gdfBuffer = geoSeries.buffer(self.width)\n return gdfBuffer\n\n @staticmethod\n def points_are_inside_buffer(geoSeries, buffer):\n return len(geoSeries[geoSeries.within(buffer)]) > 0\n\n def generate_random_tiles_without_overlap(self, numberOfPoints, save=False):\n points = gpd.GeoSeries()\n while len(points) != numberOfPoints:\n x, y = self.generate_random_points()\n if self.geodataframe_is_empty(points):\n points = self.create_geodataframe_from_x_y_and_add_crs(x,y)\n else:\n newPoint = self.create_geodataframe_from_x_y_and_add_crs(x,y)\n pointBuffer = self.buffer_point(newPoint)\n if self.points_are_inside_buffer(points, pointBuffer):\n pass\n else:\n points = pd.concat([points, newPoint])\n\n bufferSize = (self.xSize / 2 - 0.5) * self.xPixelSize\n buffer = points.buffer(bufferSize, cap_style=3)\n if save:\n create_new_folder_if_it_doesnt_exist(self.savePath)\n buffer.to_file(f\"{self.savePath}/{self.xSize}_tiles_from_random_points_without_overlap.geojson\",\n driver=\"GeoJSON\")\n print(\"Data Saved!\")\n\n @staticmethod\n def __numberOfPointIsInsidePolygon(geoSeries, polygon):\n numberofPointsInsidePolygon = len(geoSeries[geoSeries.within(polygon)])\n return numberofPointsInsidePolygon\n\n @staticmethod\n def __generate_n_points_within_bounds(numberOfPoints, xMax, xMin, yMax, yMin):\n numberOfPoints = numberOfPoints\n xLongitute = np.random.uniform(xMin, xMax, numberOfPoints)\n yLatitude = np.random.uniform(yMin, yMax, numberOfPoints)\n return xLongitute, yLatitude\n\n def generate_points_inside_a_polygon(self,shapefilePath, numberOfPoints):\n \"\"\"\n Function to generate n number of points inside a polygon.\n\n :param numberOfPoints: (int) - Number of points that will be created inside the polygon.\n :return: (geopandas.GeoSeries) - Geoseries with the requested number of points inside each polygon.\n \"\"\"\n # Empty geoseries to concatenate the results\n classification = []\n samplingPointsInsidePolygon = gpd.GeoSeries()\n # open the data\n try:\n shapefiles = gpd.read_file(shapefilePath)\n except:\n shapefiles = shapefilePath\n # loop over each polygon 
and generate n points inside\n for i, polygon in enumerate(shapefiles[\"geometry\"]):\n classification.append(shapefiles[\"classification\"][i])\n # get the bounds of the polygon\n xMin, yMin, xMax, yMax = polygon.bounds\n # generate numberOfPoints inside a polygon\n xLongitude, yLatitude = self.__generate_n_points_within_bounds(numberOfPoints, xMax, xMin, yMax, yMin)\n # Create a geoseries from x,y\n gdf_points = gpd.GeoSeries(gpd.points_from_xy(xLongitude, yLatitude))\n # Evaluate if number of points inside a Polygon is greater than the desired number of Points (may improve)\n while self.__numberOfPointIsInsidePolygon(gdf_points, polygon) < numberOfPoints:\n # Generate new points\n xLongitude, yLatitude = self.__generate_n_points_within_bounds(numberOfPoints, xMax, xMin, yMax, yMin)\n # geoseries from x,y\n gdf_points = gpd.GeoSeries(gpd.points_from_xy(xLongitude, yLatitude))\n # subset the desired number of points\n gdf_points = gdf_points[gdf_points.within(polygon)][0:numberOfPoints + 1]\n # add the cordnate reference system\n gdf_points.crs = shapefiles.crs\n # concatenate the results)\n samplingPointsInsidePolygon = pd.concat([samplingPointsInsidePolygon, gdf_points])\n samplingPointsInsidePolygon = gpd.GeoDataFrame(geometry=samplingPointsInsidePolygon)\n samplingPointsInsidePolygon[\"classification\"] = classification\n\n return samplingPointsInsidePolygon\n\n def generate_tiles_from_classified_points(self, shapefilePath, numberOfPoints=None, centroid=False, sizes=[32, 64, 128]):\n gdf = self.__classify_polygons_area(shapefilePath)\n gdf = gdf.reset_index()\n if centroid:\n samplingPoints = gpd.GeoDataFrame(geometry=gdf.centroid)\n samplingPoints[\"classification\"] = gdf[\"classification\"]\n\n else:\n samplingPoints = self.generate_points_inside_a_polygon(gdf, numberOfPoints)\n samplingPoints32 = samplingPoints[samplingPoints[\"classification\"] == 0]\n samplingPoints64 = samplingPoints[samplingPoints[\"classification\"] == 1]\n samplingPoints128 = samplingPoints[samplingPoints[\"classification\"] == 2]\n tiles32 = self.generate_tiles_from_points(samplingPoints32, size=sizes[0])\n tiles64 = self.generate_tiles_from_points(samplingPoints64, size=sizes[1])\n tiles128 = self.generate_tiles_from_points(samplingPoints128, size=sizes[2])\n\n return tiles32, tiles64, tiles128\n\n @ staticmethod\n def __classify_polygons_area(shapefilePath):\n def classify_areas(row):\n if row[\"area\"] < 2700:\n return 0\n elif (row[\"area\"] > 2700) & (row[\"area\"] < 6000):\n return 1\n else:\n return 2\n\n gdf = gpd.read_file(shapefilePath)\n gdf[\"area\"] = gdf.area\n gdf = gdf[gdf[\"area\"] != 0]\n gdf[\"classification\"] = gdf.apply(lambda row: classify_areas(row), axis=1)\n return gdf\n\n\n\nclass BinaryMasks:\n def __init__(self, imageRaster: Raster, shapefile: Shapefile, savePath):\n self.savePath = savePath\n self.shapefile = shapefile\n self.imageRaster = imageRaster\n\n def generate_binary_mask(self, save=False):\n # load raster\n image = self.imageRaster.open_raster()\n meta = self.imageRaster.get_metadata()\n saveName = self.imageRaster.rasterPath.split(\"/\")[-1].split(\".\")[0]\n # load shapefile\n labelShapefile = self.shapefile.open_shapefile()\n # Verify if the crs are the same\n\n if self.check_same_crs(imageCrs=meta[\"crs\"], shapeCrs=labelShapefile.crs):\n polygonsToRasterize = self.get_polygons_to_rasterize(meta, labelShapefile)\n outputImageSize = (meta['height'], meta['width'])\n mask = rasterize(shapes=polygonsToRasterize,\n out_shape=outputImageSize, all_touched=False)\n mask = 
mask.astype(\"uint16\")\n\n if save:\n self.save_raster(mask, meta, self.savePath, saveName)\n return saveName\n\n else:\n return mask, saveName\n\n @staticmethod\n def check_same_crs(imageCrs, shapeCrs):\n if imageCrs != shapeCrs:\n print(f\" Raster CRS : {imageCrs} Vetor CRS : {shapeCrs}.\\n Convert to the same CRS!\")\n return False\n else:\n return True\n\n @staticmethod\n def get_polygons_to_rasterize(meta, shapefiles):\n def poly_from_utm(polygon, transform):\n poly_pts = []\n\n poly = cascaded_union(polygon)\n for i in np.array(poly.exterior.coords):\n poly_pts.append(~transform * tuple(i)[0:2])\n\n new_poly = Polygon(poly_pts)\n return new_poly\n\n poly_shp = []\n for num, row in shapefiles.iterrows():\n if row['geometry'].geom_type == 'Polygon':\n poly = poly_from_utm(row['geometry'], meta['transform'])\n poly_shp.append(poly)\n else:\n for p in row['geometry']:\n poly = poly_from_utm(p, meta['transform'])\n poly_shp.append(poly)\n return poly_shp\n\n @staticmethod\n def save_raster(maskRaster, meta, savePath, saveName):\n # Salvar\n mask = maskRaster.astype(\"uint16\")\n bin_mask_meta = meta.copy()\n bin_mask_meta.update({'count': 1,\n \"dtype\": mask.dtype})\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n with rio.open(f\"{savePath}/{saveName}_mask.tif\", 'w', **bin_mask_meta) as dst:\n dst.write(mask * 255, 1)\n\n\nclass PatchImage:\n def __init__(self, imagePath, savePath,samplingShapefile: Shapefile):\n self.imagePath = imagePath\n self.savePath = savePath\n self.samplingShapefile = samplingShapefile\n\n def patch_images(self, size, save=False):\n imagesWithCorrectDimension = []\n wrongDimensionImages = []\n # open shapefile\n shapefile = self.samplingShapefile.open_shapefile()[\"geometry\"]\n\n print(f\"There are {len(shapefile)} polygons to patch the image.\")\n\n with rio.open(self.imagePath) as src:\n for i in range(len(shapefile)):\n try:\n out_image, out_transform = mask(src, [shapefile[i]], crop=True, filled=False)\n out_meta = src.meta\n if out_image.shape[1] == size and out_image.shape[2] == size:\n imagesWithCorrectDimension.append(reshape_as_image(out_image))\n if save:\n out_meta.update({\"driver\": \"GTiff\", \"height\": out_image.shape[1], \"width\": out_image.shape[2],\n \"transform\": out_transform})\n create_new_folder_if_it_doesnt_exist(self.savePath)\n with rio.open(f\"{self.savePath}/tiles_{str(i)}.tif\", \"w\", **out_meta) as dest:\n dest.write(out_image)\n else:\n wrongDimensionImages.append(i)\n except ValueError:\n pass\n print(f\"Number of shapefiles with wrong patch dimensions = {len(wrongDimensionImages)}\")\n print(f\"Number of shapefiles with correct patch dimensions = {len(imagesWithCorrectDimension)}\")\n\n if not save:\n return np.array(imagesWithCorrectDimension, dtype=\"float32\")\n\n\n\n# Raster(\"../data/5_bands/test/test_area_1.tif\").stack_new_band(\"../data/5_bands_elevation/test/test_area_1_terrain.tif\", \"../data/5_bands_elevation/test\", \"test_area_1\")\n# Raster(\"../data/5_bands/test/test_area_2.tif\").stack_new_band(\"../data/5_bands_elevation/test/test_area_2_terrain.tif\", \"../data/5_bands_elevation/test\", \"test_area_2\")\n# Raster(\"../data/5_bands/test/test_area_3.tif\").stack_new_band(\"../data/5_bands_elevation/test/test_area_3_terrain.tif\", \"../data/5_bands_elevation/test\", \"test_area_3\")\n\n# Raster(\"../data/5_bands/train/tile_0.tif\").calculate_ndvi(\"../data/5_bands_ndvi/train\",\"tile_0\")\n# 
Raster(\"../data/5_bands/test/test_area_1.tif\").calculate_ndvi(\"../data/5_bands_ndvi/test\",\"test_area_1\")\n# Raster(\"../data/5_bands/test/test_area_2.tif\").calculate_ndvi(\"../data/5_bands_ndvi/test\",\"test_area_2\")\n# Raster(\"../data/5_bands/test/test_area_3.tif\").calculate_ndvi(\"../data/5_bands_ndvi/test\",\"test_area_3\")"
] | [
[
"pandas.concat",
"numpy.expand_dims",
"numpy.nditer",
"numpy.linspace",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.array",
"numpy.meshgrid",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
1uc/morinth | [
"634769dc0925932e2c7ab7ff6fe5f978862bb50b"
] | [
"test/time_integration_test.py"
] | [
"# SPDX-License-Identifier: MIT\n# Copyright (c) 2021 ETH Zurich, Luc Grosheintz-Laval\n\nimport time\nimport numpy as np\n\nfrom morinth.burgers import Burgers\nfrom morinth.rusanov import Rusanov\nfrom morinth.grid import Grid\nfrom morinth.boundary_conditions import Periodic\nfrom morinth.time_integration import BackwardEuler, BDF2, DIRKa23, DIRKa34\nfrom morinth.runge_kutta import ForwardEuler, SSP2, SSP3, Fehlberg\nfrom morinth.time_loop import TimeLoop\nfrom morinth.time_keeper import FixedDuration, PlotNever, FixedSteps\nfrom morinth.math_tools import convergence_rate\n\n\nclass MockROC(object):\n def __init__(self, dt):\n self._dt = dt\n\n def __call__(self, u, t):\n return u\n\n def pick_time_step(self, u):\n return self._dt\n\ndef with_mask(Solver, mask, cfl_number):\n return lambda bc, mock_roc: Solver(bc, mock_roc, mask, cfl_number)\n\ndef test_mock_ode():\n bc = lambda x, t: None\n plotting_steps = PlotNever()\n mask = np.array([True])\n\n solvers = [ ForwardEuler,\n with_mask(BackwardEuler, mask, 3.0),\n SSP2,\n SSP3,\n Fehlberg\n ]\n\n rates = [ 1.0, 1.0, 2.0, 3.0, 5.0 ]\n\n assert len(solvers) == len(rates)\n\n T = 1.0\n all_resolutions = [10, 20, 40]\n error = np.empty(len(all_resolutions))\n\n for Solver, expected_rate in zip(solvers, rates):\n for k, res in enumerate(all_resolutions):\n mock_roc = MockROC(T/res)\n single_step = Solver(bc, mock_roc)\n time_loop = TimeLoop(single_step, lambda x: None, plotting_steps)\n\n u0 = np.array([1.0]).reshape((1, 1, 1))\n uT = time_loop(u0, FixedDuration(T))\n\n u_ref = u0*np.exp(T)\n # u_ref = u0 + np.array([T])\n error[k] = np.abs(uT - u_ref)\n\n observed_rate = np.abs(convergence_rate(error, np.array(all_resolutions)))\n assert np.all(observed_rate - expected_rate > -0.1), str(single_step)\n\nif __name__ == '__main__':\n bc = lambda x, t: None\n plotting_steps = PlotNever()\n\n solvers = [ ForwardEuler,\n SSP2,\n SSP3,\n Fehlberg\n ]\n\n n_steps, dt = 100000, 1e-6\n\n for Solver in solvers:\n single_step = Solver(bc, MockROC(dt))\n time_loop = TimeLoop(single_step, lambda x: None, plotting_steps)\n\n u0 = np.random.random((1000, 1, 1))\n\n t0 = time.perf_counter()\n uT = time_loop(u0, FixedSteps(n_steps))\n t1 = time.perf_counter()\n\n print(\"{:s} : {:.3f} s\".format(Solver.__name__, t1 - t0))\n"
] | [
[
"numpy.random.random",
"numpy.abs",
"numpy.all",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
changqi1/EasyRec | [
"f1e850491ad9eaed9b16543d99f57cc0bcb2923e"
] | [
"easy_rec/python/test/dh_local_run.py"
] | [
"import argparse\nimport logging\nimport os\nimport shutil\nimport sys\n\nimport tensorflow as tf\n\nfrom easy_rec.python.test.dh_test_util import datahub_test_util\nfrom easy_rec.python.test.odps_command import OdpsCommand\nfrom easy_rec.python.test.odps_test_prepare import prepare\nfrom easy_rec.python.test.odps_test_util import OdpsOSSConfig\nfrom easy_rec.python.test.odps_test_util import delete_oss_path\nfrom easy_rec.python.test.odps_test_util import get_oss_bucket\nfrom easy_rec.python.utils import test_utils\n\nlogging.basicConfig(\n level=logging.INFO, format='[%(asctime)s][%(levelname)s] %(message)s')\n\nodps_oss_config = OdpsOSSConfig(script_path='./samples/dh_script')\n\n\nclass TestPipelineOnEmr(tf.test.TestCase):\n \"\"\"Train eval test on emr.\"\"\"\n\n def setUp(self):\n logging.info('Testing %s.%s' % (type(self).__name__, self._testMethodName))\n self._success = True\n self._test_dir = test_utils.get_tmp_dir()\n logging.info('test datahub local dir: %s' % self._test_dir)\n\n def tearDown(self):\n if self._success:\n shutil.rmtree(self._test_dir)\n\n def test_datahub_train_eval(self):\n end = ['deep_fm/drop_table.sql']\n odps_cmd = OdpsCommand(odps_oss_config)\n\n self._success = test_utils.test_datahub_train_eval(\n '%s/configs/deepfm.config' % odps_oss_config.temp_dir, self._test_dir)\n odps_cmd.run_list(end)\n self.assertTrue(self._success)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--odps_config', type=str, default=None, help='odps config path')\n parser.add_argument(\n '--oss_config', type=str, default=None, help='ossutilconfig path')\n parser.add_argument(\n '--datahub_config', type=str, default=None, help='datahub_config')\n parser.add_argument(\n '--bucket_name', type=str, default=None, help='test oss bucket name')\n parser.add_argument('--arn', type=str, default=None, help='oss rolearn')\n parser.add_argument(\n '--odpscmd', type=str, default='odpscmd', help='odpscmd path')\n parser.add_argument(\n '--algo_project', type=str, default=None, help='algo project name')\n parser.add_argument(\n '--algo_res_project',\n type=str,\n default=None,\n help='algo resource project name')\n parser.add_argument(\n '--algo_version', type=str, default=None, help='algo version')\n args, unknown_args = parser.parse_known_args()\n\n sys.argv = [sys.argv[0]]\n for unk_arg in unknown_args:\n sys.argv.append(unk_arg)\n\n if args.odps_config:\n odps_oss_config.load_odps_config(args.odps_config)\n os.environ['ODPS_CONFIG_FILE_PATH'] = args.odps_config\n if args.datahub_config:\n odps_oss_config.load_dh_config(args.datahub_config)\n if args.oss_config:\n odps_oss_config.load_oss_config(args.oss_config)\n if args.odpscmd:\n odps_oss_config.odpscmd_path = args.odpscmd\n if args.algo_project:\n odps_oss_config.algo_project = args.algo_project\n if args.algo_res_project:\n odps_oss_config.algo_res_project = args.algo_res_project\n if args.algo_version:\n odps_oss_config.algo_version = args.algo_version\n if args.arn:\n odps_oss_config.arn = args.arn\n if args.bucket_name:\n odps_oss_config.bucket_name = args.bucket_name\n print(args)\n prepare(odps_oss_config)\n start = [\n 'deep_fm/create_external_deepfm_table.sql',\n 'deep_fm/create_inner_deepfm_table.sql'\n ]\n end = ['deep_fm/drop_table.sql']\n odps_cmd = OdpsCommand(odps_oss_config)\n odps_cmd.run_list(start)\n odps_oss_config._subscription()\n tf.test.main()\n # delete oss path\n bucket = get_oss_bucket(odps_oss_config.oss_key, odps_oss_config.oss_secret,\n odps_oss_config.endpoint, 
odps_oss_config.bucket_name)\n delete_oss_path(bucket, odps_oss_config.exp_dir, odps_oss_config.bucket_name)\n # delete tmp\n shutil.rmtree(odps_oss_config.temp_dir)\n"
] | [
[
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
paulokuong/fourthbrain_capstone | [
"db4f76bfc5fd7b1ecc355282f37a87a06f62aa47"
] | [
"presentation/groupby_user_conversion.py"
] | [
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom datetime import datetime\nimport os\nimport time\n\nfrom sklearn.inspection import permutation_importance\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.preprocessing import StandardScaler\nfrom tpot.builtins import StackingEstimator\nfrom tpot.export_utils import set_param_recursive\nfrom sklearn.metrics import classification_report\nfrom xgboost import XGBClassifier\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Flatten\nfrom keras.layers import Dropout\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom matplotlib import pyplot\nfrom keras.layers import Bidirectional\nfrom keras.layers import TimeDistributed\nfrom tensorflow.keras.layers import GRU, Embedding, SimpleRNN, Activation\nimport tensorflow as tf\n\n\nclass FeatureSelection(object):\n @staticmethod\n def by_coorelation(x, threshold=0.8, debug=False):\n \"\"\"Feature selection by eliminating highly correlated features.\n\n Args:\n x (pandas dataframe): features.\n threshold (float[optional]): score above which feature is highly correlated.\n debug (boolean[optional]): True to show debug messages.\n\n Return:\n pandas dataframe: dataframe with selected features.\n \"\"\"\n cor = x.corr()\n keep_columns = np.full((cor.shape[0],), True, dtype=bool)\n for i in range(cor.shape[0]):\n for j in range(i + 1, cor.shape[0]):\n if np.abs(cor.iloc[i, j]) >= threshold:\n if keep_columns[j]:\n keep_columns[j] = False\n if debug:\n print((\n f'Feature \"{x.columns[j]}\" is highly '\n f'related to \"{x.columns[i]}\". 
'\n f'Remove \"{x.columns[j]}\"'))\n if debug:\n print(len(np.full((cor.shape[0],), True, dtype=bool)))\n selected_columns = x.columns[keep_columns]\n return x[selected_columns]\n\n @staticmethod\n def by_permutation_importance(\n x, y, threshold=0.01, n_repeats=10, random_state=42, n_jobs=2):\n \"\"\"Feature selection by permutation importance.\n\n Args:\n x (pandas dataframe): features.\n threshold (float[optional]): score above which the feature is\n considered as important.\n\n \"\"\"\n feature_names = [f'feature {i}' for i in range(x.shape[1])]\n forest = RandomForestClassifier(random_state=random_state)\n forest.fit(x, y)\n start_time = time.time()\n result = permutation_importance(\n forest, x, y, n_repeats=n_repeats, random_state=random_state,\n n_jobs=n_jobs)\n elapsed_time = time.time() - start_time\n forest_importances = pd.Series(\n result.importances_mean, index=feature_names)\n importances = pd.DataFrame(forest_importances, columns=['score'])\n importances = importances.sort_values(by='score', ascending=False)\n importances.loc[:, 'feature'] = [\n filtered_x.columns[int(i.replace('feature ', ''))]\n for i in importances.index]\n importances[importances['score'] > threshold]\n return x[list(\n importances[importances['score'] > threshold]['feature'].values)]\n\n\nclass GroupBy(object):\n\n def __init__(self, raw_data_path):\n if not os.path.exists(raw_data_path):\n raise Exception(f'Path {raw_data_path} does not exist.')\n\n self.raw_data = pd.read_json(raw_data_path, lines=True)\n\n def preprocessing_for_bin_class(self):\n \"\"\"Preprcess GroupBy data for binary classification training.\n\n Args:\n raw_data_path (str): local path to raw json data.\n Returns:\n dict: dictionary of training\n \"\"\"\n\n df = self.raw_data\n transformed_df = df[\n ['customerId', 'customerVisitorId', 'customerSessionId',\n 'sessionStartTime', 'sessionEndTime', 'customerSessionNumber']]\n transformed_df.loc[:, 'deviceCategory'] = df['trafficSource'].transform(\n lambda x: x.get('deviceCategory', ''))\n transformed_df.loc[:, 'browser'] = df['trafficSource'].transform(\n lambda x: x.get('browser', ''))\n transformed_df.loc[:, 'os'] = df['trafficSource'].transform(\n lambda x: x.get('os', ''))\n transformed_df.loc[:, 'userAgent'] = df['trafficSource'].transform(\n lambda x: x.get('userAgent', ''))\n transformed_df.loc[:, 'language'] = df['trafficSource'].transform(\n lambda x: x.get('language'))\n transformed_df.loc[:, 'source'] = df['trafficSource'].transform(\n lambda x: x.get('source'))\n transformed_df.loc[:, 'has_campaign'] = df['trafficSource'].transform(\n lambda x: 1 if x.get('campaign') is not None else 0)\n transformed_df.loc[:, 'sessionStartTime'] = df['sessionStartTime'].transform(\n lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f %Z'))\n transformed_df.loc[:, 'sessionEndTime'] = df['sessionEndTime'].transform(\n lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f %Z'))\n transformed_df.loc[:, 'sessionDuration'] = df[['sessionStartTime', 'sessionEndTime']].apply(\n lambda x: (datetime.strptime(x['sessionEndTime'], '%Y-%m-%d %H:%M:%S.%f %Z') -\n datetime.strptime(x['sessionStartTime'], '%Y-%m-%d %H:%M:%S.%f %Z')).seconds, axis=1)\n transformed_df.loc[:, 'hourOfDay'] = df['sessionStartTime'].transform(\n lambda x: int(datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f %Z').strftime(\"%H\")))\n total_df = []\n for i in range(len(df['totals'])):\n new_dict = {k: float(v) if 'total' in k or 'unique' in k else v\n for k, v in df.iloc[i]['totals'].items()}\n total_df.append(new_dict)\n cleaned_df 
= pd.concat(\n [transformed_df, pd.DataFrame(total_df)], axis=1)\n cleaned_df = cleaned_df.fillna(0)\n\n all_browsers = sorted(pd.unique(cleaned_df['browser']))\n all_os = sorted(pd.unique(cleaned_df['os']))\n all_deviceCategory = sorted(pd.unique(cleaned_df['deviceCategory']))\n all_language = sorted(pd.unique(cleaned_df['language'].astype('str')))\n all_source = sorted(pd.unique(cleaned_df['source'].astype('str')))\n cleaned_df.loc[:, 'browser'] = cleaned_df['browser'].transform(\n lambda x: all_browsers.index(x))\n cleaned_df.loc[:, 'os'] = cleaned_df['os'].transform(\n lambda x: all_os.index(x))\n cleaned_df.loc[:, 'language'] = cleaned_df['language'].transform(\n lambda x: all_language.index(str(x)))\n cleaned_df.loc[:, 'source'] = cleaned_df['source'].transform(\n lambda x: all_source.index(str(x)))\n cleaned_df.loc[:, 'deviceCategory'] = cleaned_df['deviceCategory'].transform(\n lambda x: all_deviceCategory.index(x))\n cleaned_df.loc[:, 'bounce'] = cleaned_df['bounce'].transform(\n lambda x: int(x))\n cleaned_df.loc[:, 'events'] = cleaned_df['events'].transform(\n lambda x: int(x))\n cleaned_df.loc[:, 'timeOnSiteSeconds'] = cleaned_df['timeOnSite'].transform(\n lambda x: datetime.strptime(x, '%H:%M:%S').second + 60 * datetime.strptime(\n x, '%H:%M:%S').minute + 3600 * datetime.strptime(x, '%H:%M:%S').hour)\n cleaned_df.loc[:, 'newSession'] = cleaned_df['newSession'].transform(\n lambda x: 1 if x is True else 0)\n cleaned_df.loc[:, 'has_purchase'] = cleaned_df['totalOrders'].transform(\n lambda x: 1 if int(x) > 0 else 0)\n cleaned_df.loc[:, 'productPriceMean'] = df['hits'].apply(\n lambda x: np.nan_to_num(np.mean([np.mean([j.get('price') or 0\n for j in i['product']]) for i in x])))\n cleaned_df = cleaned_df.drop(\n columns=[\n 'sessionStartTime', 'sessionEndTime', 'userAgent', 'customerId',\n 'customerVisitorId', 'totalOrders', 'timeOnSite',\n 'queriesSearched', 'customerSessionId', 'totalOrderQty',\n 'uniqueOrders', 'totalOrderRevenue'])\n # sorted(cleaned_df.columns)\n x = cleaned_df.loc[:, list(\n set(cleaned_df.columns) - set('has_purchase'))]\n del x['has_purchase']\n y = cleaned_df.loc[:, ['has_purchase']]\n return {\"features\": x, \"label\": y}\n\n def preprocessing_for_sequence_model(self, num_of_events=30):\n df = self.raw_data\n oo = df[['hits']].apply(\n lambda x: [\n list(set([j.get('eventType').get('category')\n for j in hit])) for hit in x])['hits']\n # Get event type map\n event_type_map = {y: index + 1 for index, y in enumerate(\n [i for i in pd.unique(oo.explode()) if type(i) == str])}\n # Get sequences and sort the events by hitSequence which shows the order\n # of each event. 
Apply event type map after sorting.\n sequence_df = df.copy(deep=True)\n sequence_df.loc[:, 'sequence'] = sequence_df[['hits']].apply(\n lambda x: [\n [event_type_map[j[0]]\n for j in sorted(\n [(j.get('eventType').get('category'),\n j.get('hitSequence')) for j in hit])]\n for hit in x])['hits']\n # Find the target from the raw dataset.\n total_df = []\n for i in range(len(df['totals'])):\n new_dict = {k: float(v) if 'total' in k or 'unique' in k else v\n for k, v in df.iloc[i]['totals'].items()}\n total_df.append(new_dict)\n sequence_df = pd.concat([sequence_df, pd.DataFrame(total_df)], axis=1)\n sequence_df = sequence_df.fillna(0)\n sequence_df.loc[:, 'has_purchase'] = sequence_df['totalOrders'].transform(\n lambda x: 1 if int(x) > 0 else 0)\n\n final_sequence_df = sequence_df[\n ['customerSessionId', 'sequence', 'has_purchase']\n ][sequence_df['sequence'].map(len) <= num_of_events]\n event_sequence = final_sequence_df['sequence'].to_list()\n # Pad 0 to make all sequences to have the same size.\n x = pad_sequences(event_sequence)\n y = np.array(pd.get_dummies(\n final_sequence_df['has_purchase'], prefix='Purchase'))\n\n @staticmethod\n def train_xgb_bin_class(\n features, label, test_size=0.33, random_state=42, debug=False):\n \"\"\"Train binary classification using XGBoost algorithm\n\n Args:\n preprocessed_data (pandas dataframe): preprocessed data.\n test_size (float): test data size in percentage.\n random_state (int): random state.\n debug (boolean): True for print out debug messages.\n \"\"\"\n # Select features\n new_x = FeatureSelection.by_coorelation(features, debug=debug)\n new_x = FeatureSelection.by_permutation_importance(new_x)\n # Split dataset\n x_train, x_test, y_train, y_test = train_test_split(\n new_x.values, label, test_size=test_size,\n random_state=random_state)\n # Train model\n exported_pipeline = XGBClassifier(\n learning_rate=0.1, max_depth=4, min_child_weight=8,\n n_estimators=100, n_jobs=1, subsample=0.9500000000000001,\n verbosity=0, random_state=random_state)\n exported_pipeline.fit(x_train, list(y_train.values.ravel()))\n results = exported_pipeline.predict(x_test)\n pd.DataFrame(classification_report(y_test, results, output_dict=True))\n\n @staticmethod\n def train_lstm(\n features, label, op=30, neurons=40, epochs=150, batch_size=1000,\n validation_split=0.2):\n x_train, x_test, y_train, y_test = train_test_split(\n np.array(features), label, test_size=0.3)\n x_train = x_train.reshape((x_train.shape[0], 1, x_train.shape[1]))\n x_test = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))\n tf.keras.backend.clear_session()\n model = Sequential()\n model.add(Bidirectional(\n LSTM(neurons, return_sequences=True), input_shape=(1, op)))\n model.add(Bidirectional(LSTM(2 * neurons)))\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n model.compile(\n optimizer=tf.optimizers.Adam(learning_rate=0.0003),\n loss='binary_crossentropy',\n metrics=[tf.keras.metrics.Recall()])\n return lstm_model.fit(\n x_train, y_train, epochs=epochs, batch_size=batch_size,\n validation_split=validation_split)\n"
] | [
[
"pandas.Series",
"sklearn.ensemble.RandomForestClassifier",
"numpy.abs",
"sklearn.inspection.permutation_importance",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.full",
"tensorflow.keras.backend.clear_session",
"pandas.unique",
"pandas.read_json",
"tensorflow.optimizers.Adam",
"tensorflow.keras.metrics.Recall",
"numpy.array",
"sklearn.metrics.classification_report",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
esayyari/q2-feature-engineering | [
"d3419dabd4722818aafd7b13da957222ee4d3cf8"
] | [
"q2_feature_engineering/_smote/ML_combine.py"
] | [
"from imblearn import combine\nfrom qiime2.plugin import (Str, Int)\nimport biom\nfrom q2_feature_engineering._tada.logger import LOG\nfrom qiime2 import NumericMetadataColumn\nimport numpy as np\nimport pandas as pd\nimport qiime2\n\ndispatcher = {'SMOTEENN': combine.SMOTEENN, 'SMOTETomek': combine.SMOTETomek}\n\n\ndef _sort_metada(targets_metadata, biom_table):\n targets = targets_metadata.to_dataframe()\n\n # filter features and targest so samples match\n index = set(targets.index)\n index = [ix for ix in biom_table.ids('sample') if ix in index]\n targets = targets.loc[index]\n feature_data = biom_table.filter(index, inplace=False)\n return targets, feature_data\n\n\ndef _read_inputs(biom_table: biom.Table, meta_data: NumericMetadataColumn = None):\n if meta_data:\n meta, biom_table = _sort_metada(meta_data, biom_table)\n y = meta.iloc[:, 0]\n samples = meta.index\n else:\n samples = biom_table.ids('sample')\n y = pd.DataFrame(data=np.asarray(np.ones((len(samples), 1))).ravel(), index=samples)\n\n _table = biom_table.sort_order(axis='sample', order=samples)\n\n if np.sum(samples != _table.ids('sample')) > 0:\n raise ValueError(\"The samples IDs in meta data and biom table are not the same! The difference is:\",\n set(samples) - set(_table.ids('sample')), \"Please double check.\")\n\n return _table, y\n\n\ndef synthetic_over_sampling(table: biom.Table, metadata: NumericMetadataColumn,\n concatenate_meta: Str, method: Str = 'SMOTETomek',\n k_neighbors: Int = 5, m_neighbors: Int = 10, n_jobs: Int = 1,\n log_fp: Str = None, sampling_strategy: Str = 'auto',\n random_state: Int = 42, output_log_fp: Str = None) -> biom.Table:\n if log_fp:\n logger_ins = LOG(log_fp=log_fp).get_logger('synthetic_sampling_combination')\n logger_ins.info(\"The parameters used for oversampling are\")\n logger_ins.info('k_neighbors:', k_neighbors)\n logger_ins.info('m_neighbors:', m_neighbors)\n logger_ins.info('Sampling method:', method)\n logger_ins.info('Output log file path:', log_fp)\n logger_ins.info('sampling_strategy:', sampling_strategy)\n logger_ins.info('n_jobs:', n_jobs)\n logger_ins.info('random_state:', random_state)\n\n cls = dispatcher[method]\n if method != 'RandomOverSampler':\n table.norm(inplace=True)\n if log_fp:\n logger_ins.info(\"The input table is normalized before using it for oversampling\")\n sorted_table, sorted_metadata = _read_inputs(table, meta_data=metadata)\n matrix_data = sorted_table.matrix_data.transpose()\n if method not in dispatcher:\n raise ValueError(\n 'The optional methods for over sampling are', dispatcher.keys(), \"instead it received\", method\n )\n if method == 'ADASYN':\n over_sampling_cls = cls(sampling_strategy=sampling_strategy,\n random_state=random_state, n_neighbors=k_neighbors, n_jobs=n_jobs)\n elif method == 'RandomOverSampler':\n over_sampling_cls = cls(sampling_strategy=sampling_strategy, random_state=random_state)\n else:\n over_sampling_cls = cls(sampling_strategy=sampling_strategy, m_neighbors=m_neighbors,\n random_state=random_state, n_jobs=n_jobs, k_neighbors=k_neighbors)\n X_resampled, y_resampled = over_sampling_cls.fit_resample(matrix_data, sorted_metadata)\n if np.sum(np.abs(X_resampled[:len(matrix_data), :] - matrix_data)) != 0 or \\\n np.sum(y_resampled[:len(matrix_data)] == metadata) != len(matrix_data):\n raise ValueError(\n \"Over sampling method changed the data! 
Please double check your biom table\"\n )\n else:\n if log_fp:\n logger_ins.info(\"The oversampling finished successfully!\")\n logger_ins.info(\"The first\", len(matrix_data), \"samples belong to the original training samples and the \"\n \"next\", len(X_resampled) - len(matrix_data),\n \"samples belong to the new ones\")\n logger_ins.info(\"Overall, the size of data is\", len(X_resampled))\n if method != 'RandomOverSampler':\n dummy_samples = np.asarray(list(sorted_table.ids('sample')) +\n [\"dummy_sample_\" + str(i) for i in range(len(X_resampled) - len(matrix_data))])\n else:\n dummy_samples = over_sampling_cls.sample_indices_\n\n oversampled_table = biom.Table(X_resampled, observation_ids=sorted_table.ids('observation'),\n sample_ids=dummy_samples)\n oversampled_metadata = pd.DataFrame(index=dummy_samples, data=y_resampled)\n oversampled_metadata.index.names = ['#SampleID']\n oversampled_metadata.columns = ['label']\n oversampled_meta = qiime2.Metadata(oversampled_metadata)\n oversampled_meta.save()\n\n return oversampled_table\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
snmatharu/Multi-Face-Deep-Learning | [
"f5b80f8f3462119139efe04b6995f54e650ac8e4"
] | [
"modelling.py"
] | [
"# train_data = np.load('train_data.npy') \r\n# test_data = np.load('test_data.npy') \r\n'''Creating the neural network using tensorflow'''\r\n# Importing the required libraries \r\n\r\n\r\nimport tflearn \r\nfrom tflearn.layers.conv import conv_2d, max_pool_2d \r\nfrom tflearn.layers.core import input_data, dropout, fully_connected \r\nfrom tflearn.layers.estimator import regression \r\nimport numpy as np \r\nfrom tqdm import tqdm \r\n\r\nIMG_SIZE = 224\r\nLR = 1e-3\r\n\r\ntrain_data = np.load('train_data.npy')\r\ntest_data = np.load('test_data.npy')\r\n\r\nMODEL_NAME = 'face.model'.format(LR, '6conv-basic') \r\n\r\ntflearn.init_graph()\r\n\r\nconvnet = input_data(shape =[None, IMG_SIZE, IMG_SIZE, 1], name ='input') \r\nconvnet = conv_2d(convnet, 32, 5, activation ='relu') \r\nconvnet = max_pool_2d(convnet, 5) \r\n \r\nconvnet = conv_2d(convnet, 64, 5, activation ='relu') \r\nconvnet = max_pool_2d(convnet, 5) \r\n \r\nconvnet = conv_2d(convnet, 128, 5, activation ='relu') \r\nconvnet = max_pool_2d(convnet, 5) \r\n \r\nconvnet = conv_2d(convnet, 64, 5, activation ='relu') \r\nconvnet = max_pool_2d(convnet, 5) \r\n \r\nconvnet = conv_2d(convnet, 32, 5, activation ='relu') \r\nconvnet = max_pool_2d(convnet, 5) \r\n \r\nconvnet = fully_connected(convnet, 1024, activation ='relu') \r\nconvnet = dropout(convnet, 0.8)\r\n \r\nconvnet = fully_connected(convnet, 2, activation ='softmax') \r\nconvnet = regression(convnet, optimizer ='rmsprop', learning_rate = LR, \r\n\tto_one_hot = True, n_classes = 2,\r\n\tloss ='categorical_crossentropy', name ='targets') \r\n \r\nmodel = tflearn.DNN(convnet, tensorboard_dir ='log') \r\n \r\n# Splitting the testing data and training data \r\ntrain = train_data\r\ntest = test_data\r\n\r\n'''Setting up the features and lables'''\r\n# X-Features & Y-Labels \r\n\r\nX = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 1) \r\nY = [i[1] for i in train] \r\ntest_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1) \r\ntest_y = [i[1] for i in test] \r\n \r\n'''Fitting the data into our model'''\r\n# epoch = 5 taken \r\nmodel.fit({'input': X}, {'targets': Y}, n_epoch = 3, \r\n validation_set =({'input': test_x}, {'targets': test_y}), \r\n snapshot_step = 500, show_metric = True, run_id = MODEL_NAME)\r\nmodel.save(MODEL_NAME) "
] | [
[
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jennifereldiaz/fly-tnbc | [
"7ba08be9839c57e110a1b039b0b2c7393db16521"
] | [
"mutations_gh.py"
] | [
"#nov 9 2015\n\nimport pandas as pd\nimport numpy as np\n\n#get list of drivers\n#mutsigCV output (from the online tool)\nmutsig = pd.read_csv('MutSigCV2015.sig_genes.txt',sep='\\t')\nmutsig = mutsig[mutsig.q < 0.1]\ncosmic = pd.read_excel('COSMIC_cancer_gene_census.xls',sheetname = 'List')\npan = pd.read_excel('Pan_Cancer.xlsx',sheetname='Sheet1')\npan = pan[pan.Type=='MUTATION']\nvogel = pd.read_excel('Vogelstein-cancer-genes.xlsx',sheetname='Table S2A',\n skiprows=1)\nciv = pd.read_csv('../nightly-GeneSummaries_CiVICdb_160329.tsv',sep='\\t',header=0)\ndrivers = pd.concat([mutsig.gene,cosmic.Symbol,pan['Altered Locus/Gene'],\n vogel['Gene Symbol'],civ.name,pd.Series(['TERT'])]).reset_index(drop=True)\n#this is WXS so TERT promoter mutations won't show up. Maybe check expression data\n#,'TRIO'\n\n#open maf\nmaf = pd.read_csv('MUTATIONS/Somatic_Mutations/WUSM__IlluminaGA_DNASeq/Level_2/genome.wustl.edu__Illumina_Genome_Analyzer_DNA_Sequencing_level2.maf',\n sep='\\t')\nmaf = maf[['Hugo_Symbol', 'Chrom','Ncbi_Build',\n 'Start_Position', 'End_Position', 'Strand', 'Variant_Classification',\n 'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele1',\n 'Tumor_Seq_Allele2', 'Dbsnp_Rs', 'Dbsnp_Val_Status',\n 'Tumor_Sample_Barcode', 'Matched_Norm_Sample_Barcode',\n 'Match_Norm_Seq_Allele1', 'Match_Norm_Seq_Allele2',\n 'Tumor_Validation_Allele1', 'Tumor_Validation_Allele2',\n 'Match_Norm_Validation_Allele1', 'Match_Norm_Validation_Allele2',\n 'Mutation_Status']]\n\nmaf['TCGA_ID'] = maf.Tumor_Sample_Barcode.str.slice(stop=12)\n\n#remove silent mutations\nmaf = maf[maf.Variant_Classification != 'Silent']\n\n#select driver genes\ndmaf = maf[maf.Hugo_Symbol.isin(drivers)]\n\n#make a table\ndic = {'Missense_Mutation':2.0, 'Splice_Site':0.3, 'Frame_Shift_Del':0.1,\n 'In_Frame_Del':1.8, 'Nonsense_Mutation':0.0, 'In_Frame_Ins':1.9,\n 'Frame_Shift_Ins':0.2, 'RNA':1.6, 'Nonstop_Mutation':1.7}\ndmaf['Variant_Value'] = dmaf['Variant_Classification'].map(dic)\ntable = dmaf.pivot_table(index='TCGA_ID',columns='Hugo_Symbol',\n values='Variant_Value',\n aggfunc=lambda x: np.sum(x)+10*(len(x)-1))\ntable.fillna(1.0,inplace=True)\nmissed = pd.Series(maf.TCGA_ID.unique().tolist())\nmissed = missed[~missed.isin(table.index)]\nempty = pd.DataFrame(data=1.0,index=missed.tolist(),columns=table.columns)\ntable = pd.concat([table,empty])\ntable.to_csv('BRCA_somatic_muts_matrix.csv')\n"
] | [
[
"pandas.concat",
"pandas.read_excel",
"pandas.read_csv",
"pandas.Series",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
harupy/mlflow-example | [
"783d50b2a77293e602d5f7108f3bfe63af88e824"
] | [
"train.py"
] | [
"# The data set used in this example is from http://archive.ics.uci.edu/ml/datasets/Wine+Quality\n# P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.\n# Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.\n\nimport os\nimport warnings\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import ElasticNet\n\nimport mlflow\nimport mlflow.sklearn\n\n\ndef eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n np.random.seed(40)\n\n # Read the wine-quality csv file (make sure you're running this from the root of MLflow!)\n wine_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"wine-quality.csv\")\n data = pd.read_csv(wine_path)\n\n # Split the data into training and test sets. (0.75, 0.25) split.\n train, test = train_test_split(data)\n\n # The predicted column is \"quality\" which is a scalar from [3, 9]\n train_x = train.drop([\"quality\"], axis=1)\n test_x = test.drop([\"quality\"], axis=1)\n train_y = train[[\"quality\"]]\n test_y = test[[\"quality\"]]\n\n alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5\n l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5\n\n with mlflow.start_run():\n lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n lr.fit(train_x, train_y)\n\n predicted_qualities = lr.predict(test_x)\n\n (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n\n print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n print(\" RMSE: %s\" % rmse)\n print(\" MAE: %s\" % mae)\n print(\" R2: %s\" % r2)\n\n mlflow.log_param(\"alpha\", alpha)\n mlflow.log_param(\"l1_ratio\", l1_ratio)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"r2\", r2)\n mlflow.log_metric(\"mae\", mae)\n \n print(\"x\" * 100)\n print(\"artifact uri:\", mlflow.get_artifact_uri())\n mlflow.sklearn.log_model(lr, \"model\")\n"
] | [
[
"pandas.read_csv",
"sklearn.metrics.r2_score",
"numpy.random.seed",
"sklearn.linear_model.ElasticNet",
"sklearn.metrics.mean_absolute_error",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gavinmcclelland/sarcasm-detection | [
"492bde0e4d1eec112fb96b2a13851d8778c3ac42"
] | [
"cnn_model_hyperparameter_tuning.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"10_CNN_Master_Final.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1SMRhwB_Xaf9EC31gZh9HpVpE2iW5LfYJ\n\"\"\"\nimport logging\nlogging.getLogger().setLevel(logging.CRITICAL)\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport os\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\nimport torch\n\ntorch.cuda.empty_cache()\nprint(torch.cuda.get_device_name(0))\n\nclass Identity(torch.nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n# Setting this to True uses a CNN archetecture with 2 fully connected layers.\n# If False there are 300 features (len(filter_sizes) * n_filters)\nDUAL_FC_LAYERS = True\n# The features are extracted from the output of the first layer (150 features per model)\nNUM_FEATURES = 150\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nclass CNN(nn.Module):\n def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes,\n output_dim, dropout, pad_idx):\n super().__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n self.convs = nn.ModuleList([\n nn.Conv2d(in_channels=1,\n out_channels=n_filters, \n kernel_size=(fs, embedding_dim))\n for fs in filter_sizes\n ])\n self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)\n self.dropout = nn.Dropout(dropout)\n \n # self.fc = Identity()\n # self.dropout = Identity()\n\n def forward(self, text):\n # text = [batch size, sent len]\n embedded = self.embedding(text)\n\n # embedded = [batch size, sent len, emb dim]\n embedded = embedded.unsqueeze(1)\n\n # embedded = [batch size, 1, sent len, emb dim]\n conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]\n\n # conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]\n pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]\n\n cat = self.dropout(torch.cat(pooled, dim=1))\n\n return self.fc(cat)\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nclass CNN_2FC(nn.Module):\n def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes,\n output_dim, dropout, pad_idx):\n super().__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n self.convs = nn.ModuleList([\n nn.Conv2d(in_channels=1,\n out_channels=n_filters, \n kernel_size=(fs, embedding_dim))\n for fs in filter_sizes\n ])\n self.fc1 = nn.Linear(len(filter_sizes) * n_filters, NUM_FEATURES)\n self.fc = nn.Linear(NUM_FEATURES, output_dim)\n self.dropout = nn.Dropout(dropout)\n \n # self.fc = Identity()\n # self.dropout = Identity()\n\n def forward(self, text):\n # text = [batch size, sent len]\n embedded = self.embedding(text)\n\n # embedded = [batch size, sent len, emb dim]\n embedded = embedded.unsqueeze(1)\n\n # embedded = [batch size, 1, sent len, emb dim]\n conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]\n\n # conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]\n pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]\n\n cat = self.dropout(torch.cat(pooled, dim=1))\n\n return self.fc(self.fc1(cat))\n\nclass SUPER_CNN(nn.Module):\n def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes,\n output_dim, dropout, pad_idx, use_base=False, num_features=300):\n super().__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n self.convs = nn.ModuleList([\n nn.Conv2d(in_channels=1,\n out_channels=n_filters, \n 
kernel_size=(fs, embedding_dim))\n for fs in filter_sizes\n ])\n\n # self.sentiment_model = CNN(vocab_size, embedding_dim, n_filters, filter_sizes,\n # output_dim, dropout, pad_idx)\n # self.sentiment_model.load_state_dict(torch.load('CNN-sentiment.pt'))\n self.use_base = use_base\n \n if self.use_base:\n self.sarcasm_base_model = torch.load('CNN-sarcasm-base.pt')\n for param in self.sarcasm_base_model.parameters():\n param.requires_grad = False\n self.sarcasm_base_model.embedding = Identity()\n self.sarcasm_base_model.fc = Identity()\n self.sarcasm_base_model.dropout = Identity()\n \n\n self.sentiment_model = torch.load('CNN-sentiment.pt', map_location=torch.device('cpu'))\n for param in self.sentiment_model.parameters():\n param.requires_grad = False\n self.sentiment_model.embedding = Identity()\n self.sentiment_model.fc = Identity()\n self.sentiment_model.dropout = Identity()\n\n # self.emotion_model = CNN(vocab_size, embedding_dim, n_filters, filter_sizes,\n # 5, dropout, pad_idx)\n # self.emotion_model.load_state_dict(torch.load('CNN-emotion.pt'))\n\n self.emotion_model = torch.load('CNN-emotion.pt', map_location=torch.device('cpu'))\n for param in self.emotion_model.parameters():\n param.requires_grad = False\n self.emotion_model.embedding = Identity()\n self.emotion_model.fc = Identity()\n self.emotion_model.dropout = Identity()\n\n # self.formality_model = CNN(vocab_size, embedding_dim, n_filters, filter_sizes,\n # output_dim, dropout, pad_idx)\n # self.formality_model.load_state_dict(torch.load('CNN-formality-binary.pt'))\n\n self.formality_model = torch.load('CNN-formality-binary.pt', map_location=torch.device('cpu'))\n for param in self.formality_model.parameters():\n param.requires_grad = False\n self.formality_model.embedding = Identity()\n self.formality_model.fc = Identity()\n self.formality_model.dropout = Identity()\n\n # self.informativeness_model = CNN(vocab_size, embedding_dim, n_filters, filter_sizes,\n # output_dim, dropout, pad_idx)\n # self.informativeness_model.load_state_dict(torch.load('CNN-informativeness-binary.pt'))\n\n self.informativeness_model = torch.load('CNN-informativeness-binary.pt', map_location=torch.device('cpu'))\n for param in self.informativeness_model.parameters():\n param.requires_grad = False\n self.informativeness_model.embedding = Identity()\n self.informativeness_model.fc = Identity()\n self.informativeness_model.dropout = Identity()\n\n if DUAL_FC_LAYERS:\n self.fc1 = nn.Linear(len(filter_sizes)*n_filters, NUM_FEATURES)\n #self.fc = nn.Linear(5 * NUM_FEATURES, output_dim)\n self.fc = nn.Linear(5 * NUM_FEATURES, output_dim)\n else:\n #self.fc = nn.Linear(5 * len(filter_sizes)*n_filters, output_dim)\n self.fc = nn.Linear(3 * len(filter_sizes)*n_filters, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, text):\n # text = [batch size, sent len]\n embedded = self.embedding(text)\n\n # embedded = [batch size, sent len, emb dim]\n # embedded = embedded.unsqueeze(1)\n\n # embedded = [batch size, 1, sent len, emb dim]\n \n # print(\"TESTING\")\n o1 = self.sentiment_model(embedded)\n o2 = self.emotion_model(embedded)\n o3 = self.formality_model(embedded)\n o4 = self.informativeness_model(embedded)\n if self.use_base:\n o5 = self.sarcasm_base_model(embedded)\n cat = self.dropout(torch.cat((o1, o2, o3, o4, o5), dim=1))\n #cat = self.dropout(torch.cat((o2, o5), dim=1))\n #cat = self.dropout(o5)\n else:\n conved = [F.relu(conv(embedded.unsqueeze(1))).squeeze(3) for conv in self.convs]\n # conved_n = [batch size, n_filters, sent len 
- filter_sizes[n] + 1]\n pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]\n pooled = torch.cat(pooled,dim=1)\n if DUAL_FC_LAYERS:\n pooled = self.fc1(pooled)\n cat = self.dropout(torch.cat((pooled, o1, o2, o3, o4), dim=1))\n #cat = self.dropout(pooled)\n # print(\"TESTING 2\")\n # print(pooled.type)\n # print(o1.type)\n # print(o2.type)\n \n # print(cat.shape)\n\n return self.fc(cat)\n\n\"\"\"## Sarcasm Model Definition\"\"\"\n\nfrom torchtext import data\nfrom torchtext import datasets\nimport torch.optim as optim\nimport random\nimport numpy as np\nimport time\nfrom sklearn.metrics import f1_score\nSEED = 1234\n\nLR = 0.001\nclass ModelTrainer():\n\n def __init__(self, model_name, model_type=None, text_field_name=None,\n label_field_name=None, IMDB=False, train_file_name=None,\n test_file_name=None, embedding_dim=100, criterion_str=\"CE\",\n max_vocab_size=25000, num_filters=100, filter_sizes=[3, 4, 5],\n dropout=0.5, output_dim=1, batch_size=64, dtype=torch.long, use_base=False, num_features=300):\n self.model_name = model_name\n self.model_type = model_type\n self.text_field_name = text_field_name\n self.label_field_name = label_field_name\n self.IMDB = IMDB\n self.train_file_name = train_file_name\n self.test_file_name = test_file_name\n self.criterion_str = criterion_str\n self.embedding_dim = embedding_dim\n self.max_vocab_size = max_vocab_size\n self.num_filters = num_filters\n self.filter_sizes = filter_sizes\n self.dropout = dropout\n self.batch_size = batch_size\n self.dtype = dtype\n self.use_base = use_base\n\n def load_dataset(self):\n \"\"\" Loads the dataset using torchtext\n \"\"\"\n r_seed = random.seed(SEED)\n np.random.seed(SEED)\n torch.manual_seed(SEED)\n torch.backends.cudnn.deterministic = True\n\n self.TEXT = data.Field(\n tokenize='spacy',\n tokenizer_language='en_core_web_sm',\n batch_first=True)\n self.LABEL = data.LabelField(dtype=self.dtype) # dtype=torch.float)\n # LOAD DATASET\n if self.IMDB:\n self.dataset, test_data = datasets.IMDB.splits(self.TEXT, self.LABEL)\n train_data, valid_data = self.dataset.split(random_state=r_seed)\n else:\n if self.text_field_name == \"Sentence\":\n # Formality and Informativeness don't work with dict fields\n # tuples representative of tabular format\n fields = [\n (self.label_field_name, self.LABEL),\n (self.text_field_name, self.TEXT)\n ]\n skip_header = True\n else:\n fields = {\n self.label_field_name: (self.label_field_name, self.LABEL),\n self.text_field_name: (self.text_field_name, self.TEXT)\n }\n skip_header = False\n format = self.train_file_name.split('.')[-1]\n if self.test_file_name:\n \n self.dataset, test_data = data.TabularDataset.splits(\n #path='/content',\n path='./content',\n train=self.train_file_name,\n test=self.test_file_name,\n format=format,\n fields=fields,\n skip_header=skip_header\n )\n train_data, valid_data = self.dataset.split(random_state=r_seed)\n else:\n self.dataset = data.TabularDataset.splits(\n #path='/content',\n path='./content',\n train=self.train_file_name,\n format=format,\n fields=fields,\n skip_header=skip_header\n )[0]\n train_data, valid_data, test_data = self.dataset.split(\n split_ratio=[0.8, 0.1, 0.1], random_state=r_seed)\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if torch.cuda.is_available():\n print(torch.cuda.get_device_name(0))\n\n self.train_iter, self.val_iter, self.test_iter = data.BucketIterator.splits(\n (train_data, valid_data, test_data),\n batch_size=self.batch_size,\n device=self.device,\n 
sort=False\n )\n\n def build_vocab(self):\n \"\"\" Builds the vocabulary of the dataset using torchtext\n \"\"\"\n self.TEXT.build_vocab(\n self.dataset,\n max_size=self.max_vocab_size,\n vectors=\"glove.6B.100d\",\n unk_init=torch.Tensor.normal_\n )\n self.LABEL.build_vocab(self.dataset)\n\n def init_model(self):\n \"\"\" Create CNN model with the supplied/derived parameters.\n Loads \"glove.6B.100d\" weights into the model's embedding layer\n \"\"\"\n # Get the size of the vocabulary\n vocab_size = len(self.TEXT.vocab)\n if self.criterion_str == \"CE\":\n output_dim = len(self.LABEL.vocab)\n else:\n output_dim = 1\n pad_idx = self.TEXT.vocab.stoi[self.TEXT.pad_token]\n # Initialize model\n if self.model_type == 'deep':\n self.model = VDCNN(output_dim, vocab_size, pad_idx,\n self.embedding_dim, shortcut=True)\n elif self.model_type == 'master':\n self.model = SUPER_CNN(vocab_size, self.embedding_dim, \n self.num_filters, self.filter_sizes, \n output_dim, self.dropout, pad_idx, use_base=self.use_base)\n else:\n if DUAL_FC_LAYERS:\n self.model = CNN_2FC(vocab_size, self.embedding_dim, self.num_filters,\n self.filter_sizes, output_dim, self.dropout,\n pad_idx)\n else:\n self.model = CNN(vocab_size, self.embedding_dim, self.num_filters,\n self.filter_sizes, output_dim, self.dropout,\n pad_idx)\n # Get pretrained weights from vocab\n pretrained_embeddings = self.TEXT.vocab.vectors\n self.model.embedding.weight.data.copy_(pretrained_embeddings)\n\n # zero initial weights of <unk> and <pad>\n unk_index = self.TEXT.vocab.stoi[self.TEXT.unk_token]\n\n self.model.embedding.weight.data[unk_index] = torch.zeros(self.embedding_dim)\n self.model.embedding.weight.data[pad_idx] = torch.zeros(self.embedding_dim)\n\n def init_optimizer(self):\n \"\"\" Initializes the optimizor (Adam) and the criterion (BCEWithLogitsLoss)\n \"\"\"\n self.optimizer = optim.Adam(self.model.parameters(), lr=LR)\n self.model = self.model.to(self.device)\n if self.criterion_str == \"CE\":\n self.criterion = nn.CrossEntropyLoss()\n if self.criterion_str == \"WCE\":\n self.criterion = nn.BCEWithLogitsLoss(pos_weight=torch.FloatTensor([10]))\n elif self.criterion_str == \"BCE\":\n self.criterion = nn.BCEWithLogitsLoss()\n elif self.criterion_str == \"Reg\":\n self.criterion = nn.MSELoss()\n self.criterion = self.criterion.to(self.device)\n\n def train_model(self, num_epochs):\n \"\"\" Trains and validates the model (and prints the results) for the\n given number of epochs. Saves the model from the epoch which\n yeilded the lowest validation loss.\n\n Args:\n num_epochs (int): number of epochs to train the model\n \"\"\"\n print(self.model_name)\n best_valid_loss = float('inf')\n\n for epoch in range(num_epochs):\n start_time = time.time()\n\n train_loss, train_acc = self.train_epoch()\n valid_loss, valid_acc, _, _ = self.evaluate_epoch(self.val_iter)\n\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n # torch.save(self.model.state_dict(), f'{self.model_name}.pt')\n torch.save(self.model, f'{self.model_name}.pt')\n\n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. 
Acc: {valid_acc*100:.2f}%')\n\n def count_parameters(self):\n num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)\n print(f'The model has {num_params:,} trainable parameters')\n\n def train_epoch(self):\n \"\"\" Trains the model for 1 epoch of the train iterator\n\n Returns:\n tuple: (loss, accuracy)\n \"\"\"\n epoch_loss = 0\n epoch_acc = 0\n self.model.train()\n for batch in self.train_iter:\n # print(batch.__dict__)\n self.optimizer.zero_grad()\n predictions = self.model(getattr(batch, batch.input_fields[0]))\n # Reduce dimentionality if using binary cross entropy loss\n if self.criterion_str == \"BCE\":\n predictions = predictions.squeeze(1)\n acc_formula = binary_accuracy\n elif self.criterion_str == \"CE\":\n acc_formula = categorical_accuracy \n loss = self.criterion(predictions, getattr(batch, batch.target_fields[0]))\n acc = acc_formula(predictions, getattr(batch, batch.target_fields[0]))\n loss.backward()\n self.optimizer.step()\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(self.train_iter), epoch_acc / len(self.train_iter)\n\n def evaluate_epoch(self, iterator):\n epoch_loss = 0\n epoch_acc = 0\n logits_list = []\n labels_list = []\n self.model.eval()\n with torch.no_grad():\n for batch in iterator:\n predictions = self.model(getattr(batch, batch.input_fields[0]))\n logits_list.append(predictions)\n labels_list.append(getattr(batch, batch.target_fields[0]))\n # Reduce dimentionality if using binary cross entropy loss\n if self.criterion_str == \"BCE\":\n predictions = predictions.squeeze(1)\n acc_formula = binary_accuracy\n elif self.criterion_str == \"CE\":\n acc_formula = categorical_accuracy\n loss = self.criterion(predictions, getattr(batch, batch.target_fields[0]))\n acc = acc_formula(predictions, getattr(batch, batch.target_fields[0]))\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n logits_list = torch.cat(logits_list)\n labels_list = torch.cat(labels_list)\n # softmaxes = F.softmax(logits_list, dim=1)\n # _, predictions_list = torch.max(softmaxes, dim=1)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if logits_list[0].shape[0] == 1:\n predictions_list = torch.round(torch.sigmoid(logits_list))\n ece_criterion = ECE_binary().to(device)\n\n else:\n softmaxes = F.softmax(logits_list, dim=1)\n _, predictions_list = torch.max(softmaxes, dim=1)\n #create ece class object\n ece_criterion = ECE_multiclass().to(device)\n\n # # first input is raw logits tensor (not softmaxed) and the second is a tensor of correct labels \n ece_test = ece_criterion(logits_list, labels_list).item()\n \n f1 = f1_score(labels_list.detach().cpu().numpy(),predictions_list.detach().cpu().numpy(), average='macro')\n return epoch_loss / len(iterator), epoch_acc / len(iterator) , f1, ece_test\n\n def test_model(self):\n \"\"\" Tests the model with the highest validation loss on the test iterator\n \"\"\"\n # self.model.load_state_dict(torch.load(f'{self.model_name}.pt'))\n torch.load(f'{self.model_name}.pt')\n test_loss, test_acc, f1, ece = self.evaluate_epoch(self.test_iter)\n print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%\\n | F1 Score: {f1:.3f} | Calibration Error: {ece:.3f} |')\n #print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}')\n with open('model_results.txt', \"a\") as f:\n # f.write(f'{self.model_name}\\nTest Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%\\n | F1 Score: {f1:.3f} | Calibration Error: {ece:.3f} |' )\n f.write(f'{self.model_name}\\nTest Loss: 
{test_loss:.3f} | Test Acc: {test_acc*100:.2f} |' )\n\n def do_what_we_want(self, num_epochs=10):\n self.load_dataset()\n self.build_vocab()\n self.init_model()\n self.init_optimizer()\n self.train_model(num_epochs)\n self.test_model()\n\n\ndef binary_accuracy(preds, y):\n \"\"\"\n Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8\n \"\"\"\n # round predictions to the closest integer\n rounded_preds = torch.round(torch.sigmoid(preds))\n correct = (rounded_preds == y).float() # convert into float for division\n acc = correct.sum() / len(correct)\n return acc\n\n\ndef categorical_accuracy(preds, y):\n \"\"\"\n Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8\n \"\"\"\n top_pred = preds.argmax(1, keepdim = True)\n correct = top_pred.eq(y.view_as(top_pred)).sum()\n acc = correct.float() / y.shape[0]\n return acc\n \n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\nclass ECE_binary(nn.Module):\n\n def __init__(self, n_bins=15):\n \"\"\"\n n_bins (int): number of confidence interval bins\n \"\"\"\n super(ECE_binary, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n predictions = torch.round(torch.sigmoid(logits))\n confidences = []\n for x in logits:\n confidences.append(max(1-x,x))\n confidences = torch.Tensor(confidences)\n accuracies = predictions.eq(labels.unsqueeze(1))\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n return ece\n\nclass ECE_multiclass(nn.Module):\n\n def __init__(self, n_bins=15):\n \"\"\"\n n_bins (int): number of confidence interval bins\n \"\"\"\n super(ECE_multiclass, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, dim=1)\n accuracies = predictions.eq(labels)\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n return ece\n\n# clear contents of file\nwith open(\"model_results.txt\", \"w\") as f:\n pass\n\n'''\n# ======================== FORMALITY BINARY MODEL ========================= #\ntrainer = ModelTrainer(\n model_name=\"CNN-formality-binary\", \n text_field_name='Sentence', \n label_field_name='Formality', \n train_file_name='mturk_news_formality_BINARY.csv', \n criterion_str='BCE',\n 
dtype=torch.float)\ntrainer.do_what_we_want(num_epochs=8)\n\n\n# ======================== INFORMATIVENESS BINARY MODEL ========================= #\ntrainer = ModelTrainer(\n model_name=\"CNN-informativeness-binary\", \n text_field_name='Sentence', \n label_field_name='Informativeness', \n train_file_name='mturk_news_informativeness_reg.csv', \n criterion_str='BCE',\n dtype=torch.float)\ntrainer.do_what_we_want(num_epochs=8)\n\n\n# ======================== EMOTION MODEL ========================= #\ntrainer = ModelTrainer(\n model_name=\"CNN-emotion\", \n text_field_name='Text', \n label_field_name='Emotion', \n train_file_name='emotion_train.csv', \n test_file_name='emotion_test.csv',\n criterion_str='CE'\n)\ntrainer.do_what_we_want(num_epochs=8)\n\n# ======================== SENTIMENT MODEL ========================= #\ntrainer = ModelTrainer(\"CNN-sentiment\", IMDB=True, criterion_str=\"BCE\", dtype=torch.float)\ntrainer.do_what_we_want(num_epochs=8)\n\n# ======================== SARCASM MASTER MODEL ========================= #\ntrainer = ModelTrainer(\n model_name=\"CNN-sarcasm-master-without-base\",\n model_type=\"master\",\n text_field_name='headline', \n label_field_name='is_sarcastic', \n train_file_name='Sarcasm_Headlines_Dataset_v2.csv', \n criterion_str='BCE',\n dtype=torch.float,\n use_base=False)\ntrainer.do_what_we_want(num_epochs=8)\n'''\n\n# after 1 iteration of the loop the seed gets set, so to get random combinations you need to only run it once then rerun the code. It's annoying.\nnum_random_iterations = 1\nfor _ in range(num_random_iterations): \n\n batch_size = random.choice([32, 64])\n embed = 100\n drop = random.uniform(0.1, 0.5)\n filters = random.choice([100,200,300,400,500])\n filter_list = [[3, 4, 5], [2, 3, 4, 5], [3, 4, 5, 7], [2, 3, 4, 5, 7], [3, 5]]\n filter_comb = random.choice(filter_list)\n LR = random.uniform(1e-4, 1e-2) \n print(\"start tuning\")\n print('lr: {}, filters: {}, batch size: {}, dropout: {}, embedding dim: {}, NUM_FEATURES: {}, filter sizes: {}'.format(LR, filters, batch_size, drop, embed, NUM_FEATURES, filter_comb)) \n epochs = 8 \n\n # ======================== FORMALITY BINARY MODEL ========================= #\n trainer = ModelTrainer(\n model_name=\"CNN-formality-binary\", \n text_field_name='Sentence', \n label_field_name='Formality', \n train_file_name='mturk_news_formality_BINARY.csv', \n embedding_dim=embed,\n criterion_str='BCE',\n num_filters=filters,\n filter_sizes=filter_comb,\n dropout=drop,\n dtype=torch.float,\n batch_size=batch_size\n )\n \n trainer.do_what_we_want(num_epochs=epochs)\n\n\n # ======================== INFORMATIVENESS BINARY MODEL ========================= #\n trainer = ModelTrainer(\n model_name=\"CNN-informativeness-binary\", \n text_field_name='Sentence', \n label_field_name='Informativeness', \n train_file_name='mturk_news_informativeness_BINARY.csv', \n embedding_dim=embed,\n criterion_str='BCE',\n num_filters=filters,\n filter_sizes=filter_comb,\n dropout=drop, \n dtype=torch.float,\n batch_size=batch_size)\n trainer.do_what_we_want(num_epochs=epochs)\n\n # ======================== EMOTION MODEL ========================= #\n trainer = ModelTrainer(\n model_name=\"CNN-emotion\", \n text_field_name='Text', \n label_field_name='Emotion', \n train_file_name='emotion_train.csv', \n test_file_name='emotion_test.csv',\n embedding_dim=embed,\n criterion_str='CE',\n num_filters=filters,\n filter_sizes=filter_comb,\n dropout=drop,\n batch_size=batch_size\n )\n trainer.do_what_we_want(num_epochs=epochs)\n\n # 
======================== SENTIMENT MODEL ========================= #\n trainer = ModelTrainer(\"CNN-sentiment\", IMDB=True, embedding_dim=embed, criterion_str=\"BCE\",num_filters=filters,filter_sizes=filter_comb, dropout=drop, dtype=torch.float, batch_size=batch_size)\n trainer.do_what_we_want(num_epochs=epochs)\n\n # ======================== SARCASM MASTER MODEL ========================= #\n trainer = ModelTrainer(\n model_name=\"CNN-sarcasm-master-without-base\",\n model_type=\"master\",\n text_field_name='headline', \n label_field_name='is_sarcastic', \n train_file_name='Sarcasm_Headlines_Dataset_v2.csv', \n embedding_dim=embed, \n criterion_str='BCE',\n num_filters=filters,\n filter_sizes=filter_comb,\n dropout=drop,\n dtype=torch.float,\n use_base=False,\n batch_size=batch_size)\n trainer.do_what_we_want(num_epochs=epochs)\n"
] | [
[
"torch.nn.functional.max_pool1d",
"torch.abs",
"torch.nn.functional.softmax",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.load",
"torch.nn.Embedding",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.device",
"torch.save",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.sigmoid",
"torch.linspace",
"torch.nn.Conv2d",
"torch.cuda.empty_cache",
"torch.nn.Linear",
"numpy.random.seed",
"torch.Tensor",
"torch.manual_seed",
"torch.cuda.get_device_name",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aba-ai-learning/DailyStudy | [
"b04d066582af930d0116e6b1a39b0a7fffb5dda9"
] | [
"dailynotes/2020-01/0114_fast_median_filter_with_hist.py"
] | [
"\nimport cv2\nimport numpy as np\nimport math\n\n\ndef calcMedian(histogram, threshholdvalue):\n tmpnum = 0\n for i in range(len(histogram)):\n tmpnum += histogram[i]\n if tmpnum > threshholdvalue:\n return i\n\n return None\n\n\ndef fast_median_filter(input, window_size):\n h, w = input.shape\n output = np.zeros(input.shape, dtype=np.uint8)\n print(input.shape)\n N = window_size*window_size\n threshholdvalue = N/2 + 1\n radius = (window_size - 1) / 2\n right = w - radius\n bot = h - radius\n\n histogram = [0 for i in range(256)]\n\n for i in range(radius, right, 1):\n for j in range(radius, bot, 1):\n ## build first histogram\n if j == radius:\n histogram = [0 for _ in range(256)]\n for y in range(i-radius, i+radius, 1):\n for x in range(j-radius, j+radius, 1):\n value = input[x][y]\n histogram[value] += 1\n else:\n #update histogram : minus left add right\n left = j-radius-1\n right = j+radius\n for y in range(i-radius, i+radius, 1):\n valuel = input[left][y]\n valuer = input[right][y]\n histogram[valuel] -= 1\n histogram[valuer] += 1\n\n ###calc median with histogram\n medianvalue = calcMedian(histogram, threshholdvalue)\n output[j][i] = medianvalue\n\n # ##border keep 0\n # for i in range(0, radius):\n\n return output\n\n\nif __name__ == '__main__':\n\n imgpath = 'test_.jpg'\n img = cv2.imread(imgpath)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n output = fast_median_filter(img, 5)\n cv2.imwrite('fast_median_filter.jpg', output)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sontungtran/dopt | [
"2507fb5b5fc2a8cfb7dcc3bedaf86d27babf4aa0"
] | [
"dopt/utils/general_utils.py"
] | [
"r\"\"\"\nHelper functions\n\"\"\"\nimport sys\nimport time\nfrom datetime import datetime\nfrom contextlib import contextmanager\nimport torch\nimport random\nimport subprocess\n\n@contextmanager\ndef add_prefix_to_print(prefix): \n global is_new_line\n orig_write = sys.stdout.write\n is_new_line = True\n def new_write(*args, **kwargs):\n global is_new_line\n if args[0] == \"\\n\":\n is_new_line = True\n elif is_new_line:\n orig_write(\"[\" + str(prefix) + \"]: \")\n is_new_line = False\n orig_write(*args, **kwargs)\n sys.stdout.write = new_write\n yield\n sys.stdout.write = orig_write\n\n@contextmanager\ndef timer(label):\n import time\n start = time.time()\n yield\n print(f\"[Process {label}] elasped in {time.time()-start}\")\n \ndef get_output_shape(model, image_dim):\n return model(torch.rand(*(image_dim))).data.shape\n\ndef generate_seed():\n return random.randint(1, 100000)\n\ndef get_gpu_info():\n sp = subprocess.Popen(['nvidia-smi', '-q'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out_str = sp.communicate()\n out_list = out_str[0].decode(\"utf8\").split('\\n')\n out_dict = {}\n for item in out_list:\n try:\n key, val = item.split(':')\n key, val = key.strip(), val.strip()\n if key in out_dict: # Already exists\n out_dict[key].append(val)\n else:\n out_dict[key] = [val]\n except:\n pass\n return out_dict\n\ndef get_general_info(pid):\n sp = subprocess.Popen([\"ps\", \"-up\", str(pid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out_str = sp.communicate()\n outputs = out_str[0].decode(\"utf8\").split(\"\\n\")\n labels = outputs[0].split()\n info = outputs[1].split()\n if len(info) > len(labels): # Join commands that were splitted\n last_label_idx = len(labels)-1 \n info[last_label_idx] = \" \".join(info[last_label_idx:])\n info = info[:len(labels)]\n process_info = {labels[i]: info[i] for i in range(len(info))}\n return process_info\n\ndef get_all_gpu_processes_info():\n processes = {}\n out_dict = get_gpu_info()\n if \"Total\" in out_dict: \n max_gpu = int(out_dict[\"Total\"][0].split()[0])\n else:\n max_gpu = -1\n processes[\"max_gpu\"] = max_gpu\n processes[\"time_updated\"] = datetime.now().strftime(\"%m/%d/%Y-%H:%M:%S\")\n for i, process_id in enumerate(out_dict[\"Process ID\"]):\n process_info = get_general_info(process_id)\n processes[process_id] = {\n \"name\": out_dict[\"Name\"][i],\n \"user\": process_info[\"USER\"],\n \"gpu_used\": int(out_dict[\"Used GPU Memory\"][i].split()[0]),\n \"%cpu_used\": float(process_info[\"%CPU\"]),\n \"%mem_used\": float(process_info[\"%MEM\"]),\n \"command\": process_info[\"COMMAND\"]\n }\n return processes\n \n"
] | [
[
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CEHENKLE/gitchues | [
"dff5adaf67c40edad9f2ff4cb0d8f6d6b0f85cad"
] | [
"setup.py"
] | [
"from datetime import date\nfrom github import Github\nimport os\nimport pandas as pd\nimport sys\n\n\n# used on first run to set up the pickle where we'll be storing stuff\ndef main(labels, my_org):\n token = os.getenv('GITHUB_TOKEN', '...')\n g = Github(token)\n org = g.get_organization(my_org)\n\n repo_data = []\n repo_names = []\n panda_list = []\n\n for repo in org.get_repos():\n repo_names.append(repo.name)\n the_date = str(date.today())\n\n# I bet there's a more pythonic way to do this.\n for label in labels:\n for repo in org.get_repos():\n repo_data.append(repo.get_issues(state=\"open\", labels=[label]).totalCount)\n panda_list.append({\n the_date: (repo_data)\n })\n repo_data = []\n# see, this I like ;)\n dataframes = list(map(lambda x: pd.DataFrame(x, index=repo_names), panda_list))\n print(dataframes)\n i = 0\n for dataframe in dataframes:\n dataframe.to_pickle(\"gitchues-\"+labels[i]+\".pkl\")\n i += 1\n return\n\n\nif __name__ == \"__main__\":\n main(list(sys.argv[1]), str(sys.argv[2]))\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
iannjari/COVID-19-Cases-and-Mortality-Analysis-and-Prediction | [
"1cc4ce799664f897bb4c6c08ea83735dcfb09a3d"
] | [
"src/app.py"
] | [
"import dash\nfrom dash import dcc\nfrom dash import html\nfrom dash.dependencies import Input, Output, State\nimport plotly.express as px\nimport pandas as pd\nimport plotly.graph_objects as go \nimport os\nimport smtplib\nfrom smtplib import *\nfrom email.message import EmailMessage\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport re\nimport DNS\nfrom validate_email import validate_email\npwd=os.getcwd()\n\n\n# DATA PREPERATION\n# Read data for maps\ncase_map =pd.read_excel(pwd+\"\\\\..\\\\data\\\\casemapdata.xlsx\")\ndeath_map =pd.read_excel(pwd+\"\\\\..\\\\data\\\\deathmapdata.xlsx\")\n\n\n# Read cases line plot and predicted data\ncase_preds=pd.read_csv(pwd+\"\\\\..\\\\data\\\\casepredictions.csv\")\ncases=pd.read_excel(pwd+\"\\\\..\\\\data\\\\cases_plot.xlsx\")\n\n# Read deaths line plot and predicted data\ndeath_preds=pd.read_csv(pwd+\"\\\\..\\\\data\\\\deathpredictions.csv\")\ndeaths=pd.read_excel(pwd+\"\\\\..\\\\data\\\\deaths_plot.xlsx\")\n\n# Read Vaccinations data\nvaccines=pd.read_csv(pwd+'\\\\..\\\\data\\\\vaccinations.csv')\n\n# GLOBAL VARIABLES DECLARATION\n\napp = dash.Dash(__name__)\napp.config.suppress_callback_exceptions = True\napp.css.config.serve_locally = True\napp.scripts.config.serve_locally = True\n\nfig3=go.Figure()\n\n\napp.layout = html.Div([\n \n dcc.Location(id='url', refresh=False),\n html.Div(id='page-content'),\n ])\n\nindex_page = html.Div([\n html.H1('Global Caseload and Mortality by Country',\n style={'textAlign':'center'}),\n html.Br(),\n dcc.Link('Cases and Deaths By Country', href='/compare'),\n html.Br(),\n html.Br(),\n dcc.Link('Download Report', href='/report'),\n html.Br(),\n html.Br(),\n dcc.Link('Predictions', href='/predict'),\n html.Br(),\n html.Br(),\n dcc.Link('Kenya Vaccinations by County/Region', href='/vaccines'),\n html.Br(),\n html.Br(),\n\n dcc.Dropdown(\n id='dropdown3',\n value = 'Cases',\n options=[\n {'label': 'See Global Cases', 'value': 'Cases'},\n {'label': 'See Global Mortality', 'value': 'Deaths'}],\n style={'textAlign': 'center',\n 'width':'50%',\n 'margin-left': 'auto',\n 'margin-right': 'auto'\n },\n clearable=False\n ),\n \n html.Br(),\n html.Br(),\n html.Br(),\n \n dcc.Graph(id=\"choropleth\", figure=fig3,\n style={'width':'75%',\n 'margin-left': 'auto',\n 'margin-right': 'auto' }),\n \n html.Br(),\n \n \n])\n\npage_1_layout = html.Div([\n html.H1('Cases and Deaths by Country',\n style={'textAlign':'center'}),\n html.Br(),\n \n dcc.Link('Global Caseload and Mortality by Country', href='/index_page'),\n html.Br(),\n html.Br(),\n dcc.Link('Download Report', href='/report'),\n html.Br(),\n html.Br(),\n dcc.Link('Predictions', href='/predict'),\n html.Br(),\n html.Br(),\n dcc.Link('Kenya Vaccinations by County/Region', href='/vaccines'),\n html.Br(),\n html.Br(),\n \n html.Div([\n dcc.Dropdown(\n id='dropdown1',\n value = 'Afghanistan',\n options=[{'label': i, 'value': i} for i in cases.columns[1:]],\n style={'width': '40%',\n 'margin-left': 'auto',\n 'margin-right': 'auto',\n 'bottom': '2px'},\n clearable=False),\n\n dcc.Dropdown(\n id='dropdown6',\n value = 'Kenya',\n options=[{'label': i, 'value': i} for i in cases.columns[1:]],\n style={'width': '40%',\n 'margin-left': 'auto',\n 'margin-right': 'auto'},\n clearable=False),\n \n dcc.Graph(id='graph1'),\n html.Div(id='dd-output-container1'),],\n style={'width': '50%','display': 'inline-block'}),\n html.Div([\n dcc.Dropdown(\n id='dropdown2',\n value = 
'Afghanistan',\n options=[{'label': i, 'value': i} for i in deaths.columns[1:]],\n style={'width': '40%',\n 'margin-left': 'auto',\n 'margin-right': 'auto',\n 'bottom': '2px'},\n clearable=False),\n dcc.Dropdown(\n id='dropdown7',\n value = 'Kenya',\n options=[{'label': i, 'value': i} for i in deaths.columns[1:]],\n style={'width': '40%',\n 'margin-left': 'auto',\n 'margin-right': 'auto'},\n clearable=False),\n \n dcc.Graph(id='graph2'),\n \n html.Div(id='dd-output-container')],\n style={'width': '50%', 'display': 'inline-block'})\n])\n\n\npage_2_layout = html.Div([\n html.Br(),\n html.Br(),\n html.H1('Download or Mail Report',style={'textAlign':'center'}),\n dcc.Link('Global Caseload and Mortality', href='/'),\n html.Br(),\n html.Br(),\n dcc.Link('Cases and Deaths By Country', href='/compare'),\n html.Br(),\n html.Br(),\n dcc.Link('Predictions', href='/predict'),\n html.Br(),\n html.Br(),\n dcc.Link('Kenya Vaccinations by County/Region', href='/vaccines'),\n html.Br(),\n html.P('Download the latest COVID-19 report by clicking the button below;',style={'textAlign':'center'}),\n html.Br(),\n html.Div(\n [html.Button(\"Download Report\", id=\"btn_doc\"),\n dcc.Download(id=\"download-doc\")],style={'textAlign':'center'}),\n html.Br(),\n html.P('To have the report automatically sent to you via email, enter your email address below then click Submit Email.',style={'textAlign':'center'}),\n html.Div([\n html.Div(dcc.Input(id='input-on-submit', type='email',value=\"\"),style={'textAlign':'center'}),\n html.Br(),\n html.Div(\n html.Button('Submit Email', id='submit-val', n_clicks=0),style={'textAlign':'center'}),\n html.Br(),\n html.Div(id='email-string',style={'textAlign':'center'})\n ])\n])\n\n# Page 3, for predictions\npage_3_layout = html.Div([\n html.H1('Predict Cases and Deaths by Country',\n style={'textAlign':'center'}),\n html.Br(),\n \n dcc.Link('Global Caseload and Mortality by Country', href='/index_page'),\n html.Br(),\n html.Br(),\n dcc.Link('Compare Cases and Deaths by Country', href='/compare'),\n html.Br(),\n html.Br(),\n dcc.Link('Download Report', href='/report'),\n html.Br(),\n html.Br(),\n dcc.Link('Kenya Vaccinations by County/Region', href='/vaccines'),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n \n html.Div([\n dcc.Dropdown(\n id='dropdown4',\n value = 'Afghanistan',\n options=[{'label': i, 'value': i} for i in cases.columns[1:]],\n style={'width': '50%',\n 'margin-left': 'auto',\n 'margin-right': 'auto'}),\n \n dcc.Graph(id='graph4'),\n html.Div(id='dd-output-container1'),],\n style={'width': '50%','display': 'inline-block'}),\n html.Div([\n dcc.Dropdown(\n id='dropdown5',\n value = 'Afghanistan',\n options=[{'label': i, 'value': i} for i in deaths.columns[1:]],\n style={'width': '50%',\n 'margin-left': 'auto',\n 'margin-right': 'auto'}),\n \n dcc.Graph(id='graph5'),\n \n html.Div(id='dd-output-container')],\n style={'width': '50%', 'display': 'inline-block'})\n])\n\npage_4_layout=html.Div(\n [html.Br(),\n html.H1('Kenya Vaccinations by County and Region',style={'textAlign':'center'}),\n html.Br(),\n html.Br(),\n dcc.Link('Global Caseload and Mortality by Country', href='/index_page'),\n html.Br(),\n html.Br(),\n dcc.Link('Compare Cases and Deaths by Country', href='/compare'),\n html.Br(),\n html.Br(),\n dcc.Link('Download Report', href='/report'),\n html.Br(),\n html.Br(),\n dcc.Link('Predictions', href='/predict'),\n html.Br(),\n html.Br(),\n dcc.RadioItems(id='radio-button',options=[{'label': 'Region', 'value': 'r'},\n {'label': 'County', 'value': 'c'}\n ],\n 
value='r'),\n html.Br(),\n html.Br(),\n dcc.Graph(id='vaccines-graph')\n ])\n\n\n# Update the pages index\[email protected](dash.dependencies.Output('page-content', 'children'),\n [dash.dependencies.Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/compare':\n return page_1_layout\n elif pathname == '/report':\n return page_2_layout\n elif pathname == '/predict':\n return page_3_layout\n elif pathname=='/vaccines':\n return page_4_layout\n else:\n return index_page\n # You could also return a 404 \"URL not found\" page here\n\[email protected](\n Output(\"graph1\", \"figure\"),\n Input('dropdown6', 'value'),\n Input('dropdown1','value')\n )\n\n\ndef display_graph1(dropdown1,dropdown6):\n \n # Plot cases graph\n \n fig = go.Figure()\n fig.add_trace(go.Scatter(x=cases['Date'],y=cases[dropdown1],name=dropdown1))\n fig.add_trace(go.Scatter(x=cases['Date'],y=cases[dropdown6],name=dropdown6))\n fig.update_layout( title=\"Caseload\", xaxis_title=\"Dates\", yaxis_title=\"Cases\")\n \n return fig\n\[email protected](\n Output(\"graph2\",\"figure\"),\n [Input('dropdown2', 'value'),\n Input('dropdown7', 'value')])\n\ndef display_graph2(dropdown2,dropdown7): \n # Plot deaths graph\n fig2=go.Figure()\n fig2.add_trace(go.Scatter(x=deaths['Date'],y=deaths[dropdown2],mode='lines',name=dropdown2,line=dict(color='#00ff7f')))\n fig2.add_trace(go.Scatter(x=deaths['Date'],y=deaths[dropdown7],mode='lines',name=dropdown7,line=dict(color='#800080')))\n fig2.update_layout( title=\"Mortality\", xaxis_title=\"Dates\", yaxis_title=\"Deaths\")\n \n return fig2\n\[email protected](\n Output(\"choropleth\", \"figure\"),\n [Input('dropdown3', 'value'),\n ])\n\ndef display_map(dropdown3):\n \n if dropdown3=='Cases':\n fig3 = go.Figure(data=go.Choropleth(\n locations = case_map['CODE'],\n z = case_map['Total Cases'],\n text = case_map['Country/Region'],\n colorscale = 'Blues',\n autocolorscale=False,\n reversescale=True,\n marker_line_color='darkgray',\n marker_line_width=0.5,\n colorbar_title = 'Total Cases',\n ))\n\n fig3.update_layout(\n title_text='Cumulative Cases per Country',\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'\n ),\n annotations = [dict(\n x=0.55,\n y=0.1,\n xref='paper',\n yref='paper',\n text='Source: <a href=\"https://github.com/CSSEGISandData/COVID-19\">\\\n JHU CSSE COVID-19 Data</a>',\n showarrow = False\n )]\n )\n else:\n fig3 = go.Figure(data=go.Choropleth(\n locations = death_map['CODE'],\n z = death_map['Total Deaths'],\n text = death_map['Country/Region'],\n colorscale = 'Reds',\n autocolorscale=False,\n reversescale=False,\n marker_line_color='darkgray',\n marker_line_width=0.5,\n colorbar_title = 'Total Cases',\n ))\n\n fig3.update_layout(\n title_text='Cumulative Deaths per Country',\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'\n ),\n annotations = [dict(\n x=0.55,\n y=0.1,\n xref='paper',\n yref='paper',\n text='Source: <a href=\"https://github.com/CSSEGISandData/COVID-19\">\\\n JHU CSSE COVID-19 Data</a>',\n showarrow = False\n )]\n )\n \n return fig3\n \n\[email protected](\n Output(\"download-doc\", \"data\"),\n Input(\"btn_doc\", \"n_clicks\"),\n prevent_initial_call=True,\n)\ndef download_doc(n_clicks):\n return dcc.send_file(pwd+\"\\\\..\\\\data\\\\report.pdf\")\n\n\n# Callback for predictions\[email protected](\n [Output(\"graph4\", \"figure\"),Output(\"graph5\",\"figure\")],\n [Input('dropdown4', 'value'),\n Input('dropdown5', 'value')\n ])\n\ndef 
prediction_cases(dropdown4,dropdown5):\n \n # Filter cases prediction data\n predict_data_cases=case_preds[['Date',dropdown4]]\n\n # Plot Cases predictions\n fig7 = go.Figure(data=[go.Table(\n header=dict(values=list(['Date','Cases']),\n fill_color='paleturquoise',\n align='left'),\n cells=dict(values=[predict_data_cases['Date'], predict_data_cases[dropdown4].astype(int)],\n fill_color='lavender',\n align='left'))\n ])\n \n # Filter deaths prediction data\n predict_data_deaths=death_preds[['Date',dropdown5]]\n\n # Plot Deaths predictions\n fig8 = go.Figure(data=[go.Table(\n header=dict(values=list(['Date','Deaths']),\n fill_color='paleturquoise',\n align='left'),\n cells=dict(values=[predict_data_deaths['Date'], predict_data_deaths[dropdown5].astype(int)],\n fill_color='lavender',\n align='left'))\n ])\n\n return fig7, fig8\n\n\n# Email callback\[email protected](\n Output('email-string', 'children'),\n Input('submit-val', 'n_clicks'),\n State('input-on-submit', 'value')\n)\n\n\ndef email(n_clicks,value):\n email_string=\"\"\n if value !=\"\":\n regex = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b'\n \n # pass the regular expression and the string into the fullmatch() method\n # check whether the email exists\n if(re.fullmatch(regex, value)) and validate_email(value,verify=True):\n \n if n_clicks>0:\n try:\n # Validate and save email address\n EMAIL_ADDRESS = \"[email protected]\"\n EMAIL_PASSWORD = os.environ.get('EMAIL_PASSWORD')\n\n sender_address = EMAIL_ADDRESS\n receiver_address = value\n mail_content = '''Hello,\n\nHere is today's Covid report.\nIf you did not request this mail, kindly ignore it!\n \nThank you!\n '''\n\n message = MIMEMultipart()\n message['From'] = sender_address\n message['To'] = receiver_address\n message['Subject'] = \"COVID-19 REPORT\"\n \n\n message.attach(MIMEText(mail_content, 'plain'))\n attach_file_name = pwd+\"\\\\..\\\\data\\\\report.pdf\"\n attach_file = open(attach_file_name, 'rb') # Open the file as binary mode\n payload = MIMEBase('application', 'octate-stream')\n payload.set_payload((attach_file).read())\n encoders.encode_base64(payload) #encode the attachment\n #add payload header with filename\n payload.add_header('Content-Disposition', 'attachment', filename='report.pdf')\n message.attach(payload)\n\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(message)\n\n email_string=\"An email with the report has been sent and your email address saved to our list.\"\n email_list=pd.read_csv(pwd+'\\\\..\\\\data\\\\emaillist.csv')\n email_list=email_list['Address']\n if value not in email_list:\n value=pd.Series(value)\n email_list=email_list.append(value, ignore_index = True)\n email_list=pd.DataFrame(email_list)\n email_list=email_list.rename(columns={0:'Address'})\n email_list.to_csv(pwd+'\\\\..\\\\data\\\\emaillist.csv')\n\n\n except SMTPRecipientsRefused:\n email_string= str(\"The email address you entered may not exist! Please check it again and retry.\")\n except SMTPServerDisconnected:\n email_string=str('The server unexpectedly disconnected, or an attempt \\n'\n 'was made to use the SMTP instance before connecting it to a server. 
Try again later')\n except SMTPSenderRefused:\n email_string='Sender address refused.'\n except SMTPConnectError:\n email_string='An error occurred during establishment of a connection with the server.'\n except SMTPAuthenticationError:\n email_string='An Authentication Error occured'\n else:\n email_string=\"\"\n else:\n email_string=\"Invalid Email. Check whether you have typed the address correctly.\"\n \n else:\n if n_clicks>0:\n email_string=\"Please enter an address!\"\n return email_string \n\n# Vaccinations callback\[email protected](\n Output('vaccines-graph','figure'),\n Input('radio-button','value')\n)\n\ndef vaccinations(value):\n if value =='c':\n fig9 = go.Figure(go.Bar(\n y=vaccines['County'].values[::-1],\n x=vaccines['Vaccinations'].values[::-1],\n orientation='h'))\n\n else:\n df=vaccines.groupby(['Region']).sum()\n df=df.reset_index()\n fig9=go.Figure(go.Bar(\n y=df['Region'].values[::-1],\n x=vaccines['Vaccinations'].values[::-1],\n orientation='h'))\n \n return fig9\napp.run_server(debug=True)\n\n"
] | [
[
"pandas.read_excel",
"pandas.read_csv",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
xmodar/invtorch | [
"74b80be3b4126925e583282b6f78171b99788b37"
] | [
"invtorch/utils/parametrizations.py"
] | [
"\"\"\"Parametrization Modules\"\"\"\nimport torch\nfrom torch import nn\nfrom torch.nn.utils.parametrizations import _OrthMaps, _Orthogonal\n\n__all__ = ['NonZero', 'Orthogonal', 'ScaledOrthogonal']\n\n\nclass NonZero(nn.Module):\n \"\"\"Parameterization to force the values to be nonzero\"\"\"\n def __init__(self, preserve_sign=True):\n super().__init__()\n self.preserve_sign = preserve_sign\n\n def forward(self, inputs):\n \"\"\"Perform the forward pass\"\"\"\n return self.call(inputs, self.preserve_sign)\n\n right_inverse = forward\n\n @staticmethod\n def call(inputs, preserve_sign=True):\n \"\"\"Force values to be nonzero\"\"\"\n eps = torch.finfo(inputs.dtype).eps * 1e2\n eps_t = torch.tensor(eps, dtype=inputs.dtype, device=inputs.device)\n if preserve_sign:\n eps_t = torch.where(inputs < 0, -eps_t, eps_t)\n return inputs.where(inputs.detach().abs() > eps, eps_t)\n\n\nclass Orthogonal(_Orthogonal):\n \"\"\"Orthogonal or unitary parametrization for matrices\"\"\"\n def __init__(self, weight, view=None, strategy=None, fast=True):\n if view is not None:\n weight = weight.data.view(view)\n assert weight.dim() > 1, f'viewed tensor is {weight.dim()}D (< 2D)'\n if strategy is None:\n if weight.shape[-2] == weight.shape[-1] or weight.is_complex():\n strategy = 'matrix_exp'\n else:\n strategy = 'householder'\n orth_enum = getattr(_OrthMaps, strategy, None)\n if orth_enum is None:\n maps = {x.name for x in _OrthMaps}\n raise ValueError(f'strategy={strategy} not in {maps}')\n super().__init__(weight, orth_enum, use_trivialization=fast)\n self.view = view\n\n def call(self, function, weight):\n \"\"\"calls a function on a tensor and views it if necessary\"\"\"\n if self.view is not None:\n return function(weight.view(self.view)).view_as(weight)\n return function(weight)\n\n def forward(self, weight): # pylint: disable=arguments-renamed\n return self.call(super().forward, weight)\n\n def right_inverse(self, weight): # pylint: disable=arguments-renamed\n return self.call(super().right_inverse, weight)\n\n\nclass ScaledOrthogonal(Orthogonal):\n \"\"\"Scaled orthogonal parametrization for matrices\"\"\"\n def call(self, function, weight):\n def function_(matrix):\n eps = torch.finfo(matrix.dtype).eps * 1e2\n dim = -1 if weight.shape[-1] > weight.shape[-2] else -2\n norm = matrix.norm(2, dim, keepdim=True).clamp_min(eps)\n return function(matrix / norm) * norm\n\n return super().call(function_, weight)\n"
] | [
[
"torch.finfo",
"torch.where",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangzhe0912/missshi_deeplearning_ai | [
"b21ac9e2abb203486ee08fd47851617057010eaa"
] | [
"10 initialization_regularization_gradientCheck/init_utils.py"
] | [
"#-*- coding: UTF-8 -*-\n\"\"\"\n# WANGZHE12\n\"\"\"\nimport sklearn\nimport sklearn.datasets\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1 / (1 + np.exp(-x))\n return s\n\n\ndef relu(x):\n \"\"\"\n Compute the relu of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- relu(x)\n \"\"\"\n s = np.maximum(0, x)\n\n return s\n\n\ndef compute_loss(a3, Y):\n \"\"\"\n Implement the loss function\n\n Arguments:\n a3 -- post-activation, output of forward propagation\n Y -- \"true\" labels vector, same shape as a3\n\n Returns:\n loss - value of the loss function\n \"\"\"\n\n m = Y.shape[1]\n logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)\n loss = 1. / m * np.nansum(logprobs)\n\n return loss\n\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Implements the forward propagation (and computes the loss) presented in Figure 2.\n\n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape ()\n b1 -- bias vector of shape ()\n W2 -- weight matrix of shape ()\n b2 -- bias vector of shape ()\n W3 -- weight matrix of shape ()\n b3 -- bias vector of shape ()\n\n Returns:\n loss -- the loss function (vanilla logistic loss)\n \"\"\"\n\n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n\n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n z1 = np.dot(W1, X) + b1\n a1 = relu(z1)\n z2 = np.dot(W2, a1) + b2\n a2 = relu(z2)\n z3 = np.dot(W3, a2) + b3\n a3 = sigmoid(z3)\n\n cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)\n\n return a3, cache\n\n\ndef backward_propagation(X, Y, cache):\n \"\"\"\n Implement the backward propagation presented in figure 2.\n\n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat)\n cache -- cache output from forward_propagation()\n\n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n m = X.shape[1]\n (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache\n\n dz3 = 1. 
/ m * (a3 - Y)\n dW3 = np.dot(dz3, a2.T)\n db3 = np.sum(dz3, axis=1, keepdims=True)\n\n da2 = np.dot(W3.T, dz3)\n dz2 = np.multiply(da2, np.int64(a2 > 0))\n dW2 = np.dot(dz2, a1.T)\n db2 = np.sum(dz2, axis=1, keepdims=True)\n\n da1 = np.dot(W2.T, dz2)\n dz1 = np.multiply(da1, np.int64(a1 > 0))\n dW1 = np.dot(dz1, X.T)\n db1 = np.sum(dz1, axis=1, keepdims=True)\n\n gradients = {\"dz3\": dz3, \"dW3\": dW3, \"db3\": db3,\n \"da2\": da2, \"dz2\": dz2, \"dW2\": dW2, \"db2\": db2,\n \"da1\": da1, \"dz1\": dz1, \"dW1\": dW1, \"db1\": db1}\n\n return gradients\n\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n\n Arguments:\n parameters -- python dictionary containing your parameters\n grads -- python dictionary containing your gradients, output of n_model_backward\n\n Returns:\n parameters -- python dictionary containing your updated parameters\n parameters['W' + str(i)] = ...\n parameters['b' + str(i)] = ...\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for k in range(L):\n parameters[\"W\" + str(k + 1)] = parameters[\"W\" + str(k + 1)] - learning_rate * grads[\"dW\" + str(k + 1)]\n parameters[\"b\" + str(k + 1)] = parameters[\"b\" + str(k + 1)] - learning_rate * grads[\"db\" + str(k + 1)]\n\n return parameters\n\n\ndef predict(X, y, parameters):\n \"\"\"\n This function is used to predict the results of a n-layer neural network.\n\n Arguments:\n X -- data set of examples you would like to label\n parameters -- parameters of the trained model\n\n Returns:\n p -- predictions for the given dataset X\n \"\"\"\n\n m = X.shape[1]\n p = np.zeros((1, m), dtype=np.int)\n\n # Forward propagation\n a3, caches = forward_propagation(X, parameters)\n\n # convert probas to 0/1 predictions\n for i in range(0, a3.shape[1]):\n if a3[0, i] > 0.5:\n p[0, i] = 1\n else:\n p[0, i] = 0\n\n # print results\n print(\"Accuracy: \" + str(np.mean((p[0, :] == y[0, :]))))\n\n return p\n\n\ndef load_dataset():\n np.random.seed(1)\n train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)\n np.random.seed(2)\n test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)\n # Visualize the data\n plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);\n train_X = train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n test_X = test_X.T\n test_Y = test_Y.reshape((1, test_Y.shape[0]))\n return train_X, train_Y, test_X, test_Y\n\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)\n plt.show()\n\n\ndef predict_dec(parameters, X):\n \"\"\"\n Used for plotting decision boundary.\n\n Arguments:\n parameters -- python dictionary containing your parameters\n X -- input data of size (m, K)\n\n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n\n # Predict using forward propagation and a classification 
threshold of 0.5\n a3, cache = forward_propagation(X, parameters)\n predictions = (a3 > 0.5)\n return predictions"
] | [
[
"numpy.dot",
"numpy.log",
"matplotlib.pyplot.contourf",
"numpy.maximum",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.arange",
"numpy.int64",
"numpy.nansum",
"sklearn.datasets.make_circles",
"numpy.mean",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DSC-SPIDAL/twisterx | [
"773f11ff79648938dc5dd0678b786b97ed289b36"
] | [
"python/pycylon/setup.py"
] | [
"##\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\n# References\n\"\"\"\nhttps://github.com/FedericoStra/cython-package-example/blob/master/setup.py\nhttps://github.com/thewtex/cython-cmake-example/blob/master/setup.py\n\"\"\"\n\nimport os\nimport platform\nimport sysconfig\nfrom distutils.sysconfig import get_python_lib\nfrom distutils.util import strtobool\n\nimport numpy as np\nimport pyarrow as pa\nfrom Cython.Build import cythonize\nfrom setuptools import Extension, find_packages, setup\n\nimport versioneer\n\nversion = versioneer.get_version(),\ncmdclass = versioneer.get_cmdclass(),\n\n# os.environ[\"CXX\"] = \"mpic++\"\npyarrow_location = os.path.dirname(pa.__file__)\npyarrow_version = pa.__version__\n\nprint(\"PYARROW version:\", pyarrow_version)\n\nCYLON_PREFIX = os.environ.get('CYLON_PREFIX')\nARROW_PREFIX = os.environ.get('ARROW_PREFIX')\nCYLON_GLOO = strtobool(os.environ.get('CYLON_GLOO') or '0')\nGLOO_PREFIX = os.environ.get('GLOO_PREFIX')\nCYLON_UCX = strtobool(os.environ.get('CYLON_UCX') or '0')\nCYLON_UCC = strtobool(os.environ.get('CYLON_UCC') or '0')\nUCC_PREFIX = os.environ.get('UCC_PREFIX')\n\nif not CYLON_PREFIX:\n raise ValueError(\"CYLON_PREFIX not set\")\n\ntry:\n nthreads = int(os.environ.get(\"PARALLEL_LEVEL\", \"0\") or \"0\")\nexcept Exception:\n nthreads = 0\n\ncompiler_directives = {\"language_level\": 3, \"embedsignature\": True}\n\nstd_version = '-std=c++14'\nadditional_compile_args = [std_version, '-DARROW_METADATA_V4 -DNEED_EXCLUSIVE_SCAN']\narrow_lib_include_dir = None\narrow_library_directory = None\nif not ARROW_PREFIX:\n arrow_lib_include_dir = os.path.join(pyarrow_location, \"include\")\n arrow_library_directory = pyarrow_location\n additional_compile_args.append('-D_GLIBCXX_USE_CXX11_ABI=0')\n additional_compile_args.append('-DOMPI_SKIP_MPICXX=1')\n if not os.path.exists(arrow_library_directory):\n arrow_library_directory = os.path.join(pyarrow_location, \"lib64\")\nelse:\n arrow_lib_include_dir = os.path.join(ARROW_PREFIX, \"include\")\n arrow_library_directory = os.path.join(ARROW_PREFIX, \"lib\")\n if not os.path.exists(arrow_library_directory):\n arrow_library_directory = os.path.join(ARROW_PREFIX, \"lib64\")\n\npyarrow_include_dir = os.path.join(pyarrow_location, 'include')\n\nextra_compile_args = []\nextra_link_args = []\nif os.name == 'posix':\n extra_compile_args = os.popen(\n \"mpic++ --showme:compile\").read().strip().split(' ')\n extra_link_args = os.popen(\"mpic++ --showme:link\").read().strip().split(' ')\n extra_compile_args = extra_compile_args + additional_compile_args\n extra_link_args.append(\"-W\")\n\nglob_library_directory = os.path.join(CYLON_PREFIX, \"glog\", \"install\", \"lib\")\n\nglog_lib_include_dir = os.path.join(CYLON_PREFIX, \"glog\", \"install\", \"include\")\ncylon_library_directory = os.path.join(CYLON_PREFIX, \"lib\")\ncylon_library_directory_debug = os.path.join(CYLON_PREFIX, \"lib\", \"Debug\")\ncylon_library_directory_release = os.path.join(CYLON_PREFIX, \"lib\", \"Release\")\n\nlibrary_directories = 
[cylon_library_directory,\n cylon_library_directory_debug,\n cylon_library_directory_release,\n arrow_library_directory,\n glob_library_directory,\n get_python_lib(),\n os.path.join(os.sys.prefix, \"lib\")]\n\nOS_NAME = platform.system()\n\nif OS_NAME == 'Linux' or OS_NAME == 'Darwin':\n mpi_library_dir = os.popen(\"mpicc --showme:libdirs\").read().strip().split(' ')\nelse:\n import mpi4py\n\n mpi_library_dir = [mpi4py.get_config()['library_dirs']]\nlibrary_directories.extend(mpi_library_dir)\n\nlibraries = [\"arrow\", \"cylon\", \"glog\"]\n\ncylon_include_dir = os.path.abspath(os.path.join(__file__, \"../../..\", \"cpp\", \"src\"))\n\n_include_dirs = [cylon_include_dir,\n arrow_lib_include_dir,\n glog_lib_include_dir,\n pyarrow_include_dir,\n np.get_include(),\n os.path.dirname(sysconfig.get_path(\"include\"))]\n\nif OS_NAME == 'Linux' or OS_NAME == 'Darwin':\n mpi_include_dir = os.popen(\"mpicc --showme:incdirs\").read().strip().split(' ')\nelse:\n import mpi4py\n\n mpi_include_dir = [mpi4py.get_config()['include_dirs']]\n_include_dirs.extend(mpi_include_dir)\n\nmacros = []\n# compile_time_env serves as preprocessor macros. ref: https://github.com/cython/cython/issues/2488\ncompile_time_env = {'CYTHON_GLOO': False, 'CYTHON_UCC': False}\nif CYLON_GLOO:\n libraries.append('gloo')\n library_directories.append(os.path.join(GLOO_PREFIX, 'lib'))\n _include_dirs.append(os.path.join(GLOO_PREFIX, 'include'))\n macros.append(('GLOO_USE_MPI', '1'))\n macros.append(('BUILD_CYLON_GLOO', '1'))\n compile_time_env['CYTHON_GLOO'] = True\n\nif CYLON_UCC and CYLON_UCX:\n libraries.append('ucc')\n library_directories.append(os.path.join(UCC_PREFIX, 'lib'))\n _include_dirs.append(os.path.join(UCC_PREFIX, 'include'))\n macros.append(('BUILD_CYLON_UCX', '1'))\n macros.append(('BUILD_CYLON_UCC', '1'))\n compile_time_env['CYTHON_UCC'] = True\n\nprint('Libraries :', libraries)\nprint(\"Lib dirs :\", library_directories)\nprint(\"Include dirs :\", _include_dirs)\nprint(\"Macros :\", macros)\nprint(\"Compile time env:\", compile_time_env)\n\n# Adopted the Cudf Python Build format\n# https://github.com/rapidsai/cudf\n\nextensions = [\n Extension(\n \"*\",\n sources=[\"pycylon/*/*.pyx\"],\n include_dirs=_include_dirs,\n language='c++',\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n libraries=libraries,\n library_dirs=library_directories,\n define_macros=macros,\n )]\n\ncompiler_directives = {\"language_level\": 3, \"embedsignature\": True}\npackages = find_packages(include=[\"pycylon\", \"pycylon.*\"])\n\nprint(\"PACKAGES: \" + str(packages))\n\nsetup(\n name=\"pycylon\",\n packages=packages,\n version=versioneer.get_version(),\n setup_requires=[\"cython\",\n \"setuptools\",\n \"numpy\",\n ],\n ext_modules=cythonize(\n extensions,\n nthreads=nthreads,\n compiler_directives=dict(\n profile=False, language_level=3, embedsignature=True\n ),\n compile_time_env=compile_time_env,\n ),\n package_data=dict.fromkeys(find_packages(include=[\"pycylon*\"]), [\"*.pxd\"], ),\n python_requires='>=3.7',\n install_requires=[\n 'numpy',\n f'pyarrow=={pyarrow_version}',\n 'cython',\n ],\n zip_safe=False\n)\nprint(\"Pycylon setup done!\")\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gudgud96/AI-Music-Composition | [
"626f4713b0d60400ce2d588d6d96e9dd98f74625"
] | [
"note/chord_to_note_generator.py"
] | [
"'''\nAuthor: Tan Hao Hao\nProject: deeppop\nPurpose: Chord to note generator.\n'''\nimport pickle\nimport sys,os\nsys.path.append('..')\n\nfrom keras import backend as K\nfrom dataset.data_pipeline import DataPipeline\nfrom models.model_builder import ModelBuilder\nfrom tools.utils import piano_roll_to_pretty_midi, chord_index_to_piano_roll, convert_chord_indices_to_embeddings\nimport numpy as np\n\n\nclass ChordToNoteGenerator:\n def __init__(self):\n self.X_train = None\n self.Y_train = None\n self.X_test = None\n self.Y_test = None\n self.model = None\n self.model_name = None\n\n def train_chord_to_melody_model(self, tt_split=0.9, epochs=100, model_name='basic_rnn'):\n '''\n Train model step - model takes in chord piano roll and outputs melody piano roll.\n :param tt_split: train test split\n :param epochs: number of epochs to train\n :param model_name: specify which model we are training\n :return: None. Model is assigned as self.model for this generator\n '''\n\n # Train test split\n self.__prepare_data_tt_splited(tt_split=tt_split, model_name=model_name, src=\"nottingham-embed\")\n # print('Chords shape: {} Melodies shape: {}'.format(chords.shape, melodies.shape))\n\n # Load / train model\n if model_name == 'basic_rnn':\n if os.path.exists(\"basic_rnn.h5\"):\n mb = ModelBuilder(self.X_train, self.Y_train, self.X_test, self.Y_test)\n model = mb.build_basic_rnn_model(input_dim=self.X_train.shape[1:])\n model.load_weights(\"basic_rnn.h5\")\n else:\n mb = ModelBuilder(self.X_train, self.Y_train, self.X_test, self.Y_test)\n model = mb.build_attention_bidirectional_rnn_model(input_dim=self.X_train.shape[1:])\n model = mb.train_model(model, epochs, loss=\"categorical_crossentropy\")\n model.save_weights(\"basic_rnn.h5\")\n\n self.model = model\n\n def load_model(self, model_name, tt_split=0.9, is_fast_load=True):\n # clear session to avoid any errors\n K.clear_session()\n\n print(\"Chosen model: {}\".format(model_name))\n\n if not is_fast_load:\n # Train test split\n if model_name == 'bidem' or model_name == 'attention' or model_name == \"bidem_preload\":\n self.__prepare_data_tt_splited(tt_split=tt_split, model_name=model_name, src='nottingham-embed')\n print('Chords shape: {} Melodies shape: {}'.format(self.X_train.shape, self.Y_train.shape))\n else:\n self.__prepare_data_tt_splited(tt_split=tt_split, model_name=model_name, src='nottingham')\n print('Chords shape: {} Melodies shape: {}'.format(self.X_train.shape, self.Y_train.shape))\n\n if is_fast_load:\n mb = ModelBuilder(None, None, None, None)\n else:\n mb = ModelBuilder(self.X_train, self.Y_train, self.X_test, self.Y_test)\n\n if model_name == 'basic_rnn_normalized':\n self.model = mb.build_basic_rnn_model(input_dim=(1200, 128))\n weights_path = '../note/active_models/basic_rnn_weights_500.h5'\n print('Loading ' + weights_path + '...')\n self.model.load_weights(weights_path)\n\n elif model_name == 'basic_rnn_unnormalized':\n self.model = mb.build_basic_rnn_model(input_dim=(1200, 128))\n weights_path = '../note/active_models/basic_rnn_weights_500_unnormalized.h5'\n print('Loading ' + weights_path + '...')\n self.model.load_weights(weights_path)\n\n elif model_name == 'bidem':\n self.model = mb.build_bidirectional_rnn_model(input_dim=(1200,))\n weights_path = '../note/active_models/bidem_weights_500.h5'\n print('Loading ' + weights_path + '...')\n self.model.load_weights(weights_path)\n\n elif model_name == 'bidem_regularized':\n self.model = mb.build_bidirectional_rnn_model_no_embeddings(input_dim=(1200,1))\n weights_path = 
'../note/active_models/bidirectional_regularized_500.h5'\n print('Loading ' + weights_path + '...')\n self.model.load_weights(weights_path)\n\n elif model_name == 'attention':\n self.model = mb.build_attention_bidirectional_rnn_model(input_dim=(1200,))\n weights_path = '../note/active_models/attention_weights_1000.h5'\n print('Loading ' + weights_path + '...')\n self.model.load_weights(weights_path)\n\n elif model_name == 'bidem_preload':\n self.model = mb.build_bidirectional_rnn_model_no_embeddings(input_dim=(None, 32))\n weights_path = '../note/active_models/bidirectional_embedding_preload_100.h5'\n print('Loading ' + weights_path + '...')\n self.model.load_weights(weights_path)\n\n else:\n print('No model name: {}'.format(model_name))\n return\n\n self.model_name = model_name\n\n def generate_notes_from_chord(self, chords, train_loss='softmax', is_bidem=True, is_return=False):\n '''\n Generate notes from chords in test set, need to specify index.\n :param chords: chord piano roll - (128, x)\n :return: None. Write Midi out as melody.mid.\n '''\n # Preprocessing\n print(self.model_name)\n if self.model_name == \"bidem_preload\":\n chords = convert_chord_indices_to_embeddings(chords)\n elif self.model_name == \"bidem_regularized\":\n chords = np.expand_dims(chords, axis=-1)\n\n # Prediction\n if is_bidem:\n y = self.model.predict(np.expand_dims(chords, axis=0))\n else:\n y = self.model.predict(np.expand_dims(np.transpose(chords, (1,0)), axis=0))\n\n # Handle probabilities according to training loss used\n if train_loss == 'softmax':\n b = np.zeros_like(y)\n b[np.arange(len(y)), np.arange(len(y[0])), y.argmax(2)] = 1\n\n # Matrix to piano roll\n y = np.transpose(np.squeeze(b), (1,0))\n y[y > 0] = 90\n chords[chords > 0] = 90\n\n # Write out as midi\n y_midi = piano_roll_to_pretty_midi(y, fs=12)\n y_midi.write('melody.mid')\n\n if is_return:\n return y\n\n def __get_raw_data(self, src='nottingham', model_name='basic_rnn'):\n '''\n Get raw data depending on data source and model.\n :param src: Data source includes 'nottingham' and 'lakh'.\n :param model_name: Model includes 'basic_rnn'.\n :return:\n '''\n if src == 'nottingham':\n dp = DataPipeline()\n chords, melodies = dp.get_nottingham_piano_roll(is_small_set=True, is_shifted=False)\n\n # plt.imshow(chords[0])\n # plt.show()\n # plt.imshow(melodies[0])\n # plt.show()\n\n chords[chords > 0] = 1\n melodies[melodies > 0] = 1\n csparsity = 1.0 - np.count_nonzero(chords) / chords.size\n msparsity = 1.0 - np.count_nonzero(melodies) / melodies.size\n print(csparsity, msparsity)\n cshape, mshape = chords.shape, melodies.shape\n chords, melodies = self.__process_raw_data(chords, melodies, model=model_name)\n print(chords.shape, melodies.shape)\n\n elif src == 'nottingham-embed':\n dp = DataPipeline()\n chords, melodies = dp.get_nottingham_embed(is_small_set=True)\n melodies[melodies > 0] = 1\n cshape, mshape = chords.shape, melodies.shape\n print(chords.shape, melodies.shape)\n\n return chords, melodies\n\n def __process_raw_data(self, chords, melodies, model='basic_rnn'):\n if model == 'basic_rnn':\n chords, melodies = np.transpose(chords, (0, 2, 1)), np.transpose(melodies, (0, 2, 1))\n elif model == 'basic_rnn_embed':\n melodies = np.transpose(melodies, (0, 2, 1))\n\n return chords, melodies\n\n def __prepare_data_tt_splited(self, tt_split=0.9, src='nottingham', model_name='basic_rnn'):\n chords, melodies = self.__get_raw_data(src=src, model_name=model_name)\n if src == 'nottingham-embed':\n chords, melodies = self.__process_raw_data(chords, 
melodies, model='basic_rnn_embed')\n else:\n chords, melodies = self.__process_raw_data(chords, melodies, model='basic_rnn')\n split_ind = int(tt_split * len(chords))\n self.X_train, self.Y_train, self.X_test, self.Y_test = chords[:split_ind], \\\n melodies[:split_ind], \\\n chords[split_ind:], \\\n melodies[split_ind:]\n\n\ndef evaluate_preload_embeddings():\n model_name = \"bidem\"\n generator = ChordToNoteGenerator()\n generator.load_model(model_name=model_name, is_fast_load=False)\n\n ind = 6\n # chords = np.transpose(generator.X_test[ind], (1,0))\n chords = generator.X_test[ind]\n print(chords.shape)\n from utils import piano_roll_to_pretty_midi\n\n chords_temp = np.copy(chords)\n chord_midi = piano_roll_to_pretty_midi(chord_index_to_piano_roll(chords_temp), fs=12)\n chord_midi.write('chord.mid')\n\n # this is only for embedding preload\n if model_name == \"bidem_preload\":\n chords_embeddings = []\n infile = open('../dataset/chord_embeddings_dict.pickle', 'rb')\n embedding_dict = pickle.load(infile)\n for chord in chords:\n if chord == 0:\n chords_embeddings.append(np.zeros((32,)))\n else:\n chords_embeddings.append(embedding_dict[chord])\n chords = np.array(chords_embeddings)\n\n chords = np.expand_dims(chords, axis=-1)\n\n generator.generate_notes_from_chord(chords=chords)\n from generator.song_generator import merge_melody_with_chords\n merge_melody_with_chords('melody.mid', 'chord.mid', 'song.mid')\n\n actual = np.transpose(generator.Y_test[ind], (1, 0))\n actual[actual > 0] = 90\n a_midi = piano_roll_to_pretty_midi(actual, fs=12)\n a_midi.write('actual.mid')\n merge_melody_with_chords('actual.mid', 'chord.mid', 'song-actual.mid')\n\n\ndef beam_search():\n cg = ChordToNoteGenerator()\n cg.train_chord_to_melody_model(model_name=\"seq2seq\", epochs=3)\n\n\nif __name__ == \"__main__\":\n # evaluate_preload_embeddings()\n beam_search()"
] | [
[
"numpy.expand_dims",
"numpy.squeeze",
"numpy.copy",
"numpy.zeros_like",
"numpy.count_nonzero",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KumarLabJax/JABS-behavior-classifier | [
"8c038a7510ae08d90418403a723e396344bb671c"
] | [
"src/classifier/classifier.py"
] | [
"import random\nimport typing\nfrom enum import IntEnum\nfrom importlib import import_module\nfrom pathlib import Path\nimport joblib\n\nimport numpy as np\nfrom sklearn.ensemble import (\n RandomForestClassifier,\n GradientBoostingClassifier\n)\nfrom sklearn.metrics import (\n accuracy_score,\n precision_recall_fscore_support,\n confusion_matrix\n)\nfrom sklearn.model_selection import train_test_split, LeaveOneGroupOut\n\nfrom src.project import TrackLabels\nfrom src.project import ProjectDistanceUnit\n\n_VERSION = 3\n\n\nclass ClassifierType(IntEnum):\n RANDOM_FOREST = 1\n GRADIENT_BOOSTING = 2\n XGBOOST = 3\n\n\n_classifier_choices = [\n ClassifierType.RANDOM_FOREST,\n ClassifierType.GRADIENT_BOOSTING\n]\n\ntry:\n _xgboost = import_module(\"xgboost\")\n # we were able to import xgboost, make it available as an option:\n _classifier_choices.append(ClassifierType.XGBOOST)\nexcept Exception:\n # we were unable to import the xgboost module. It's either not\n # installed (it should be if the user used our requirements.txt)\n # or it may have been unable to be imported due to a missing\n # libomp. Either way, we won't add it to the available choices and\n # we can otherwise ignore this exception\n _xgboost = None\n\n\nclass Classifier:\n LABEL_THRESHOLD = 20\n\n _classifier_names = {\n ClassifierType.RANDOM_FOREST: \"Random Forest\",\n ClassifierType.GRADIENT_BOOSTING: \"Gradient Boosting\",\n ClassifierType.XGBOOST: \"XGBoost\"\n }\n\n def __init__(self, classifier=ClassifierType.RANDOM_FOREST, n_jobs=1):\n \"\"\"\n :param classifier: type of classifier to use. Must be ClassifierType\n :param n_jobs: number of jobs to use for classifiers that support\n this parameter for parallelism\n enum value. Defaults to ClassifierType.RANDOM_FOREST\n \"\"\"\n\n self._classifier_type = classifier\n self._classifier = None\n self._window_size = None\n self._uses_social = None\n self._extended_features = None\n self._behavior = None\n self._distance_unit = None\n self._n_jobs = n_jobs\n self._version = _VERSION\n\n # make sure the value passed for the classifier parameter is valid\n if classifier not in _classifier_choices:\n raise ValueError(\"Invalid classifier type\")\n\n @property\n def classifier_name(self) -> str:\n \"\"\" return the name of the classifier used as a string \"\"\"\n return self._classifier_names[self._classifier_type]\n\n @property\n def classifier_type(self) -> ClassifierType:\n \"\"\" return classifier type \"\"\"\n return self._classifier_type\n\n @property\n def window_size(self) -> int:\n return self._window_size\n\n @property\n def uses_social(self) -> bool:\n return self._uses_social\n\n @property\n def extended_features(self) -> typing.Dict[str, typing.List[str]]:\n return self._extended_features\n\n @property\n def behavior_name(self) -> str:\n return self._behavior\n\n @property\n def version(self) -> int:\n return self._version\n\n @property\n def distance_unit(self) -> ProjectDistanceUnit:\n \"\"\"\n return the distance unit for the features that were used to train\n this classifier\n \"\"\"\n return self._distance_unit\n\n @staticmethod\n def train_test_split(per_frame_features, window_features, label_data):\n \"\"\"\n split features and labels into training and test datasets\n\n :param per_frame_features: per frame features as returned from\n IdentityFeatures object, filtered to only include labeled frames\n :param window_features: window features as returned from\n IdentityFeatures object, filtered to only include labeled frames\n :param label_data: labels that 
correspond to the features\n :return: dictionary of training and test data and labels:\n\n {\n 'training_data': list of numpy arrays,\n 'test_data': list of numpy arrays,\n 'training_labels': numpy array,\n 'test_labels': numpy_array,\n }\n \"\"\"\n datasets = []\n\n # add per frame features to our data set\n for feature in sorted(per_frame_features):\n datasets.append(per_frame_features[feature])\n\n # add window features to our data set\n for feature in sorted(window_features):\n if feature == 'percent_frames_present':\n datasets.append(window_features[feature])\n else:\n # [source_feature_name][operator_applied] : numpy array\n # iterate over operator names\n for op in sorted(window_features[feature]):\n # append the numpy array to the dataset\n datasets.append(window_features[feature][op])\n\n # split labeled data and labels\n split_data = train_test_split(np.concatenate(datasets, axis=1),\n label_data)\n\n return {\n 'test_labels': split_data.pop(),\n 'training_labels': split_data.pop(),\n 'training_data': split_data[::2],\n 'test_data': split_data[1::2]\n }\n\n @staticmethod\n def leave_one_group_out(per_frame_features, window_features, labels,\n groups):\n \"\"\"\n implements \"leave one group out\" data splitting strategy\n :param per_frame_features: per frame features for all labeled data\n :param window_features: window features for all labeled data\n :param labels: labels corresponding to each feature row\n :param groups: group id corresponding to each feature row\n :return: dictionary of training and test data and labels:\n {\n 'training_data': list of numpy arrays,\n 'test_data': list of numpy arrays,\n 'training_labels': numpy array,\n 'test_labels': numpy_array,\n }\n \"\"\"\n logo = LeaveOneGroupOut()\n x = Classifier.combine_data(per_frame_features, window_features)\n splits = list(logo.split(x, labels, groups))\n\n # pick random split, make sure we pick a split where the test data\n # has sufficient labels of both classes\n random.shuffle(splits)\n count = 0\n for split in splits:\n\n behavior_count = np.count_nonzero(\n labels[split[1]] == TrackLabels.Label.BEHAVIOR)\n not_behavior_count = np.count_nonzero(\n labels[split[1]] == TrackLabels.Label.NOT_BEHAVIOR)\n\n if (behavior_count >= Classifier.LABEL_THRESHOLD and\n not_behavior_count >= Classifier.LABEL_THRESHOLD):\n count += 1\n yield {\n 'training_labels': labels[split[0]],\n 'training_data': x[split[0]],\n 'test_labels': labels[split[1]],\n 'test_data': x[split[1]],\n 'test_group': groups[split[1]][0]\n }\n\n # number of splits exhausted without finding at least one that meets\n # criteria\n # the UI won't allow us to reach this case\n if count == 0:\n raise ValueError(\"unable to split data\")\n\n def set_classifier(self, classifier):\n \"\"\" change the type of the classifier being used \"\"\"\n if classifier not in _classifier_choices:\n raise ValueError(\"Invalid Classifier Type\")\n self._classifier_type = classifier\n\n def classifier_choices(self):\n \"\"\"\n get the available classifier types\n :return: dict where keys are ClassifierType enum values, and the\n values are string names for the classifiers. 
example:\n\n {\n <ClassifierType.RANDOM_FOREST: 1>: 'Random Forest',\n <ClassifierType.GRADIENT_BOOSTING: 2>: 'Gradient Boosting',\n <ClassifierType.XGBOOST: 3>: 'XGBoost'\n }\n \"\"\"\n return {\n d: self._classifier_names[d] for d in _classifier_choices\n }\n\n def train(self, data, behavior: str, window_size: int, uses_social: bool,\n extended_features: typing.Dict,\n distance_unit: ProjectDistanceUnit,\n random_seed: typing.Optional[int] = None):\n \"\"\"\n train the classifier\n :param data: dict returned from train_test_split()\n :param behavior: string name of behavior we are training for\n :param window_size: window size used for training\n :param uses_social: does training data include social features?\n :param extended_features: additional features used by classifier\n :param distance_unit: the distance unit used for training\n :param random_seed: optional random seed (used when we want reproducible\n results between trainings)\n :return: None\n\n NOTE: window_size, uses_social, extended_features, and distance_unit\n is used only to verify that a trained classifer can be used\n (check the classifier doesn't use features that are not supported the\n project)\n \"\"\"\n features = data['training_data']\n labels = data['training_labels']\n\n self._uses_social = uses_social\n self._window_size = window_size\n self._behavior = behavior\n self._distance_unit = distance_unit\n self._extended_features = extended_features\n\n if self._classifier_type == ClassifierType.RANDOM_FOREST:\n self._classifier = self._fit_random_forest(features, labels,\n random_seed=random_seed)\n elif self._classifier_type == ClassifierType.GRADIENT_BOOSTING:\n self._classifier = self._fit_gradient_boost(features, labels,\n random_seed=random_seed)\n elif _xgboost is not None and self._classifier_type == ClassifierType.XGBOOST:\n self._classifier = self._fit_xgboost(features, labels,\n random_seed=random_seed)\n else:\n raise ValueError(\"Unsupported classifier\")\n\n def predict(self, features):\n \"\"\"\n predict classes for a given set of features\n \"\"\"\n return self._classifier.predict(features)\n\n def predict_proba(self, features):\n return self._classifier.predict_proba(features)\n\n def save(self, path: Path):\n joblib.dump(self, path)\n\n def load(self, path: Path):\n c = joblib.load(path)\n\n if not isinstance(c, Classifier):\n raise ValueError(\n f\"{path} is not instance of Classifier\")\n\n if c.version != _VERSION:\n raise ValueError(f\"Error deserializing classifier. 
\"\n f\"File version {c.version}, expected {_VERSION}.\")\n\n # make sure the value passed for the classifier parameter is valid\n if c._classifier_type not in _classifier_choices:\n raise ValueError(\"Invalid classifier type\")\n\n self._classifier = c._classifier\n self._behavior = c._behavior\n self._window_size = c._window_size\n self._uses_social = c._uses_social\n self._classifier_type = c._classifier_type\n self._distance_unit = c._distance_unit\n\n def _update_classifier_type(self):\n # we may need to update the classifier type based on\n # on the type of the loaded object\n if isinstance(self._classifier, RandomForestClassifier):\n self._classifier_type = ClassifierType.RANDOM_FOREST\n elif isinstance(self._classifier, GradientBoostingClassifier):\n self._classifier_type = ClassifierType.GRADIENT_BOOSTING\n else:\n self._classifier_type = ClassifierType.XGBOOST\n\n @staticmethod\n def accuracy_score(truth, predictions):\n return accuracy_score(truth, predictions)\n\n @staticmethod\n def precision_recall_score(truth, predictions):\n return precision_recall_fscore_support(truth, predictions)\n\n @staticmethod\n def confusion_matrix(truth, predictions):\n return confusion_matrix(truth, predictions)\n\n @staticmethod\n def combine_data(per_frame, window):\n \"\"\"\n iterate over feature sets and combine them to create a dataset with the\n shape #frames, #features\n :param per_frame: per frame features dictionary\n :param window: window feature dictionary\n :return: numpy array with shape #frames,#features\n \"\"\"\n datasets = []\n # add per frame features to our data set\n # sort the feature names in the dict so the order is consistent\n for feature in sorted(per_frame):\n datasets.append(per_frame[feature])\n\n # add window features to our data set\n # sort the feature names in the dict so the order is consistent\n for feature in sorted(window):\n # [source_feature_name][operator_applied] : numpy array\n # iterate over operator names\n for op in sorted(window[feature]):\n # append the numpy array to the dataset\n datasets.append(window[feature][op])\n\n # expand any 1D features to 2D so that we can concatenate in one call\n datasets = [(d[:, np.newaxis] if d.ndim == 1 else d) for d in datasets]\n return np.concatenate(datasets, axis=1)\n\n def _fit_random_forest(self, features, labels,\n random_seed: typing.Optional[int] = None):\n if random_seed is not None:\n classifier = RandomForestClassifier(n_jobs=self._n_jobs,\n random_state=random_seed)\n else:\n classifier = RandomForestClassifier(n_jobs=self._n_jobs)\n return classifier.fit(features, labels)\n\n def _fit_gradient_boost(self, features, labels,\n random_seed: typing.Optional[int] = None):\n if random_seed is not None:\n classifier = GradientBoostingClassifier(random_state=random_seed)\n else:\n classifier = GradientBoostingClassifier()\n return classifier.fit(features, labels)\n\n def _fit_xgboost(self, features, labels,\n random_seed: typing.Optional[int] = None):\n if random_seed is not None:\n classifier = _xgboost.XGBClassifier(n_jobs=self._n_jobs,\n random_state=random_seed)\n else:\n classifier = _xgboost.XGBClassifier(n_jobs=self._n_jobs)\n classifier.fit(features, labels)\n return classifier\n\n def print_feature_importance(self, feature_list, limit=20):\n \"\"\"\n print the most important features and their importance\n :param feature_list:\n :param limit:\n :return:\n \"\"\"\n # Get numerical feature importance\n importances = list(self._classifier.feature_importances_)\n # List of tuples with variable and 
importance\n feature_importance = [(feature, round(importance, 2)) for\n feature, importance in\n zip(feature_list, importances)]\n # Sort the feature importance by most important first\n feature_importance = sorted(feature_importance, key=lambda x: x[1],\n reverse=True)\n # Print out the feature and importance\n print(f\"{'Feature Name':55} Importance\")\n print('-' * 70)\n for feature, importance in feature_importance[:limit]:\n print(f\"{feature:55} {importance:0.2f}\")\n\n @staticmethod\n def label_threshold_met(all_counts: dict, min_groups: int):\n \"\"\"\n determine if the labeling threshold is met\n :param all_counts: labeled frame and bout counts for the entire project\n parameter is a dict with the following form\n {\n '<video name>': [\n (\n <identity>,\n (behavior frame count, not behavior frame count),\n (behavior bout count, not behavior bout count)\n ),\n ]\n }\n\n :param min_groups: minimum number of groups required (more than one\n group is always required for the \"leave one group out\" train/test split,\n but may be more than 2 for k-fold cross validation if k > 2)\n\n \"\"\"\n group_count = 0\n for video, counts in all_counts.items():\n for count in counts:\n if (count[1][0] >= Classifier.LABEL_THRESHOLD and\n count[1][1] >= Classifier.LABEL_THRESHOLD):\n group_count += 1\n\n return True if 1 < group_count >= min_groups else False\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.confusion_matrix",
"numpy.concatenate",
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.count_nonzero",
"sklearn.model_selection.LeaveOneGroupOut",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sumugit/DiCE | [
"247a7ba2d933e0bdde337421b660875a6ae0e194"
] | [
"dice_ml/explainer_interfaces/dice_KD.py"
] | [
"\"\"\"\nModule to generate counterfactual explanations from a KD-Tree\nThis code is similar to 'Interpretable Counterfactual Explanations Guided by Prototypes': https://arxiv.org/pdf/1907.02584.pdf\n\"\"\"\nfrom dice_ml.explainer_interfaces.explainer_base import ExplainerBase\nimport numpy as np\nimport timeit\nimport pandas as pd\nimport copy\n\nfrom dice_ml import diverse_counterfactuals as exp\nfrom dice_ml.constants import ModelTypes\n\n\nclass DiceKD(ExplainerBase):\n\n def __init__(self, data_interface, model_interface):\n \"\"\"Init method\n\n :param data_interface: an interface class to access data related params.\n :param model_interface: an interface class to access trained ML model.\n\n \"\"\"\n self.total_random_inits = 0\n super().__init__(data_interface) # initiating data related parameters\n\n # As DiCE KD uses one-hot-encoding\n self.data_interface.create_ohe_params()\n\n # initializing model variables\n self.model = model_interface\n self.model.load_model() # loading pickled trained model if applicable\n self.model.transformer.feed_data_params(data_interface)\n self.model.transformer.initialize_transform_func()\n\n # loading trained model\n self.model.load_model()\n\n # number of output nodes of ML model\n if self.model.model_type == ModelTypes.Classifier:\n self.num_output_nodes = self.model.get_num_output_nodes2(\n self.data_interface.data_df[0:1][self.data_interface.feature_names])\n\n self.predicted_outcome_name = self.data_interface.outcome_name + '_pred'\n\n def _generate_counterfactuals(self, query_instance, total_CFs, desired_range=None, desired_class=\"opposite\",\n features_to_vary=\"all\",\n permitted_range=None, sparsity_weight=1,\n feature_weights=\"inverse_mad\", stopping_threshold=0.5, posthoc_sparsity_param=0.1,\n posthoc_sparsity_algorithm=\"linear\", verbose=False):\n \"\"\"Generates diverse counterfactual explanations\n\n :param query_instance: A dictionary of feature names and values. Test point of interest.\n :param total_CFs: Total number of counterfactuals required.\n :param desired_range: For regression problems. Contains the outcome range to generate counterfactuals in.\n :param desired_class: Desired counterfactual class - can take 0 or 1. Default value is \"opposite\" to the\n outcome class of query_instance for binary classification.\n :param features_to_vary: Either a string \"all\" or a list of feature names to vary.\n :param permitted_range: Dictionary with continuous feature names as keys and permitted min-max range in\n list as values. Defaults to the range inferred from training data.\n If None, uses the parameters initialized in data_interface.\n :param sparsity_weight: Parameter to determine how much importance to give to sparsity\n :param feature_weights: Either \"inverse_mad\" or a dictionary with feature names as keys and corresponding\n weights as values. Default option is \"inverse_mad\" where the weight for a continuous\n feature is the inverse of the Median Absolute Devidation (MAD) of the feature's\n values in the training set; the weight for a categorical feature is equal to 1 by default.\n :param stopping_threshold: Minimum threshold for counterfactuals target class probability.\n :param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.\n :param posthoc_sparsity_algorithm: Perform either linear or binary search. 
Takes \"linear\" or \"binary\".\n Prefer binary search when a feature range is large (for instance, income\n varying from 10k to 1000k) and only if the features share a monotonic\n relationship with predicted outcome in the model.\n :param verbose: Parameter to determine whether to print 'Diverse Counterfactuals found!'\n\n :return: A CounterfactualExamples object to store and visualize the resulting counterfactual explanations\n (see diverse_counterfactuals.py).\n \"\"\"\n data_df_copy = self.data_interface.data_df.copy()\n\n features_to_vary = self.setup(features_to_vary, permitted_range, query_instance, feature_weights)\n\n # Prepares user defined query_instance for DiCE.\n query_instance_orig = query_instance.copy()\n query_instance = self.data_interface.prepare_query_instance(query_instance=query_instance)\n\n # find the predicted value of query_instance\n test_pred = self.predict_fn(query_instance)[0]\n\n query_instance[self.data_interface.outcome_name] = test_pred\n desired_class = self.misc_init(stopping_threshold, desired_class, desired_range, test_pred)\n if desired_range is not None:\n if desired_range[0] > desired_range[1]:\n raise ValueError(\"Invalid Range!\")\n\n if desired_class == \"opposite\" and self.model.model_type == ModelTypes.Classifier:\n if self.num_output_nodes == 2:\n desired_class = 1.0 - test_pred\n\n elif self.num_output_nodes > 2:\n raise ValueError(\"Desired class can't be opposite if the number of classes is more than 2.\")\n\n if isinstance(desired_class, int) and desired_class > self.num_output_nodes - 1:\n raise ValueError(\"Desired class should be within 0 and num_classes-1.\")\n\n # Partitioned dataset and KD Tree for each class (binary) of the dataset\n self.dataset_with_predictions, self.KD_tree, self.predictions = \\\n self.build_KD_tree(data_df_copy, desired_range, desired_class, self.predicted_outcome_name)\n\n query_instance, cfs_preds = self.find_counterfactuals(data_df_copy,\n query_instance, query_instance_orig,\n desired_range,\n desired_class,\n total_CFs, features_to_vary,\n permitted_range,\n sparsity_weight,\n stopping_threshold,\n posthoc_sparsity_param,\n posthoc_sparsity_algorithm, verbose)\n self.cfs_preds = cfs_preds\n\n return exp.CounterfactualExamples(data_interface=self.data_interface,\n final_cfs_df=self.final_cfs_df,\n test_instance_df=query_instance,\n final_cfs_df_sparse=self.final_cfs_df_sparse,\n posthoc_sparsity_param=posthoc_sparsity_param,\n desired_range=desired_range,\n desired_class=desired_class,\n model_type=self.model.model_type)\n\n def predict_fn(self, input_instance):\n \"\"\"returns predictions\"\"\"\n return self.model.get_output(input_instance, model_score=False)\n\n def do_sparsity_check(self, cfs, query_instance, sparsity_weight):\n cfs = cfs.assign(sparsity=np.nan, distancesparsity=np.nan)\n for index, row in cfs.iterrows():\n cnt = 0\n for column in self.data_interface.continuous_feature_names:\n if not np.isclose(row[column], query_instance[column].values[0]):\n cnt += 1\n for column in self.data_interface.categorical_feature_names:\n if row[column] != query_instance[column].values[0]:\n cnt += 1\n\n cfs.at[index, \"sparsity\"] = cnt\n\n cfs[\"distance\"] = (cfs[\"distance\"] - cfs[\"distance\"].min()) / (cfs[\"distance\"].max() - cfs[\"distance\"].min())\n cfs[\"sparsity\"] = (cfs[\"sparsity\"] - cfs[\"sparsity\"].min()) / (cfs[\"sparsity\"].max() - cfs[\"sparsity\"].min())\n cfs[\"distancesparsity\"] = cfs[\"distance\"] + sparsity_weight * cfs[\"sparsity\"]\n cfs = 
cfs.sort_values(by=\"distancesparsity\")\n cfs = cfs.drop([\"distance\", \"sparsity\", \"distancesparsity\"], axis=1)\n\n return cfs\n\n def vary_valid(self, KD_query_instance, total_CFs, features_to_vary, permitted_range, query_instance,\n sparsity_weight):\n \"\"\"This function ensures that we only vary features_to_vary when generating counterfactuals\"\"\"\n\n # TODO: this should be a user-specified parameter\n num_queries = min(len(self.dataset_with_predictions), total_CFs * 10)\n cfs = []\n\n if self.KD_tree is not None and num_queries > 0:\n KD_tree_output = self.KD_tree.query(KD_query_instance, num_queries)\n distances = KD_tree_output[0][0]\n indices = KD_tree_output[1][0]\n\n cfs = self.dataset_with_predictions.iloc[indices].copy()\n cfs['distance'] = distances\n cfs = self.do_sparsity_check(cfs, query_instance, sparsity_weight)\n cfs = cfs.drop(self.data_interface.outcome_name, axis=1)\n\n self.final_cfs = pd.DataFrame()\n final_indices = []\n cfs_preds = []\n total_cfs_found = 0\n\n # Iterating through the closest points from the KD tree and checking if any of these are valid\n if self.KD_tree is not None and total_CFs > 0:\n cfs = cfs.reset_index(drop=True)\n for i in range(len(cfs)):\n if total_cfs_found == total_CFs:\n break\n valid_cf_found = True\n for feature in self.data_interface.feature_names:\n if feature not in features_to_vary and cfs[feature].iat[i] != query_instance[feature].values[0]:\n valid_cf_found = False\n break\n if feature in self.data_interface.continuous_feature_names:\n if not self.feature_range[feature][0] <= cfs[feature].iat[i] <= self.feature_range[feature][1]:\n valid_cf_found = False\n break\n else:\n if not cfs[feature].iat[i] in self.feature_range[feature]:\n valid_cf_found = False\n break\n\n if valid_cf_found:\n if not self.duplicates(cfs, final_indices.copy(), i):\n total_cfs_found += 1\n final_indices.append(i)\n if total_cfs_found > 0:\n self.final_cfs = cfs.iloc[final_indices]\n self.final_cfs = self.final_cfs.drop([self.predicted_outcome_name], axis=1)\n # Finding the predicted outcome for each cf\n for i in range(total_cfs_found):\n cfs_preds.append(\n self.dataset_with_predictions.iloc[final_indices[i]][self.predicted_outcome_name])\n\n return self.final_cfs[:total_CFs], cfs_preds\n\n def duplicates(self, cfs, final_indices, i):\n final_indices.append(i)\n temp_cfs = cfs.iloc[final_indices]\n return temp_cfs.duplicated().iloc[-1]\n\n def find_counterfactuals(self, data_df_copy, query_instance, query_instance_orig, desired_range, desired_class,\n total_CFs, features_to_vary, permitted_range,\n sparsity_weight, stopping_threshold, posthoc_sparsity_param, posthoc_sparsity_algorithm,\n verbose):\n \"\"\"Finds counterfactuals by querying a K-D tree for the nearest data points in the desired class from the dataset.\"\"\"\n\n start_time = timeit.default_timer()\n\n # Making the one-hot-encoded version of query instance match the one-hot encoded version of the dataset\n query_instance_df_dummies = pd.get_dummies(query_instance_orig)\n for col in pd.get_dummies(data_df_copy[self.data_interface.feature_names]).columns:\n if col not in query_instance_df_dummies.columns:\n query_instance_df_dummies[col] = 0\n\n self.final_cfs, cfs_preds = self.vary_valid(query_instance_df_dummies,\n total_CFs,\n features_to_vary,\n permitted_range,\n query_instance_orig,\n sparsity_weight)\n\n total_cfs_found = len(self.final_cfs)\n if total_cfs_found > 0:\n # post-hoc operation on continuous features to enhance sparsity - only for public data\n if 
posthoc_sparsity_param is not None and posthoc_sparsity_param > 0 and 'data_df' in self.data_interface.__dict__:\n self.final_cfs_df_sparse = copy.deepcopy(self.final_cfs)\n self.final_cfs_df_sparse = self.do_posthoc_sparsity_enhancement(self.final_cfs_df_sparse, query_instance,\n posthoc_sparsity_param,\n posthoc_sparsity_algorithm)\n else:\n self.final_cfs_df_sparse = None\n else:\n self.final_cfs_df_sparse = None\n\n self.final_cfs_df = self.final_cfs\n if total_cfs_found > 0:\n self.round_to_precision()\n\n self.elapsed = timeit.default_timer() - start_time\n\n m, s = divmod(self.elapsed, 60)\n\n if verbose:\n if total_cfs_found < total_CFs:\n self.elapsed = timeit.default_timer() - start_time\n m, s = divmod(self.elapsed, 60)\n print('Only %d (required %d) ' % (total_cfs_found, self.total_CFs),\n 'Diverse Counterfactuals found for the given configuation, perhaps ',\n 'change the query instance or the features to vary...' '; total time taken: %02d' % m,\n 'min %02d' % s, 'sec')\n else:\n print('Diverse Counterfactuals found! total time taken: %02d' % m, 'min %02d' % s, 'sec')\n\n return query_instance, cfs_preds\n"
] | [
[
"numpy.isclose",
"pandas.DataFrame",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ishine/TCAN | [
"4e0dab3a6b0e2b450e16ccf912e13e25093dfd87"
] | [
"utils/dataset.py"
] | [
"import os\nimport torch\nimport pickle\nimport unidecode\nimport observations\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom torch.utils import data\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\n\nimport logging\nfrom IPython import embed\n\n\nclass RawDataset(data.Dataset):\n def __init__(self, dir_data_root, dataset_name, task, seq_len, valid_len, is_corpus=True, is_permute=False, seed=1111):\n super(RawDataset, self).__init__() \n self.is_permute = is_permute\n if is_permute:\n torch.manual_seed(seed)\n self.permute = torch.Tensor(np.random.permutation(784).astype(np.float64)).long()\n self.dataset_name = dataset_name\n self.seq_len = seq_len\n self.data_all, self.label_all, self.n_dict, self.dictionary = self._get_data(dir_data_root, dataset_name, task, seq_len, valid_len, is_corpus)\n \n def __getitem__(self, index):\n data = Variable(self.data_all[index])\n if self.dataset_name == 'mnist':\n data = data.view(1, 784).float()\n # data = data.view(784).long()\n if self.is_permute:\n data = data[:, self.permute]\n label = Variable(self.label_all[index])\n return data, label\n\n def __len__(self):\n return len(self.data_all)\n \n def _get_data(self, dir_data_root, dataset_name, task, seq_len, valid_len, is_corpus):\n dir_data = os.path.join(dir_data_root, dataset_name)\n if dataset_name == 'penn':\n if os.path.exists(dir_data + \"/corpus\") and is_corpus:\n corpus = pickle.load(open(dir_data + '/corpus', 'rb'))\n else:\n corpus = Corpus_word(dir_data)\n pickle.dump(corpus, open(dir_data + '/corpus', 'wb'))\n \n elif dataset_name == 'char_penn':\n dir_data = os.path.join(dir_data_root, dataset_name)\n if os.path.exists(dir_data + \"/corpus\") and is_corpus:\n\n corpus = pickle.load(open(dir_data + '/corpus', 'rb'))\n else:\n file, testfile, valfile = getattr(observations, 'ptb')(dir_data)\n corpus = Corpus_char(file + \" \" + valfile + \" \" + testfile)\n corpus.train = char_tensor(corpus, file)\n corpus.valid = char_tensor(corpus, valfile)\n corpus.test = char_tensor(corpus, testfile)\n pickle.dump(corpus, open(dir_data + '/corpus', 'wb'))\n\n elif dataset_name == 'mnist':\n corpus = Corpus_mnist(dir_data)\n\n elif dataset_name == 'wikitext-2':\n if os.path.exists(dir_data + \"/corpus\") and is_corpus:\n corpus = pickle.load(open(dir_data + '/corpus', 'rb'))\n else:\n corpus = Corpus_word(dir_data)\n pickle.dump(corpus, open(dir_data + '/corpus', 'wb'))\n \n elif dataset_name == 'wikitext-103':\n if os.path.exists(dir_data + \"/corpus\") and is_corpus:\n corpus = pickle.load(open(dir_data + '/corpus', 'rb'))\n else:\n corpus = Corpus_word(dir_data)\n pickle.dump(corpus, open(dir_data + '/corpus', 'wb'))\n\n n_dict = len(corpus.dictionary)\n dictionary = corpus.dictionary\n if task == 'train':\n data_task = corpus.train\n elif task == 'valid':\n data_task = corpus.valid\n elif task == 'test':\n data_task = corpus.test\n\n if self.dataset_name == 'mnist':\n if task == 'valid':\n task = 'test'\n # return getattr(data_task, '{}_data'.format(task))[:640], getattr(data_task, '{}_labels'.format(task))[:640], n_dict\n return getattr(data_task, '{}_data'.format(task)), getattr(data_task, '{}_labels'.format(task)), n_dict\n\n num_data = data_task.size(0) // valid_len\n data_all, label_all = [], []\n for i in range(num_data):\n if i*valid_len+seq_len+1 > data_task.size(0):\n break\n data_all.append(data_task[i*valid_len:i*valid_len+seq_len])\n 
label_all.append(data_task[i*valid_len+1:i*valid_len+seq_len+1])\n\n return data_all, label_all, n_dict, dictionary\n\n\ndef char_tensor(corpus, string):\n tensor = torch.zeros(len(string)).long()\n for i in tqdm(range(len(string)), ncols=80):\n tensor[i] = corpus.dictionary.char2idx[string[i]]\n return Variable(tensor)\n\n\nclass Dictionary_char(object):\n def __init__(self):\n self.char2idx = {}\n self.idx2char = []\n self.counter = Counter()\n\n def add_word(self, char):\n self.counter[char] += 1\n\n def prep_dict(self):\n for char in self.counter:\n if char not in self.char2idx:\n self.idx2char.append(char)\n self.char2idx[char] = len(self.idx2char) - 1\n\n def __len__(self):\n return len(self.idx2char)\n\n\nclass Corpus_char(object):\n def __init__(self, string):\n self.dictionary = Dictionary_char()\n for c in string:\n self.dictionary.add_word(c)\n self.dictionary.prep_dict()\n self.train = None\n self.valid = None\n self.test = None\n\n\nclass Corpus_word(object):\n def __init__(self, path):\n self.dictionary = Dictionary_word()\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids\n\nclass Dictionary_word(object):\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\nclass Corpus_mnist(object):\n def __init__(self, path):\n self.dictionary = list(range(10))\n self.train = datasets.MNIST(root=path, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n self.valid = datasets.MNIST(root=path, train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n self.test = datasets.MNIST(root=path, train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n\nif __name__ == '__main__':\n dir_data_root = '../data'\n dataset_name = 'char_penn'\n task = 'train'\n batch_size = 16\n seq_len = 80\n valid_len = 40\n rawdataset = RawDataset(dir_data_root, dataset_name, task, seq_len, valid_len)\n embed()\n total = 0\n for _ in rawdataset:\n total += 1\n print(total)\n\n"
] | [
[
"torch.manual_seed",
"torch.LongTensor",
"numpy.random.permutation",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qkirikigaku/Parallelized_LDA | [
"2256f4b583917bafb53d01759c8f47c2100b0c1e"
] | [
"Drawing/comparison_K.py"
] | [
"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n args = sys.argv\n \"\"\"args[1]:data_type\"\"\"\n K = int(args[2])\n\n files = (K-1)*[0]\n i = 2\n while(i < K+1):\n string = 'result/data' + args[1] + '/result_k'\n if(i <= 9):\n string += '0' + str(i) + '.txt'\n files[i-2] = string\n else:\n string += str(i) + '.txt'\n files[i-2] = string\n i += 1;\n\n ELBO = np.zeros([K-1])\n\n labels = (K-1)*[0]\n for i in range(K-1):\n data = open(files[i])\n elbo = data.readline()\n elbo = elbo.replace(\"\\n\",\"\")\n ELBO[i] = float(elbo)\n labels[i] = 'topic' + str(i+2)\n\n max_el = 0\n semi_el = 0\n for i in range(K-1):\n if(ELBO[i] > ELBO[max_el]):\n semi_el = max_el\n max_el = i\n elif(ELBO[i] > ELBO[semi_el]):\n semi_el = i\n\n el_colors = (K-1)*[\"b\"]\n el_colors[max_el] = \"r\"\n el_colors[semi_el] = \"g\"\n\n left = np.arange(1,K,1)\n height = ELBO.copy()\n\n plt.bar(left,height, align = \"center\", color = el_colors)\n plt.title(\"Variational lower bound of each signature number\")\n plt.xlabel(\"signature numbers\")\n plt.ylabel(\"Variational lower bound\")\n plt.xticks(left, labels, rotation = 90, fontsize = \"small\")\n plt.tight_layout()\n name = 'result/data' + args[1] + '/figure/ELBO.png'\n plt.savefig(name)\n plt.close(1)\n\n plt.figure()\n\n plt.bar(left,height, align = \"center\", color = el_colors)\n plt.title(\"Variational lower bound of each signature number\")\n plt.xlabel(\"signature numbers\")\n plt.ylabel(\"Variational lower bound\")\n plt.xticks(left, labels, rotation = 90, fontsize = \"small\")\n y_max =max(height) * 0.99\n y_min =min(height) * 1.01\n plt.ylim(ymax = y_max, ymin = y_min)\n plt.tight_layout()\n name = 'result/data' + args[1] + '/figure/ELBO_a.png'\n plt.savefig(name)\n plt.close(1)\n\n plt.figure()\n plt.bar(left[max_el-10:], height[max_el-10:], align = 'center', color = el_colors[max_el-10:])\n plt.title(\"Variational lower bound of each signature number\")\n plt.xlabel(\"signature numbers\")\n plt.ylabel(\"Variational lower bound\")\n plt.xticks(left[max_el-10:], labels[max_el-10:], rotation = 90, fontsize = \"small\")\n y_max =max(height) + 100\n y_min =min(height[max_el-10:]) - 100\n plt.ylim(ymax = y_max, ymin = y_min)\n plt.tight_layout()\n name = 'result/data' + args[1] + '/figure/ELBO_b.png'\n plt.savefig(name)\n plt.close(1)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bingrao/Bug-Transformer | [
"9e39dc553c281f6372b7a8cfc8205aa186645899",
"9e39dc553c281f6372b7a8cfc8205aa186645899"
] | [
"onmt/encoders/rnn_encoder.py",
"onmt/modules/global_attention.py"
] | [
"\"\"\"Define RNN-based encoders.\"\"\"\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\n\nfrom onmt.encoders.encoder import EncoderBase\nfrom onmt.utils.rnn_factory import rnn_factory\nimport torch\n\nclass RNNEncoder(EncoderBase):\n \"\"\" A generic recurrent neural network encoder.\n\n Args:\n rnn_type (str):\n style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]\n bidirectional (bool) : use a bidirectional RNN\n num_layers (int) : number of stacked layers\n hidden_size (int) : hidden size of each layer\n dropout (float) : dropout value for :class:`torch.nn.Dropout`\n embeddings (onmt.modules.Embeddings): embedding module to use\n \"\"\"\n\n def __init__(self, rnn_type, bidirectional, num_layers,\n hidden_size, dropout=0.0, embeddings=None,\n use_bridge=False):\n super(RNNEncoder, self).__init__()\n assert embeddings is not None\n\n num_directions = 2 if bidirectional else 1\n assert hidden_size % num_directions == 0\n hidden_size = hidden_size // num_directions\n self.embeddings = embeddings\n\n self.rnn, self.no_pack_padded_seq = \\\n rnn_factory(rnn_type,\n input_size=embeddings.embedding_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout,\n bidirectional=bidirectional)\n\n # Initialize the bridge layer\n self.use_bridge = use_bridge\n if self.use_bridge:\n self._initialize_bridge(rnn_type,\n hidden_size,\n num_layers)\n\n @classmethod\n def from_opt(cls, opt, embeddings):\n \"\"\"Alternate constructor.\"\"\"\n return cls(\n opt.rnn_type,\n opt.brnn,\n opt.enc_layers,\n opt.enc_rnn_size,\n opt.dropout[0] if type(opt.dropout) is list else opt.dropout,\n embeddings,\n opt.bridge)\n\n def forward(self, src, lengths=None, **kwargs):\n \"\"\"See :func:`EncoderBase.forward()`\"\"\"\n self._check_args(src, lengths)\n # [seq_len, batch_size, 1] --> [seq_len, batch_size, dim]\n emb = self.embeddings(src)\n # s_len, batch, emb_dim = emb.size()\n\n position = kwargs.get(\"position\", None)\n packed_emb = emb\n if lengths is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Tensor.\n lengths_list = lengths.view(-1).tolist()\n packed_emb = pack(emb, lengths_list)\n # encoder_final (hidden, cell) -> ([nums_layer*directions, batch_size, dim],\n # [nums_layer*directions, batch_size, dim]) torch.Size([6, 83, 512])\n memory_bank, encoder_final = self.rnn(packed_emb)\n\n if lengths is not None and not self.no_pack_padded_seq:\n # [seq_len, batch_size, dim] torch.Size([47, 83, 512])\n memory_bank = unpack(memory_bank)[0]\n\n if self.use_bridge:\n encoder_final = self._bridge(encoder_final)\n return encoder_final, memory_bank, lengths\n\n def _initialize_bridge(self, rnn_type,\n hidden_size,\n num_layers):\n\n # LSTM has hidden and cell state, other only one\n number_of_states = 2 if rnn_type == \"LSTM\" else 1\n # Total number of states\n self.total_hidden_dim = hidden_size * num_layers\n\n # Build a linear layer for each\n self.bridge = nn.ModuleList([nn.Linear(self.total_hidden_dim,\n self.total_hidden_dim,\n bias=True)\n for _ in range(number_of_states)])\n\n def _bridge(self, hidden):\n \"\"\"Forward hidden state through bridge.\"\"\"\n def bottle_hidden(linear, states):\n \"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"\n size = states.size()\n result = linear(states.view(-1, self.total_hidden_dim))\n return F.relu(result).view(size)\n\n if isinstance(hidden, tuple): # LSTM\n 
outs = tuple([bottle_hidden(layer, hidden[ix])\n for ix, layer in enumerate(self.bridge)])\n else:\n outs = bottle_hidden(self.bridge[0], hidden)\n return outs\n\n def update_dropout(self, dropout):\n self.rnn.dropout = dropout\n\n\nclass PathRNNEncoder(EncoderBase):\n \"\"\" A generic recurrent neural network encoder.\n\n Args:\n rnn_type (str):\n style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]\n bidirectional (bool) : use a bidirectional RNN\n num_layers (int) : number of stacked layers\n hidden_size (int) : hidden size of each layer\n dropout (float) : dropout value for :class:`torch.nn.Dropout`\n embeddings (onmt.modules.Embeddings): embedding module to use\n \"\"\"\n\n def __init__(self, rnn_type, bidirectional, num_layers, input_size,\n hidden_size, dropout=0.0, embeddings=None, use_bridge=False):\n super(PathRNNEncoder, self).__init__()\n\n num_directions = 2 if bidirectional else 1\n assert hidden_size % num_directions == 0\n hidden_size = hidden_size // num_directions\n self.embeddings = embeddings\n self.rnn, self.no_pack_padded_seq = rnn_factory(rnn_type,\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout,\n bidirectional=bidirectional)\n\n # Initialize the bridge layer\n self.use_bridge = use_bridge\n if self.use_bridge:\n self._initialize_bridge(rnn_type,\n hidden_size,\n num_layers)\n\n @classmethod\n def from_opt(cls, opt, embeddings):\n \"\"\"Alternate constructor.\"\"\"\n return cls(\n rnn_type='LSTM',\n bidirectional=False,\n num_layers=1,\n input_size=opt.enc_rnn_size,\n hidden_size=opt.enc_rnn_size,\n dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,\n embeddings=embeddings,\n use_bridge=opt.bridge)\n\n def forward(self, src, lengths=None, **kwargs):\n # x_path [b*l, p], torch.Size([3901, 16]), The padded of each AST path\n # x_example_len [b], torch.Size([83]), The number of paths (l) for each example in a batch\n # x_path_len [b*l], The number of AST nodes (k) for each path\n\n x_path, x_example_len, x_path_len = src\n\n x_path_len, perm_idx = x_path_len.sort(0, descending=True)\n x_path = x_path[perm_idx]\n emb = self.embeddings(x_path.unsqueeze(-1)).transpose(0, 1) # torch.Size([1886, 21]) -> torch.Size([21, 1886, 512])\n if lengths is None:\n lengths = x_path_len\n\n packed_emb = emb\n if lengths is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Tensor.\n # https://gist.github.com/HarshTrivedi/f4e7293e941b17d19058f6fb90ab0fec\n lengths_list = lengths.view(-1).tolist()\n packed_emb = pack(emb, lengths_list)\n\n # memory_bank, [p, b*l, dim], torch.Size([21, 1886, 512])\n # state -> [hidden, cell], [1, b*l, dim], torch.Size([1, 1886, 512])\n memory_bank, (final_hidden, final_cell) = self.rnn(packed_emb)\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank, _ = unpack(memory_bank)\n\n sorted_idx, reversed_perm_idx = perm_idx.sort(0)\n final_hidden = final_hidden[:, reversed_perm_idx, :]\n final_cell = final_cell[:, reversed_perm_idx, :]\n memory_bank = memory_bank[:, reversed_perm_idx, :]\n\n if self.use_bridge:\n final_hidden, final_cell = self._bridge((final_hidden, final_cell))\n\n # [batch_size, (l, dim)]\n output_bag = torch.split(final_hidden.squeeze(0),\n x_example_len.cpu().detach().tolist(), dim=0)\n\n src_len = kwargs.get('src_len', 100)\n # src_path_vec, [b, p_l, dim], torch.Size([41, 46, 512])\n src_path_vec = torch.stack([torch.nn.functional.pad(x, pad=[0, 0, 0, src_len - x.size(0)],\n mode='constant', value=0) for x in 
output_bag])\n\n return src_path_vec, (final_hidden, final_cell)\n\n def _initialize_bridge(self, rnn_type,\n hidden_size,\n num_layers):\n\n # LSTM has hidden and cell state, other only one\n number_of_states = 2 if rnn_type == \"LSTM\" else 1\n # Total number of states\n self.total_hidden_dim = hidden_size * num_layers\n\n # Build a linear layer for each\n self.bridge = nn.ModuleList([nn.Linear(self.total_hidden_dim,\n self.total_hidden_dim,\n bias=True)\n for _ in range(number_of_states)])\n\n def _bridge(self, hidden):\n \"\"\"Forward hidden state through bridge.\"\"\"\n\n def bottle_hidden(linear, states):\n \"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"\n size = states.size()\n result = linear(states.view(-1, self.total_hidden_dim))\n return F.relu(result).view(size)\n\n if isinstance(hidden, tuple): # LSTM\n outs = tuple([bottle_hidden(layer, hidden[ix])\n for ix, layer in enumerate(self.bridge)])\n else:\n outs = bottle_hidden(self.bridge[0], hidden)\n return outs\n\n def update_dropout(self, dropout):\n self.rnn.dropout = dropout\n",
"\"\"\"Global attention modules (Luong / Bahdanau)\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom onmt.modules.sparse_activations import sparsemax\nfrom onmt.utils.misc import aeq, sequence_mask\n\n# This class is mainly used by decoder.py for RNNs but also\n# by the CNN / transformer decoder when copy attention is used\n# CNN has its own attention mechanism ConvMultiStepAttention\n# Transformer has its own MultiHeadedAttention\n\n\nclass GlobalAttention(nn.Module):\n r\"\"\"\n Global attention takes a matrix and a query vector. It\n then computes a parameterized convex combination of the matrix\n based on the input query.\n\n Constructs a unit mapping a query `q` of size `dim`\n and a source matrix `H` of size `n x dim`, to an output\n of size `dim`.\n\n\n .. mermaid::\n\n graph BT\n A[Query]\n subgraph RNN\n C[H 1]\n D[H 2]\n E[H N]\n end\n F[Attn]\n G[Output]\n A --> F\n C --> F\n D --> F\n E --> F\n C -.-> G\n D -.-> G\n E -.-> G\n F --> G\n\n All models compute the output as\n :math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where\n :math:`a_j` is the softmax of a score function.\n Then then apply a projection layer to [q, c].\n\n However they\n differ on how they compute the attention score.\n\n * Luong Attention (dot, general):\n * dot: :math:`\\text{score}(H_j,q) = H_j^T q`\n * general: :math:`\\text{score}(H_j, q) = H_j^T W_a q`\n\n\n * Bahdanau Attention (mlp):\n * :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)`\n\n\n Args:\n dim (int): dimensionality of query and key\n coverage (bool): use coverage term\n attn_type (str): type of attention to use, options [dot,general,mlp]\n attn_func (str): attention function to use, options [softmax,sparsemax]\n\n \"\"\"\n\n def __init__(self, dim, coverage=False, attn_type=\"dot\",\n attn_func=\"softmax\"):\n super(GlobalAttention, self).__init__()\n\n self.dim = dim\n assert attn_type in [\"dot\", \"general\", \"mlp\"], (\n \"Please select a valid attention type (got {:s}).\".format(\n attn_type))\n self.attn_type = attn_type\n assert attn_func in [\"softmax\", \"sparsemax\"], (\n \"Please select a valid attention function.\")\n self.attn_func = attn_func\n\n if self.attn_type == \"general\":\n self.linear_in = nn.Linear(dim, dim, bias=False)\n elif self.attn_type == \"mlp\":\n self.linear_context = nn.Linear(dim, dim, bias=False)\n self.linear_query = nn.Linear(dim, dim, bias=True)\n self.v = nn.Linear(dim, 1, bias=False)\n # mlp wants it with bias\n out_bias = self.attn_type == \"mlp\"\n self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)\n\n if coverage:\n self.linear_cover = nn.Linear(1, dim, bias=False)\n\n def score(self, h_t, h_s):\n \"\"\"\n Args:\n h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``\n h_s (FloatTensor): sequence of sources ``(batch, src_len, dim``\n\n Returns:\n FloatTensor: raw attention scores (unnormalized) for each src index\n ``(batch, tgt_len, src_len)``\n \"\"\"\n\n # Check input sizes\n src_batch, src_len, src_dim = h_s.size()\n tgt_batch, tgt_len, tgt_dim = h_t.size()\n aeq(src_batch, tgt_batch)\n aeq(src_dim, tgt_dim)\n aeq(self.dim, src_dim)\n\n if self.attn_type in [\"general\", \"dot\"]:\n if self.attn_type == \"general\":\n h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)\n h_t_ = self.linear_in(h_t_)\n h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)\n h_s_ = h_s.transpose(1, 2)\n # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)\n return torch.bmm(h_t, h_s_)\n else:\n dim = self.dim\n wq = 
self.linear_query(h_t.view(-1, dim))\n wq = wq.view(tgt_batch, tgt_len, 1, dim)\n wq = wq.expand(tgt_batch, tgt_len, src_len, dim)\n\n uh = self.linear_context(h_s.contiguous().view(-1, dim))\n uh = uh.view(src_batch, 1, src_len, dim)\n uh = uh.expand(src_batch, tgt_len, src_len, dim)\n\n # (batch, t_len, s_len, d)\n wquh = torch.tanh(wq + uh)\n\n return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)\n\n def forward(self, source, memory_bank, memory_lengths=None, coverage=None):\n \"\"\"\n\n Args:\n source (FloatTensor): query vectors ``(batch, tgt_len, dim)``, torch.Size([83, 512])\n memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``, torch.Size([83, 47, 512])\n memory_lengths (LongTensor): the source context lengths ``(batch,)``\n coverage (FloatTensor): None (not supported yet)\n\n Returns:\n (FloatTensor, FloatTensor):\n\n * Computed vector ``(tgt_len, batch, dim)``\n * Attention distribtutions for each query\n ``(tgt_len, batch, src_len)``\n \"\"\"\n\n # one step input\n if source.dim() == 2:\n one_step = True\n source = source.unsqueeze(1)\n else:\n one_step = False\n\n batch, source_l, dim = memory_bank.size()\n batch_, target_l, dim_ = source.size()\n aeq(batch, batch_)\n aeq(dim, dim_)\n aeq(self.dim, dim)\n if coverage is not None:\n batch_, source_l_ = coverage.size()\n aeq(batch, batch_)\n aeq(source_l, source_l_)\n\n if coverage is not None:\n cover = coverage.view(-1).unsqueeze(1)\n memory_bank += self.linear_cover(cover).view_as(memory_bank)\n memory_bank = torch.tanh(memory_bank)\n\n # compute attention scores, as in Luong et al. source: torch.Size([83, 1, 512]), memory_bank: torch.Size([83, 47, 512])\n align = self.score(source, memory_bank) # torch.Size([83, 1, 47])\n\n if memory_lengths is not None:\n mask = sequence_mask(memory_lengths, max_len=align.size(-1)) # torch.Size([83, 47])\n mask = mask.unsqueeze(1) # Make it broadcastable. torch.Size([83, 1, 47])\n align.masked_fill_(~mask, -float('inf'))\n\n # Softmax or sparsemax to normalize attention weights\n if self.attn_func == \"softmax\":\n align_vectors = F.softmax(align.view(batch*target_l, source_l), -1) # torch.Size([83, 47])\n else:\n align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)\n align_vectors = align_vectors.view(batch, target_l, source_l) # torch.Size([83, 1, 47])\n\n # each context vector c_t is the weighted average\n # over all the source hidden states, torch.Size([83, 1, 512])\n c = torch.bmm(align_vectors, memory_bank)\n\n # concatenate, torch.Size([83, 1024])\n concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)\n attn_h = self.linear_out(concat_c).view(batch, target_l, dim) # torch.Size([83, 1, 512])\n if self.attn_type in [\"general\", \"dot\"]:\n attn_h = torch.tanh(attn_h)\n\n if one_step:\n attn_h = attn_h.squeeze(1)\n align_vectors = align_vectors.squeeze(1)\n\n # Check output sizes\n batch_, dim_ = attn_h.size()\n aeq(batch, batch_)\n aeq(dim, dim_)\n batch_, source_l_ = align_vectors.size()\n aeq(batch, batch_)\n aeq(source_l, source_l_)\n\n else:\n attn_h = attn_h.transpose(0, 1).contiguous()\n align_vectors = align_vectors.transpose(0, 1).contiguous()\n # Check output sizes\n target_l_, batch_, dim_ = attn_h.size()\n aeq(target_l, target_l_)\n aeq(batch, batch_)\n aeq(dim, dim_)\n target_l_, batch_, source_l_ = align_vectors.size()\n aeq(target_l, target_l_)\n aeq(batch, batch_)\n aeq(source_l, source_l_)\n\n return attn_h, align_vectors\n"
] | [
[
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.functional.relu",
"torch.nn.utils.rnn.pack_padded_sequence"
],
[
"torch.tanh",
"torch.nn.Linear",
"torch.bmm",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DmytroSytnyk/hivemind | [
"595b831bcaac6b4d8da215de70b8138ac548c562"
] | [
"hivemind/moe/client/moe.py"
] | [
"from __future__ import annotations\n\nimport time\nfrom queue import Empty, Queue\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport grpc\nimport torch\nimport torch.nn as nn\nfrom torch.autograd.function import once_differentiable\n\nimport hivemind\nfrom hivemind.compression import deserialize_torch_tensor, serialize_torch_tensor\nfrom hivemind.moe.client.beam_search import MoEBeamSearcher\nfrom hivemind.moe.client.expert import DUMMY, RemoteExpert, _get_expert_stub\nfrom hivemind.moe.server.expert_uid import UID_DELIMITER\nfrom hivemind.proto import runtime_pb2, runtime_pb2_grpc as runtime_grpc\nfrom hivemind.utils import nested_flatten, nested_map, nested_pack\nfrom hivemind.utils.logging import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass RemoteMixtureOfExperts(nn.Module):\n \"\"\"\n A torch module that performs Mixture-of-Experts inference with a local gating function and multiple remote experts.\n Natively supports pytorch autograd.\n\n :note: By default, not all experts are guaranteed to perform forward pass. Moreover, not all of those who ran\n forward pass are guaranteed to perform backward pass. In the latter case, gradient will be averaged without\n the missing experts\n\n :param in_features: common input size for experts and gating function\n :param grid_size: dimensions that form expert uid (see below)\n :param uid_prefix: common prefix for all expert uids (must end with '.')\n :note: expert uid follows the pattern {uid_prefix}.{0...grid_size[0]}.{0...grid_size[1]}...{0...grid_size[-1]}\n :param dht: a DHT instance used to search for best experts\n :param k_best: average this many highest-scoring experts to compute activations\n :param k_min: make sure at least this many experts returned output (i.e. didn't fail)\n :param timeout_after_k_min: wait for this many seconds after k_min experts returned results.\n Any expert that didn't manage to return output after that delay is considered unavailable\n :param detect_anomalies: whether to check input/output tensors for NaN and infinity values\n :param allow_zero_outputs: whether to return zeros if no experts respond on forward pass\n \"\"\"\n\n def __init__(\n self,\n *,\n in_features,\n grid_size: Tuple[int, ...],\n dht: hivemind.DHT,\n uid_prefix: str,\n k_best: int,\n k_min: int = 1,\n forward_timeout: Optional[float] = None,\n timeout_after_k_min: Optional[float] = None,\n backward_k_min: int = 1,\n backward_timeout: Optional[float] = None,\n detect_anomalies: bool = False,\n allow_zero_outputs: bool = False,\n **dht_kwargs,\n ):\n super().__init__()\n self.dht = dht\n self.beam_search = MoEBeamSearcher(dht, uid_prefix, grid_size, **dht_kwargs)\n self.k_best, self.k_min, self.backward_k_min = k_best, k_min, backward_k_min\n self.forward_timeout, self.backward_timeout = forward_timeout, backward_timeout\n self.timeout_after_k_min = timeout_after_k_min\n self.detect_anomalies = detect_anomalies\n self.allow_zero_outputs = allow_zero_outputs\n\n # jointly predict logits for all grid dimensions\n self.proj = nn.Linear(in_features, self.beam_search.total_grid_size)\n self._expert_info = None # expert['info'] from one of experts in the grid\n\n def forward(self, input: torch.Tensor, *args: torch.Tensor, **kwargs: torch.Tensor):\n \"\"\"\n Choose k best experts with beam search, then call chosen experts and average their outputs.\n Input tensor is averaged over all dimensions except for first and last\n (we assume that extra dimensions represent sequence length or image height/width)\n\n :param input: a tensor 
of values that are used to estimate gating function, batch-first.\n :param args: extra positional parameters that will be passed to each expert after input, batch-first\n :param kwargs: extra keyword parameters that will be passed to each expert, batch-first\n :returns: averaged predictions of all experts that delivered result on time, nested structure of batch-first\n \"\"\"\n if input.ndim != 2:\n input_for_gating = input.mean(dim=tuple(range(1, input.ndim - 1)))\n else:\n input_for_gating = input\n\n # 1. compute scores and find most appropriate experts with beam search\n grid_scores = self.proj(input_for_gating).split_with_sizes(self.beam_search.grid_size, dim=-1)\n\n chosen_experts: List[List[RemoteExpert]] = self.beam_search.batch_find_best_experts(\n [scores.detach().cpu().numpy() for scores in grid_scores], self.k_best\n )\n\n if self._expert_info is None:\n try:\n self._expert_info = next((expert.info for experts_i in chosen_experts for expert in experts_i))\n except StopIteration:\n raise RuntimeError(\n \"No responding experts found during beam search. Check that UID prefixes and \"\n \"the grid size are consistent with running Server instances.\"\n )\n except grpc.RpcError as e:\n logger.warning(f\"Failed to get RemoteMixtureOfExperts.output_shape: {e}\")\n\n expert_mask, *expert_outputs = _RemoteCallMany.apply(\n DUMMY,\n chosen_experts,\n self.k_min,\n self.backward_k_min,\n self.timeout_after_k_min,\n self.forward_timeout,\n self.backward_timeout,\n self.detect_anomalies,\n self.allow_zero_outputs,\n self.info,\n *nested_flatten(((input, *args), kwargs)),\n )\n # ^-- multiple tensors of shape [batch_size, max_experts, ...output_shape]\n\n expert_logits = self.compute_expert_scores(grid_scores, chosen_experts)\n masked_logits = torch.full((1,), float(\"-inf\"), device=expert_logits.device, dtype=expert_logits.dtype)\n expert_logits = torch.where(expert_mask, expert_logits, masked_logits)\n expert_weights = torch.softmax(expert_logits, dim=1)\n averaged_outputs_flat = [\n (expert_weights[..., None] * tensor.flatten(start_dim=2)).view(tensor.shape).sum(dim=1)\n for tensor in expert_outputs\n ] # ^-- multiply by softmax weights along first 2 axes\n\n return nested_pack(averaged_outputs_flat, self.info[\"outputs_schema\"])\n\n def compute_expert_scores(\n self, grid_scores: List[torch.Tensor], batch_experts: List[List[RemoteExpert]]\n ) -> torch.Tensor:\n \"\"\"\n Compute scores for each expert by adding up grid scores, autograd-friendly\n :param grid_scores: list of torch tensors, i-th tensor contains scores for i-th grid dimension\n :param batch_experts: list(batch) of lists(k) of up to k experts selected for this batch\n :returns: a tensor of scores, float32[batch_size, k]\n :note: if some rows in batch have less than max number of experts, their scores will be padded with -inf\n \"\"\"\n expert_counts = list(map(len, batch_experts))\n batch_size = len(batch_experts)\n max_num_experts = max(expert_counts)\n total_num_experts = sum(expert_counts)\n\n device = grid_scores[0].device\n\n expert_index_in_batch = torch.arange(total_num_experts, device=device)\n expert_strides = torch.cumsum(torch.as_tensor([0] + expert_counts, device=device), dim=-1)[:-1]\n flat_batch_indices = (expert_index_in_batch >= expert_strides[:, None]).to(torch.int32).sum(0) - 1\n flat_local_indices = expert_index_in_batch - expert_strides[flat_batch_indices]\n flat_experts = [expert for row in batch_experts for expert in row]\n\n grid_indices = torch.zeros([len(flat_experts), len(grid_scores)], 
dtype=torch.int64)\n for i, expert in enumerate(flat_experts):\n expert_indices = expert.uid[len(self.beam_search.uid_prefix) :]\n expert_indices = list(map(int, expert_indices.split(UID_DELIMITER)))\n grid_indices[i] = torch.as_tensor(expert_indices, dtype=grid_indices.dtype)\n\n scores_per_dim = [\n dim_scores[flat_batch_indices, dim_indices] if len(flat_batch_indices) else torch.zeros(0, device=device)\n for dim_scores, dim_indices in zip(grid_scores, grid_indices.T)\n ]\n flat_scores = torch.sum(torch.stack(scores_per_dim, dim=0), dim=0)\n\n scores = torch.full((batch_size, max_num_experts), fill_value=-float(\"inf\"), device=device)\n scores[flat_batch_indices, flat_local_indices] = flat_scores # backprop-able w.r.t. flat_scores\n return scores\n\n @property\n def info(self):\n if self._expert_info is None:\n # grab some expert to set ensemble output shape\n proj_device = self.proj.weight.device\n dummy_scores_concat = self.proj(torch.randn(1, self.proj.in_features, device=proj_device))\n dummy_scores = dummy_scores_concat.cpu().split_with_sizes(self.beam_search.grid_size, dim=-1)\n dummy_experts = self.beam_search.find_best_experts(dummy_scores, beam_size=1)\n self._expert_info = dummy_experts[0].info\n return self._expert_info\n\n\nclass _RemoteCallMany(torch.autograd.Function):\n \"\"\"\n Internal autograd-friendly function that calls multiple experts on a batch of inputs and awaits responses\n This function that can recover from individual failures during forward and/or backward pass as long as at least\n one expert succeeds for each input. For user-friendly version of this function, use RemoteMixtureOfExperts module.\n\n Note: experts that failed during forward will be assigned zero outputs and marked as mask[i, j] = 0,\n experts that failed during backward will be treated as constants (i.e. 
gradients through them are zeros)\n \"\"\"\n\n @classmethod\n def forward(\n cls,\n ctx,\n dummy,\n experts_per_sample: List[List[RemoteExpert]],\n k_min: int,\n backward_k_min: int,\n timeout_after_k_min: float,\n forward_timeout: Optional[float],\n backward_timeout: Optional[float],\n detect_anomalies: bool,\n allow_zero_outputs: bool,\n info: Dict[str, Any],\n *flat_inputs: torch.Tensor,\n ) -> Tuple[torch.Tensor]:\n assert not torch.is_grad_enabled()\n num_samples, max_experts = len(experts_per_sample), max(map(len, experts_per_sample))\n\n flat_inputs_cpu = []\n for tensor in flat_inputs:\n if detect_anomalies and not tensor.isfinite().all():\n raise ValueError(\"One of inputs has nan/inf values\")\n flat_inputs_cpu.append(tensor.cpu())\n\n flat_inputs_per_sample = list(zip(*(x.split(1, dim=0) for x in flat_inputs_cpu)))\n assert len(experts_per_sample) == len(flat_inputs_per_sample) == num_samples\n\n # dispatch tasks to all remote experts collect responses\n pending_tasks: Dict[grpc.Future, Tuple[int, int]] = {}\n for i in range(num_samples):\n for j, expert in enumerate(experts_per_sample[i]):\n input_tensors = [\n serialize_torch_tensor(tensor, proto.compression)\n for tensor, proto in zip(flat_inputs_per_sample[i], nested_flatten(info[\"forward_schema\"]))\n ]\n stub: runtime_grpc.ConnectionHandlerStub = _get_expert_stub(expert.endpoint)\n new_task = stub.forward.future(runtime_pb2.ExpertRequest(uid=expert.uid, tensors=input_tensors))\n pending_tasks[new_task] = (i, j)\n\n responded_inds, alive_flat_outputs = cls._collect_responses(\n pending_tasks, num_samples, k_min, forward_timeout, timeout_after_k_min, detect_anomalies\n )\n if len(responded_inds) < k_min:\n raise TimeoutError(f\"Forward pass: less than {k_min} responded within timeout\")\n\n if not isinstance(info[\"outputs_schema\"], tuple):\n outputs_schema = (info[\"outputs_schema\"],)\n else:\n outputs_schema = info[\"outputs_schema\"]\n outputs = nested_map(\n lambda descriptor: descriptor.make_empty(num_samples, max_experts, device=flat_inputs[0].device).zero_(),\n outputs_schema,\n )\n\n # assemble responses\n if len(responded_inds) > 0 or allow_zero_outputs:\n batch_inds, expert_inds = map(\n lambda x: torch.as_tensor(x, device=flat_inputs[0].device, dtype=torch.long),\n list(zip(*responded_inds)) or ([], []),\n )\n\n alive_flat_outputs_stacked = (torch.cat(outputs) for outputs in zip(*alive_flat_outputs))\n # torch tensors, i-th tensor is of shape [num_responded, *expert_outputs[i].shape]\n\n for output, response_stacked in zip(outputs, alive_flat_outputs_stacked):\n output[batch_inds, expert_inds] = response_stacked.to(output.device)\n\n else:\n raise RuntimeError(\"Forward pass: 0 experts responded within timeout and allow_zero_outputs is False\")\n\n mask = torch.zeros([num_samples, max_experts], dtype=torch.bool, device=flat_inputs[0].device)\n mask[batch_inds, expert_inds] = True\n\n # save individual outputs for backward pass\n ctx.save_for_backward(batch_inds, expert_inds, *flat_inputs_cpu)\n ctx._saved_non_tensors = (\n info,\n backward_k_min,\n backward_timeout,\n timeout_after_k_min,\n experts_per_sample,\n detect_anomalies,\n )\n\n return (mask,) + outputs\n\n @classmethod\n @once_differentiable\n def backward(cls, ctx, *raw_grads: torch.Tensor) -> Tuple[Optional[torch.Tensor], ...]:\n assert not torch.is_grad_enabled()\n (\n info,\n backward_k_min,\n backward_timeout,\n timeout_after_k_min,\n expert_per_sample,\n detect_anomalies,\n ) = ctx._saved_non_tensors\n alive_ii, alive_jj, *flat_inputs_cpu = 
ctx.saved_tensors\n\n dummy_grad_mask, *flat_grad_outputs = raw_grads\n\n flat_grad_outputs_cpu = []\n for tensor in flat_grad_outputs:\n if detect_anomalies and not tensor.isfinite().all():\n raise ValueError(\"One of gradients has nan/inf values\")\n flat_grad_outputs_cpu.append(tensor.cpu())\n\n num_samples, max_experts = dummy_grad_mask.shape\n\n inputs_per_expert = zip(*(tensor[alive_ii].split(1, dim=0) for tensor in flat_inputs_cpu))\n grad_outputs_per_expert = zip(\n *(tensor[alive_ii, alive_jj].split(1, dim=0) for tensor in flat_grad_outputs_cpu)\n )\n backward_schema = tuple(nested_flatten((info[\"forward_schema\"], info[\"outputs_schema\"])))\n\n # dispatch tasks to all remote experts, collect responses\n pending_tasks = {}\n for i, j, inputs_ij, grad_outputs_ij in zip(\n alive_ii.cpu().numpy(), alive_jj.cpu().numpy(), inputs_per_expert, grad_outputs_per_expert\n ):\n expert = expert_per_sample[i.item()][j.item()]\n stub = _get_expert_stub(expert.endpoint)\n inputs_and_grad_outputs = tuple(nested_flatten((inputs_ij, grad_outputs_ij)))\n tensors_serialized = [\n serialize_torch_tensor(tensor, proto.compression)\n for tensor, proto in zip(inputs_and_grad_outputs, backward_schema)\n ]\n new_task = stub.backward.future(runtime_pb2.ExpertRequest(uid=expert.uid, tensors=tensors_serialized))\n pending_tasks[new_task] = (i, j)\n\n survivor_inds, survivor_grad_inputs = cls._collect_responses(\n pending_tasks, num_samples, backward_k_min, backward_timeout, timeout_after_k_min, detect_anomalies\n )\n if len(survivor_inds) < backward_k_min:\n raise TimeoutError(f\"Backward pass: less than {backward_k_min} experts responded within timeout\")\n\n # assemble responses\n batch_inds, expert_inds = map(\n lambda x: torch.as_tensor(x, dtype=torch.long), list(zip(*survivor_inds)) or ([], [])\n )\n\n survivor_grad_inputs_stacked = (torch.cat(grad_inputs) for grad_inputs in zip(*survivor_grad_inputs))\n # torch tensors, i-th tensor is of shape [num_backward_survivors, *flat_inputs_cpu[i].shape]\n\n grad_inputs = nested_map(\n lambda descr: descr.make_empty(num_samples, device=flat_grad_outputs[0].device).zero_(),\n list(nested_flatten(info[\"forward_schema\"])),\n )\n\n for grad_input, survivor_grad_stacked in zip(grad_inputs, survivor_grad_inputs_stacked):\n grad_input_per_expert = torch.zeros( # gradient tensor with individual contributions from each expert\n (num_samples, max_experts, *grad_input.shape[1:]),\n device=survivor_grad_stacked.device,\n dtype=survivor_grad_stacked.dtype,\n )\n grad_input_per_expert[batch_inds, expert_inds] = survivor_grad_stacked\n grad_input.copy_(grad_input_per_expert.to(flat_grad_outputs[0].device).sum(dim=1))\n\n return (DUMMY, None, None, None, None, None, None, None, None, None, *grad_inputs)\n\n @staticmethod\n def _collect_responses(\n task_to_indices: Dict[grpc.Future, Tuple[int, int]],\n num_samples: int,\n k_min: int,\n timeout_total: Optional[float],\n timeout_after_k_min: Optional[float],\n detect_anomalies: bool,\n ) -> Tuple[List[Tuple[int, int]], List[Tuple[torch.Tensor, ...]]]:\n \"\"\"await up to k_min results and any result submitted within timeout_after_k_min, cancel stragglers\"\"\"\n timeout_total = float(\"inf\") if timeout_total is None else timeout_total\n timeout_after_k_min = float(\"inf\") if timeout_after_k_min is None else timeout_after_k_min\n num_successful_tasks = [0 for _ in range(num_samples)]\n pending_samples = num_samples # samples for which we have less than k_min results\n finished_indices, finished_outputs = [], []\n t_finish = 
time.perf_counter() + timeout_total\n pending_tasks = set(task_to_indices.keys())\n finished_tasks = Queue()\n\n try:\n # the algorithm below is essentially futures.as_completed, but for grpc.Future\n for task in pending_tasks:\n task.add_done_callback(finished_tasks.put)\n\n for _ in range(len(task_to_indices)):\n timeout = max(0.0, t_finish - time.perf_counter()) if t_finish != float(\"inf\") else None\n task = finished_tasks.get(timeout=timeout)\n pending_tasks.discard(task)\n\n task_output = _process_dispatched_task(task, detect_anomalies)\n if task_output is not None:\n finished_indices.append(task_to_indices[task])\n finished_outputs.append(task_output)\n\n # count how many successes we have for each input sample\n sample_index = task_to_indices[task][0]\n num_successful_tasks[sample_index] += 1\n if num_successful_tasks[sample_index] == k_min:\n pending_samples -= 1\n if (\n pending_samples <= 0\n ): # all tasks finished, await stragglers for at most timeout_after_k_min\n t_finish = min(t_finish, time.perf_counter() + timeout_after_k_min)\n\n except Empty:\n pass # we reached t_finish, this is normal behavior\n finally:\n for task in pending_tasks:\n task.cancel()\n return finished_indices, finished_outputs\n\n\ndef _process_dispatched_task(task: grpc.Future, detect_anomalies: bool) -> Optional[Tuple[torch.Tensor]]:\n if task.exception() or task.cancelled():\n logger.warning(f\"Task {task} failed: {type(task.exception())}\")\n return None\n\n deserialized_outputs = []\n for tensor in task.result().tensors:\n deserialized_tensor = deserialize_torch_tensor(tensor)\n if detect_anomalies and not deserialized_tensor.isfinite().all():\n logger.error(f\"Task {task} failed: output tensor contains nan/inf values\")\n return None\n deserialized_outputs.append(deserialized_tensor)\n\n return tuple(deserialized_outputs)\n"
] | [
[
"torch.softmax",
"torch.zeros",
"torch.cat",
"torch.randn",
"torch.nn.Linear",
"torch.where",
"torch.arange",
"torch.stack",
"torch.is_grad_enabled",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fabian57fabian/tempoGAN | [
"6ad6c289aac11ffff5e73a8d398f6cd7d848b9cc"
] | [
"tensorflow/datagen/gen_sim_2006.py"
] | [
"#\n# tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow\n# Copyright 2018 You Xie, Erik Franz, Mengyu Chu, Nils Thuerey\n#\n# Plume data generation, 2D\n# \nfrom manta import *\nimport os, shutil, math, sys\nimport numpy as np\nsys.path.append(\"../tools\")\nimport paramhelpers as ph\n\nsimId = 2006\nsimPath = '../2ddata_sim/'\nsimPath,simId = ph.getNextSimPath(simId, simPath)\n\n# how much to reduce target sim size\ntargetFac = 0.25\nsavenpz = 1\n\n# source solver params\ndim = 2\n#res = 128\nres = 512\ngs = vec3(res,int(1.0*res),res)\nif (dim==2): gs.z = 1 # 2D\n\nsm = Solver(name='main', gridSize = gs, dim=dim)\nsm.timestep = 1.5 \nsm.timestep = 0.75 \n\n# inflow noise field\nnoise = NoiseField( parent=sm, fixedSeed=265, loadFromFile=True)\nnoise.posScale = vec3(24)\nnoise.clamp = True\nnoise.clampNeg = 0\nnoise.clampPos = 2\nnoise.valScale = 1\nnoise.valOffset = 0.075\nnoise.timeAnim = 0.3\nnoise.timeAnim = 0.5\n\ncylWidth = 0.13\nsource = Cylinder(parent=sm, center=gs*vec3(0.5,0.1,0.5), radius=res*cylWidth, z=gs*vec3(0, 0.04, 0))\n\n\n# target solver, recompute sizes...\n\ntarget_gs = vec3(targetFac*gs.x,targetFac*gs.y,targetFac*gs.z)\nif (dim==2): target_gs.z = 1 # 2D\ntargs = Solver(name='target', gridSize = target_gs, dim=dim)\ntargs.timestep = sm.timestep \n\ndummy = targs.create(MACGrid)\ntarget_flags = targs.create(FlagGrid)\ntarget_vel = targs.create(MACGrid)\ntarget_density = targs.create(RealGrid)\n\ntarget_flags.initDomain()\ntarget_flags.fillGrid()\n\ntarget_source = Cylinder(parent=targs, center=target_gs*vec3(0.5,0.1,0.5), radius=res*targetFac*cylWidth, z=target_gs*vec3(0, 0.04, 0))\n\nif savenpz:\n\tarR = np.zeros([int(gs.z), int(gs.y), int(gs.x), 1])\n\tarV = np.zeros([int(gs.z), int(gs.y), int(gs.x), 3])\n\ttarget_arR = np.zeros([int(target_gs.z), int(target_gs.y), int(target_gs.x), 1])\n\ttarget_arV = np.zeros([int(target_gs.z), int(target_gs.y), int(target_gs.x), 3])\n\n# allocate other grids\nflags = sm.create(FlagGrid)\nvel = sm.create(MACGrid)\ndensity = sm.create(RealGrid)\npressure = sm.create(RealGrid)\nblurden = sm.create(RealGrid)\nblurvel = sm.create(MACGrid)\n\nbWidth=0\nflags.initDomain(boundaryWidth=bWidth)\nflags.fillGrid() \nsetOpenBound(flags,bWidth,'yY',FlagOutflow|FlagEmpty) \n\n\nif (GUI):\n\tgui = Gui()\n\tgui.setCamPos(0., 0., -1.3)\n\tgui.show()\n\n# main loop\nfor t in range(400):\n\tmantaMsg('\\nFrame %i, simulation time %f' % (sm.frame, sm.timeTotal))\n\t\t\n\tadvectSemiLagrange(flags=flags, vel=vel, grid=density, order=2, clampMode=2 ) \n\tadvectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2, clampMode=2 , openBounds=True, boundaryWidth=bWidth )\n\t\n\tapplyInflow=False\n\tif (sm.timeTotal>=0 and sm.timeTotal<150.):\n\t\tdensityInflow( flags=flags, density=density, noise=noise, shape=source, scale=1, sigma=0.5 )\n\t\tapplyInflow=True\n\t\n\tsetWallBcs(flags=flags, vel=vel) \n\t#addBuoyancy(density=density, vel=vel, gravity=vec3(0,-1e-3,0), flags=flags)\n\taddBuoyancy(density=density, vel=vel, gravity=vec3(0,-2e-4,0), flags=flags)\n\n\t#vorticityConfinement( vel=vel, flags=flags, strength=0.1 )\n\t\n\tsolvePressure(flags=flags, vel=vel, pressure=pressure , cgMaxIterFac=1.0, cgAccuracy=0.01 )\n\tsetWallBcs(flags=flags, vel=vel)\n\t\n\tsm.step()\n\t\n\t# copy to target\n\tif 1:\n\t\tblurSig = float(1./targetFac) / 3.544908 # 3.544908 = 2 * sqrt( PI )\n\t\tblurRealGrid( density, blurden, blurSig)\n\t\tinterpolateGrid( target=target_density, source=blurden )\n\n\t\tblurMacGrid( vel, blurvel, 
blurSig)\n\t\tinterpolateMACGrid( target=target_vel, source=blurvel )\n\t\ttarget_vel.multConst( vec3(targetFac) )\n\n\t# save\n\tif 0 and t%2==0: \n\t\tframeNr = t / 2\n\t\tframedir = \"frame_%04d\" % frameNr\n\t\tos.mkdir( framedir )\n\n\t\ttarget_vel.save(\"%s/vel_low_%04d_%04d.uni\" % (framedir,simId,frameNr) )\n\t\ttarget_density.save(\"%s/density_low_%04d_%04d.uni\" % (framedir,simId,frameNr) )\n\t\tdensity.save(\"%s/density_high_%04d_%04d.uni\" % (framedir,simId,frameNr) )\n\t\n\t\t#gui.screenshot( 'plume_%04d.png' % frameNr );\n\n\tif savenpz and t%2==0: \n\t\ttf = t / 2\n\t\tprint(\"Writing NPZs for frame %d\"%tf)\n\t\tcopyGridToArrayReal( target=target_arR, source=target_density )\n\t\tnp.savez_compressed( simPath + 'density_low_%04d.npz' % (tf), target_arR )\n\t\tcopyGridToArrayVec3( target=target_arV, source=target_vel )\n\t\tnp.savez_compressed( simPath + 'velocity_low_%04d.npz' % (tf), target_arV )\n\t\tcopyGridToArrayReal( target=arR, source=density )\n\t\tnp.savez_compressed( simPath + 'density_high_%04d.npz' % (tf), arR )\n\t\tcopyGridToArrayVec3( target=arV, source=vel )\n\t\tnp.savez_compressed( simPath + 'velocity_high_%04d.npz' % (tf), arV )\n\n\n\ttargs.step() \n\n\n"
] | [
[
"numpy.savez_compressed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
13952522076/RandLA-Net-pytorch | [
"2fc6cfa2c9376d98582678bccfebf51bd2f316e8"
] | [
"pytorch_utils.py"
] | [
"import torch.nn as nn\nfrom typing import List, Tuple\n\n\nclass SharedMLP(nn.Sequential):\n\n def __init__(\n self,\n args: List[int],\n *,\n bn: bool = False,\n activation=nn.ReLU(inplace=True),\n preact: bool = False,\n first: bool = False,\n name: str = \"\",\n instance_norm: bool = False\n ):\n super().__init__()\n\n for i in range(len(args) - 1):\n self.add_module(\n name + 'layer{}'.format(i),\n Conv2d(\n args[i],\n args[i + 1],\n bn=(not first or not preact or (i != 0)) and bn,\n activation=activation\n if (not first or not preact or (i != 0)) else None,\n preact=preact,\n instance_norm=instance_norm\n )\n )\n\n\nclass _ConvBase(nn.Sequential):\n\n def __init__(\n self,\n in_size,\n out_size,\n kernel_size,\n stride,\n padding,\n activation,\n bn,\n init,\n conv=None,\n batch_norm=None,\n bias=True,\n preact=False,\n name=\"\",\n instance_norm=False,\n instance_norm_func=None\n ):\n super().__init__()\n\n bias = bias and (not bn)\n conv_unit = conv(\n in_size,\n out_size,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n bias=bias\n )\n init(conv_unit.weight)\n if bias:\n nn.init.constant_(conv_unit.bias, 0)\n\n if bn:\n if not preact:\n bn_unit = batch_norm(out_size)\n else:\n bn_unit = batch_norm(in_size)\n if instance_norm:\n if not preact:\n in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)\n else:\n in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)\n\n if preact:\n if bn:\n self.add_module(name + 'bn', bn_unit)\n\n if activation is not None:\n self.add_module(name + 'activation', activation)\n\n if not bn and instance_norm:\n self.add_module(name + 'in', in_unit)\n\n self.add_module(name + 'conv', conv_unit)\n\n if not preact:\n if bn:\n self.add_module(name + 'bn', bn_unit)\n\n if activation is not None:\n self.add_module(name + 'activation', activation)\n\n if not bn and instance_norm:\n self.add_module(name + 'in', in_unit)\n\n\nclass _BNBase(nn.Sequential):\n\n def __init__(self, in_size, batch_norm=None, name=\"\"):\n super().__init__()\n self.add_module(name + \"bn\", batch_norm(in_size, eps=1e-6, momentum=0.99))\n\n nn.init.constant_(self[0].weight, 1.0)\n nn.init.constant_(self[0].bias, 0)\n\n\nclass BatchNorm1d(_BNBase):\n\n def __init__(self, in_size: int, *, name: str = \"\"):\n super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)\n\n\nclass BatchNorm2d(_BNBase):\n\n def __init__(self, in_size: int, name: str = \"\"):\n super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)\n\n\nclass Conv1d(_ConvBase):\n\n def __init__(\n self,\n in_size: int,\n out_size: int,\n *,\n kernel_size: int = 1,\n stride: int = 1,\n padding: int = 0,\n activation=nn.LeakyReLU(negative_slope=0.2, inplace=True),\n bn: bool = False,\n init=nn.init.kaiming_normal_,\n bias: bool = True,\n preact: bool = False,\n name: str = \"\",\n instance_norm=False\n ):\n super().__init__(\n in_size,\n out_size,\n kernel_size,\n stride,\n padding,\n activation,\n bn,\n init,\n conv=nn.Conv1d,\n batch_norm=BatchNorm1d,\n bias=bias,\n preact=preact,\n name=name,\n instance_norm=instance_norm,\n instance_norm_func=nn.InstanceNorm1d\n )\n\n\nclass Conv2d(_ConvBase):\n\n def __init__(\n self,\n in_size: int,\n out_size: int,\n *,\n kernel_size: Tuple[int, int] = (1, 1),\n stride: Tuple[int, int] = (1, 1),\n padding: Tuple[int, int] = (0, 0),\n activation=nn.LeakyReLU(negative_slope=0.2, inplace=True),\n bn: bool = False,\n init=nn.init.kaiming_normal_,\n bias: bool = True,\n preact: bool = False,\n name: str = \"\",\n 
instance_norm=False\n ):\n super().__init__(\n in_size,\n out_size,\n kernel_size,\n stride,\n padding,\n activation,\n bn,\n init,\n conv=nn.Conv2d,\n batch_norm=BatchNorm2d,\n bias=bias,\n preact=preact,\n name=name,\n instance_norm=instance_norm,\n instance_norm_func=nn.InstanceNorm2d\n )\n\n\nclass FC(nn.Sequential):\n\n def __init__(\n self,\n in_size: int,\n out_size: int,\n *,\n activation=nn.ReLU(inplace=True),\n bn: bool = False,\n init=None,\n preact: bool = False,\n name: str = \"\"\n ):\n super().__init__()\n\n fc = nn.Linear(in_size, out_size, bias=not bn)\n if init is not None:\n init(fc.weight)\n if not bn:\n nn.init.constant(fc.bias, 0)\n\n if preact:\n if bn:\n self.add_module(name + 'bn', BatchNorm1d(in_size))\n\n if activation is not None:\n self.add_module(name + 'activation', activation)\n\n self.add_module(name + 'fc', fc)\n\n if not preact:\n if bn:\n self.add_module(name + 'bn', BatchNorm1d(out_size))\n\n if activation is not None:\n self.add_module(name + 'activation', activation)\n\n\ndef set_bn_momentum_default(bn_momentum):\n\n def fn(m):\n if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n m.momentum = bn_momentum\n\n return fn\n\n\nclass BNMomentumScheduler(object):\n\n def __init__(\n self, model, bn_lambda, last_epoch=-1,\n setter=set_bn_momentum_default\n ):\n if not isinstance(model, nn.Module):\n raise RuntimeError(\n \"Class '{}' is not a PyTorch nn Module\".format(\n type(model).__name__\n )\n )\n\n self.model = model\n self.setter = setter\n self.lmbd = bn_lambda\n\n self.step(last_epoch + 1)\n self.last_epoch = last_epoch\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n\n self.last_epoch = epoch\n self.model.apply(self.setter(self.lmbd(epoch)))\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.init.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
s-santoro/lunch-crawler | [
"1e39b1d35d76067a55b2c034d0488a6ec53f8a45"
] | [
"classification/scripts/MLTuningAdaboost.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# Imports\nfrom luigi.contrib.spark import PySparkTask\nfrom luigi.parameter import IntParameter\nfrom luigi import LocalTarget, Task, WrapperTask\nfrom luigi.format import UTF8\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nfrom configs.Configurations import Configurations\n\n'''bigrams'''\nfrom Preprocessor import Preprocessor\n\n'''Features'''\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import MaxAbsScaler\n\n'''Classifiers'''\nfrom sklearn.linear_model import SGDClassifier, Perceptron\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n\n'''Metrics/Evaluation'''\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix\nfrom scipy import interp\nfrom itertools import cycle\n\n'''Plotting'''\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nclass MLTuningAdaboost(Task):\n # Date for Output-File prefix\n from datetime import date\n date = datetime.datetime.now()\n configId = IntParameter(default=0)\n\n # Method to declare the Output-File\n def output(self):\n prefix = self.date.strftime(\"%Y-%m-%dT%H%M%S\")\n return LocalTarget(\"../data/%s_configID_%s_MLTuningAdaboost.csv\" % (prefix, self.configId), format=UTF8)\n\n # Method to define the required Task (Importer)\n def requires(self):\n return Preprocessor(self.configId)\n\n # Prepare prprocessed data for ML evaluation\n def run(self):\n # use configID from commandline\n configs = Configurations().configs[self.configId]\n eval_dict = {}\n\n # parameters for config\n prefix = self.date.strftime(\"%Y-%m-%dT%H%M%S\")\n # train test split\n test_size = configs.get(\"test_size\")\n shuffle = configs.get(\"shuffle\")\n random_state = configs.get(\"random_state\")\n # bag of words\n max_features = configs.get(\"maxFeatures\")\n binary = configs.get(\"binary\")\n # tfidf\n ngram_range = configs.get(\"ngram_range\")\n min_df = configs.get(\"min_df\")\n max_df = configs.get(\"max_df\")\n max_features_tfidf = configs.get(\"maxFeaturesTFIDF\")\n # dimension reduction (truncated svd)\n n_components = configs.get(\"n_components\")\n n_iter = configs.get(\"n_iter\")\n\n input_df = pd.read_csv(self.input().path)\n cleaned_df = pd.DataFrame(columns=('text', 'cleaned_text', 'url', 'title', 'class'))\n\n # convert document['cleaned_text'] from string to list of words\n for index, document in input_df.iterrows():\n text = document['cleaned_text']\n #print(\"MLClassifier\")\n #print(text)\n text = re.sub(r\"[',\\[\\]]\", \"\", text)\n wordlist = text.split(\" \")\n row = [document.text, wordlist, document.url, document.title, document['class']]\n cleaned_df.loc[index] = row\n\n # Preparing the dataframes\n # Splitting the df into the different classes\n df_menu = cleaned_df.loc[cleaned_df['class'] == 1]\n df_no_menu = cleaned_df.loc[cleaned_df['class'] == 0]\n\n # holdout is only used to have same scores as previously measured\n # without holdout scores are not identical to excel-table\n # Holding out 10 articles from each class for prediction at the end\n df_menu_holdout = df_menu.iloc[:20]\n df_no_menu_holdout = df_no_menu.iloc[:20]\n\n # the rest is used for ML 
evaluation\n df_menu = df_menu.iloc[20:]\n df_no_menu = df_no_menu.iloc[20:]\n\n # Appending the dfs back together\n cleaned_df = pd.concat([df_menu, df_no_menu])\n df_holdout = pd.concat([df_menu_holdout, df_no_menu_holdout])\n\n # Turning the labels into numbers\n labelEncoder = LabelEncoder()\n cleaned_df['class_num'] = labelEncoder.fit_transform(cleaned_df['class'])\n\n # Feature Extraction\n\n # Creating the features (tf-idf weights) for the processed text\n # Create tf-idf weights for data and target\n\n X = cleaned_df['cleaned_text'].astype('str') # features\n y = cleaned_df['class_num'].values # target\n\n # Train test split with stratified sampling for evaluation\n X_train, X_validation, y_train, y_validation = train_test_split(X,\n y,\n test_size=test_size,\n shuffle=shuffle,\n stratify=y,\n random_state=random_state)\n\n # Bag of Words\n if configs.get(\"use_BoW\"):\n vectorizer = CountVectorizer(max_features=max_features, binary=binary)\n\n X_train = vectorizer.fit_transform(X_train).toarray()\n X_validation = vectorizer.transform(X_validation).toarray()\n\n # Tf-Idf\n if configs.get(\"use_tfidf\"):\n tfidf_vectorizer = TfidfVectorizer(ngram_range=ngram_range,\n min_df=min_df,\n max_df=max_df,\n max_features=max_features_tfidf)\n\n X_train = tfidf_vectorizer.fit_transform(X_train).toarray()\n X_validation = tfidf_vectorizer.transform(X_validation).toarray()\n\n # Dimensionality reduction\n if configs.get(\"use_dimension_reduction\"):\n lsa = TruncatedSVD(n_components=n_components,\n n_iter=n_iter,\n random_state=random_state)\n\n X_train = lsa.fit_transform(X_train)\n X_validation = lsa.transform(X_validation)\n scaler = MinMaxScaler()\n X_train = scaler.fit_transform(X_train)\n X_validation = scaler.transform(X_validation)\n\n # calculate class weights\n # Bring unequal distribution of pos/neg samples into account\n negatives = np.count_nonzero(cleaned_df['class_num']==0)\n positives = np.count_nonzero(cleaned_df['class_num']==1)\n if positives > 0:\n ratio = negatives / positives\n class_weight = {0: 1., 1: ratio}\n else:\n class_weight = {0: 1., 1: 1.}\n\n\n params = configs.get(\"params\")\n\n gridsearch = GridSearchCV(\n AdaBoostClassifier(random_state=random_state),\n params,\n cv = 5, \n n_jobs = -1,\n scoring=\"f1\")\n gridsearch.fit(X_train, y_train)\n\n before_tuning = AdaBoostClassifier(random_state=random_state)\n before_tuning.fit(X_train, y_train)\n pred_not_tuned = before_tuning.predict(X_validation)\n\n best_model = gridsearch.best_estimator_\n pred_tuned = best_model.predict(X_validation)\n\n models_report = \"Adaboost\"\n models_report += \"\\n\"\n models_report += \"----------------------------------------------------\"\n models_report += \"\\n\"\n models_report += \"Default parameters:\"\n models_report += \"\\n\"\n models_report += str(before_tuning.get_params)\n models_report += \"\\n\"\n models_report += \"----------------------------------------------------\"\n models_report += \"\\n\"\n models_report += str(\"f1 score before hyperparameter-tuning: %s\" % str(f1_score(y_validation, pred_not_tuned)))\n models_report += \"\\n\"\n models_report += str(\"precision score before hyperparameter-tuning: %s\" % str(precision_score(y_validation, pred_not_tuned)))\n models_report += \"\\n\"\n models_report += str(\"recall score before hyperparameter-tuning: %s\" % str(recall_score(y_validation, pred_not_tuned)))\n models_report += \"\\n\"\n models_report += \"----------------------------------------------------\"\n models_report += \"\\n\"\n\n models_report += 
\"\\n\"\n models_report += \"Best parameters set found:\"\n models_report += \"\\n\"\n models_report += str(gridsearch.best_params_)\n models_report += \"\\n\"\n\n models_report += \"----------------------------------------------------\"\n models_report += \"\\n\"\n models_report += str(\"f1 score after hyperparameter-tuning: %s\" % str(f1_score(y_validation, pred_tuned)))\n models_report += \"\\n\"\n models_report += str(\"precision score after hyperparameter-tuning: %s\" % str(precision_score(y_validation, pred_tuned)))\n models_report += \"\\n\"\n models_report += str(\"recall score after hyperparameter-tuning: %s\" % str(recall_score(y_validation, pred_tuned)))\n models_report += \"\\n\"\n models_report += \"\\n\"\n models_report += \"config:\\n\"\n for key in configs:\n models_report += \"\\t%s:\" % str(key)\n x = len(str(key))\n while x < 35:\n x += 1\n models_report += \" \"\n models_report += \"%s\\n\" % str(configs.get(key))\n\n # write report to file\n filename = \"../data/parameter_tuning/adaboost_%s.txt\" % (prefix)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, \"w\")\n f.write(models_report)\n f.close()\n #fig.savefig(\"../data/models_report/models-comparison_%s_%s.png\" % (self.configId, prefix))\n\n # Write .csv-File\n with self.output().open(\"w\") as out:\n cleaned_df.to_csv(out, encoding=\"utf-8\")\n\n"
] | [
[
"sklearn.decomposition.TruncatedSVD",
"pandas.concat",
"sklearn.metrics.precision_score",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.ensemble.AdaBoostClassifier",
"numpy.count_nonzero",
"sklearn.metrics.f1_score",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.recall_score",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Chrisa142857/CompressAI | [
"75760096b9700a58d346351251d544050f3418fb"
] | [
"examples/codec.py"
] | [
"# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport struct\nimport sys\nimport time\n\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\n\nfrom PIL import Image\nfrom torchvision.transforms import ToPILImage, ToTensor\n\nimport compressai\n\nfrom compressai.zoo import models\n\nmodel_ids = {k: i for i, k in enumerate(models.keys())}\n\nmetric_ids = {\n \"mse\": 0,\n}\n\n\ndef inverse_dict(d):\n # We assume dict values are unique...\n assert len(d.keys()) == len(set(d.keys()))\n return {v: k for k, v in d.items()}\n\n\ndef filesize(filepath: str) -> int:\n if not Path(filepath).is_file():\n raise ValueError(f'Invalid file \"{filepath}\".')\n return Path(filepath).stat().st_size\n\n\ndef load_image(filepath: str) -> Image.Image:\n return Image.open(filepath).convert(\"RGB\")\n\n\ndef img2torch(img: Image.Image) -> torch.Tensor:\n return ToTensor()(img).unsqueeze(0)\n\n\ndef torch2img(x: torch.Tensor) -> Image.Image:\n return ToPILImage()(x.clamp_(0, 1).squeeze())\n\n\ndef write_uints(fd, values, fmt=\">{:d}I\"):\n fd.write(struct.pack(fmt.format(len(values)), *values))\n\n\ndef write_uchars(fd, values, fmt=\">{:d}B\"):\n fd.write(struct.pack(fmt.format(len(values)), *values))\n\n\ndef read_uints(fd, n, fmt=\">{:d}I\"):\n sz = struct.calcsize(\"I\")\n return struct.unpack(fmt.format(n), fd.read(n * sz))\n\n\ndef read_uchars(fd, n, fmt=\">{:d}B\"):\n sz = struct.calcsize(\"B\")\n return struct.unpack(fmt.format(n), fd.read(n * sz))\n\n\ndef write_bytes(fd, values, fmt=\">{:d}s\"):\n if len(values) == 0:\n return\n fd.write(struct.pack(fmt.format(len(values)), values))\n\n\ndef read_bytes(fd, n, fmt=\">{:d}s\"):\n sz = struct.calcsize(\"s\")\n return struct.unpack(fmt.format(n), fd.read(n * sz))[0]\n\n\ndef get_header(model_name, metric, quality):\n \"\"\"Format header information:\n - 1 byte for model id\n - 4 bits for metric\n - 4 bits for quality param\n \"\"\"\n metric = metric_ids[metric]\n code = (metric << 4) | (quality - 1 & 0x0F)\n return model_ids[model_name], code\n\n\ndef parse_header(header):\n \"\"\"Read header information from 2 bytes:\n - 1 byte for model id\n - 4 bits for metric\n - 4 bits for quality param\n \"\"\"\n model_id, code = header\n quality = (code & 0x0F) + 1\n metric = code >> 4\n return (\n inverse_dict(model_ids)[model_id],\n inverse_dict(metric_ids)[metric],\n quality,\n )\n\n\ndef pad(x, p=2 ** 6):\n h, w = x.size(2), x.size(3)\n H = (h + p - 1) // p * p\n W = (w + p - 1) // p * p\n padding_left = (W - w) // 2\n padding_right = W - w - padding_left\n padding_top = (H - h) // 2\n padding_bottom = H - h - padding_top\n return F.pad(\n x,\n (padding_left, padding_right, padding_top, padding_bottom),\n mode=\"constant\",\n value=0,\n )\n\n\ndef crop(x, size):\n H, W = x.size(2), x.size(3)\n h, w = size\n padding_left = (W - w) // 2\n padding_right = W - w - padding_left\n padding_top = (H - h) // 2\n padding_bottom = H - h - padding_top\n return F.pad(\n x,\n 
(-padding_left, -padding_right, -padding_top, -padding_bottom),\n mode=\"constant\",\n value=0,\n )\n\n\ndef _encode(image, model, metric, quality, coder, output):\n compressai.set_entropy_coder(coder)\n enc_start = time.time()\n\n img = load_image(image)\n start = time.time()\n net = models[model](quality=quality, metric=metric, pretrained=True).eval()\n load_time = time.time() - start\n\n x = img2torch(img)\n h, w = x.size(2), x.size(3)\n p = 64 # maximum 6 strides of 2\n x = pad(x, p)\n\n with torch.no_grad():\n out = net.compress(x)\n\n shape = out[\"shape\"]\n header = get_header(model, metric, quality)\n\n with Path(output).open(\"wb\") as f:\n write_uchars(f, header)\n # write original image size\n write_uints(f, (h, w))\n # write shape and number of encoded latents\n write_uints(f, (shape[0], shape[1], len(out[\"strings\"])))\n for s in out[\"strings\"]:\n write_uints(f, (len(s[0]),))\n write_bytes(f, s[0])\n\n enc_time = time.time() - enc_start\n size = filesize(output)\n bpp = float(size) * 8 / (img.size[0] * img.size[1])\n print(\n f\"{bpp:.3f} bpp |\"\n f\" Encoded in {enc_time:.2f}s (model loading: {load_time:.2f}s)\"\n )\n\n\ndef _decode(inputpath, coder, show, output=None):\n compressai.set_entropy_coder(coder)\n\n dec_start = time.time()\n with Path(inputpath).open(\"rb\") as f:\n model, metric, quality = parse_header(read_uchars(f, 2))\n original_size = read_uints(f, 2)\n shape = read_uints(f, 2)\n strings = []\n n_strings = read_uints(f, 1)[0]\n for _ in range(n_strings):\n s = read_bytes(f, read_uints(f, 1)[0])\n strings.append([s])\n\n print(f\"Model: {model:s}, metric: {metric:s}, quality: {quality:d}\")\n start = time.time()\n net = models[model](quality=quality, metric=metric, pretrained=True).eval()\n load_time = time.time() - start\n\n with torch.no_grad():\n out = net.decompress(strings, shape)\n\n x_hat = crop(out[\"x_hat\"], original_size)\n img = torch2img(x_hat)\n dec_time = time.time() - dec_start\n print(f\"Decoded in {dec_time:.2f}s (model loading: {load_time:.2f}s)\")\n\n if show:\n show_image(img)\n if output is not None:\n img.save(output)\n\n\ndef show_image(img: Image.Image):\n from matplotlib import pyplot as plt\n\n fig, ax = plt.subplots()\n ax.axis(\"off\")\n ax.title.set_text(\"Decoded image\")\n ax.imshow(img)\n fig.tight_layout()\n plt.show()\n\n\ndef encode(argv):\n parser = argparse.ArgumentParser(description=\"Encode image to bit-stream\")\n parser.add_argument(\"image\", type=str)\n parser.add_argument(\n \"--model\",\n choices=models.keys(),\n default=list(models.keys())[0],\n help=\"NN model to use (default: %(default)s)\",\n )\n parser.add_argument(\n \"-m\",\n \"--metric\",\n choices=[\"mse\"],\n default=\"mse\",\n help=\"metric trained against (default: %(default)s\",\n )\n parser.add_argument(\n \"-q\",\n \"--quality\",\n choices=list(range(1, 9)),\n type=int,\n default=3,\n help=\"Quality setting (default: %(default)s)\",\n )\n parser.add_argument(\n \"-c\",\n \"--coder\",\n choices=compressai.available_entropy_coders(),\n default=compressai.available_entropy_coders()[0],\n help=\"Entropy coder (default: %(default)s)\",\n )\n parser.add_argument(\"-o\", \"--output\", help=\"Output path\")\n args = parser.parse_args(argv)\n if not args.output:\n args.output = Path(Path(args.image).resolve().name).with_suffix(\".bin\")\n\n _encode(args.image, args.model, args.metric, args.quality, args.coder, args.output)\n\n\ndef decode(argv):\n parser = argparse.ArgumentParser(description=\"Decode bit-stream to imager\")\n 
parser.add_argument(\"input\", type=str)\n parser.add_argument(\n \"-c\",\n \"--coder\",\n choices=compressai.available_entropy_coders(),\n default=compressai.available_entropy_coders()[0],\n help=\"Entropy coder (default: %(default)s)\",\n )\n parser.add_argument(\"--show\", action=\"store_true\")\n parser.add_argument(\"-o\", \"--output\", help=\"Output path\")\n args = parser.parse_args(argv)\n _decode(args.input, args.coder, args.show, args.output)\n\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\"command\", choices=[\"encode\", \"decode\"])\n args = parser.parse_args(argv)\n return args\n\n\ndef main(argv):\n args = parse_args(argv[1:2])\n argv = argv[2:]\n torch.set_num_threads(1) # just to be sure\n if args.command == \"encode\":\n encode(argv)\n elif args.command == \"decode\":\n decode(argv)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n"
] | [
[
"matplotlib.pyplot.subplots",
"torch.no_grad",
"torch.set_num_threads",
"matplotlib.pyplot.show",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mindspore-ai/mindarmour | [
"a5db0825fa06e4da870c0a850a18b374e8cdd086"
] | [
"mindarmour/adv_robustness/detectors/ensemble_detector.py"
] | [
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nEnsemble Detector.\n\"\"\"\nimport numpy as np\n\nfrom mindarmour.utils.logger import LogUtil\nfrom mindarmour.utils._check_param import check_numpy_param, \\\n check_param_multi_types\nfrom .detector import Detector\n\nLOGGER = LogUtil.get_instance()\nTAG = 'EnsembleDetector'\n\n\nclass EnsembleDetector(Detector):\n \"\"\"\n Ensemble detector.\n\n Args:\n detectors (Union[tuple, list]): List of detector methods.\n policy (str): Decision policy, could be 'vote', 'all' or 'any'.\n Default: 'vote'\n \"\"\"\n\n def __init__(self, detectors, policy=\"vote\"):\n super(EnsembleDetector, self).__init__()\n self._detectors = check_param_multi_types('detectors', detectors,\n [list, tuple])\n self._num_detectors = len(detectors)\n self._policy = policy\n\n def fit(self, inputs, labels=None):\n \"\"\"\n Fit detector like a machine learning model. This method is not available\n in this class.\n\n Args:\n inputs (numpy.ndarray): Data to calculate the threshold.\n labels (numpy.ndarray): Labels of data. Default: None.\n\n Raises:\n NotImplementedError: This function is not available in ensemble.\n \"\"\"\n msg = 'The function fit() is not available in the class ' \\\n '`EnsembleDetector`.'\n LOGGER.error(TAG, msg)\n raise NotImplementedError(msg)\n\n def detect(self, inputs):\n \"\"\"\n Detect adversarial examples from input samples.\n\n Args:\n inputs (numpy.ndarray): Input samples.\n\n Returns:\n list[int], whether a sample is adversarial. 
if res[i]=1, then the\n input sample with index i is adversarial.\n\n Raises:\n ValueError: If policy is not supported.\n \"\"\"\n\n inputs = check_numpy_param('inputs', inputs)\n x_len = inputs.shape[0]\n counts = np.zeros(x_len)\n res = np.zeros(x_len, dtype=np.int)\n for detector in list(self._detectors):\n idx = detector.detect(inputs)\n counts[idx] += 1\n\n if self._policy == \"vote\":\n idx_adv = np.argwhere(counts > self._num_detectors / 2)\n elif self._policy == \"all\":\n idx_adv = np.argwhere(counts == self._num_detectors)\n elif self._policy == \"any\":\n idx_adv = np.argwhere(counts > 0)\n else:\n msg = 'Policy {} is not supported.'.format(self._policy)\n LOGGER.error(TAG, msg)\n raise ValueError(msg)\n res[idx_adv] = 1\n return list(res)\n\n def detect_diff(self, inputs):\n \"\"\"\n This method is not available in this class.\n\n Args:\n inputs (Union[numpy.ndarray, list, tuple]): Data been used as\n references to create adversarial examples.\n\n Raises:\n NotImplementedError: This function is not available in ensemble.\n \"\"\"\n msg = 'The function detect_diff() is not available in the class ' \\\n '`EnsembleDetector`.'\n LOGGER.error(TAG, msg)\n raise NotImplementedError(msg)\n\n def transform(self, inputs):\n \"\"\"\n Filter adversarial noises in input samples.\n This method is not available in this class.\n\n Args:\n inputs (Union[numpy.ndarray, list, tuple]): Data been used as\n references to create adversarial examples.\n\n Raises:\n NotImplementedError: This function is not available in ensemble.\n \"\"\"\n msg = 'The function transform() is not available in the class ' \\\n '`EnsembleDetector`.'\n LOGGER.error(TAG, msg)\n raise NotImplementedError(msg)\n"
] | [
[
"numpy.zeros",
"numpy.argwhere"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zachkeer/ReAgent | [
"3e5eb0391050c39b9d4707020f9ee15d860f28cb"
] | [
"reagent/test/workflow/test_query_data.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\nimport unittest\n\nimport numpy as np\n\n# pyre-fixme[21]: Could not find `pytest`.\nimport pytest\n\n# pyre-fixme[21]: Could not find `pyspark`.\nfrom pyspark.sql.functions import asc\n\n# pyre-fixme[21]: Could not find `workflow`.\nfrom reagent.test.workflow.reagent_sql_test_base import ReagentSQLTestBase\n\n# pyre-fixme[21]: Could not find module `reagent.test.workflow.test_data.ex_mdps`.\nfrom reagent.test.workflow.test_data.ex_mdps import generate_discrete_mdp_pandas_df\nfrom reagent.workflow.data_fetcher import query_data\nfrom reagent.workflow.types import Dataset, TableSpec\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_data_discrete(sqlCtx, multi_steps: bool, table_name: str):\n # pyre-fixme[16]: Module `test` has no attribute `workflow`.\n df, _ = generate_discrete_mdp_pandas_df(\n multi_steps=multi_steps, use_seq_num_diff_as_time_diff=False\n )\n df = sqlCtx.createDataFrame(df)\n logger.info(\"Created dataframe\")\n df.show()\n df.createOrReplaceTempView(table_name)\n\n\n# pyre-fixme[11]: Annotation `ReagentSQLTestBase` is not defined as a type.\nclass TestQueryData(ReagentSQLTestBase):\n def setUp(self):\n super().setUp()\n logging.getLogger(__name__).setLevel(logging.INFO)\n self.table_name = \"test_table\"\n logger.info(f\"Table name is {self.table_name}\")\n\n def generate_data(self, multi_steps=False):\n generate_data_discrete(\n self.sqlCtx, multi_steps=multi_steps, table_name=self.table_name\n )\n\n def _discrete_read_data(\n self, custom_reward_expression=None, gamma=None, multi_steps=None\n ):\n ts = TableSpec(table_name=self.table_name)\n dataset: Dataset = query_data(\n input_table_spec=ts,\n discrete_action=True,\n actions=[\"L\", \"R\", \"U\", \"D\"],\n custom_reward_expression=custom_reward_expression,\n multi_steps=multi_steps,\n gamma=gamma,\n )\n df = self.sqlCtx.read.parquet(dataset.parquet_url)\n df = df.orderBy(asc(\"sequence_number\"))\n logger.info(\"Read parquet dataframe: \")\n df.show()\n return df\n\n @pytest.mark.serial\n def test_query_data(self):\n # single step\n self.generate_data()\n df = self._discrete_read_data()\n df = df.toPandas()\n self.verify_discrete_single_step_except_rewards(df)\n self.assertEq(df[\"reward\"], np.array([0.0, 1.0, 4.0, 5.0], dtype=\"float32\"))\n logger.info(\"discrete single-step seems fine\")\n\n # single step with reward := reward^3 + 10\n df = self._discrete_read_data(custom_reward_expression=\"POWER(reward, 3) + 10\")\n df = df.toPandas()\n self.verify_discrete_single_step_except_rewards(df)\n self.assertEq(\n df[\"reward\"], np.array([10.0, 11.0, 74.0, 135.0], dtype=\"float32\")\n )\n logger.info(\"discrete single-step custom reward seems fine\")\n\n # multi-step\n gamma = 0.9\n self.generate_data(multi_steps=True)\n df = self._discrete_read_data(multi_steps=2, gamma=gamma)\n df = df.toPandas()\n self.verify_discrete_multi_steps_except_rewards(df)\n self.assertAllClose(\n df[\"reward\"],\n np.array(\n [gamma * 1, 1 * 1.0 + gamma * 4, 1 * 4.0 + gamma * 5, 1 * 5.0],\n dtype=\"float32\",\n ),\n )\n logger.info(\"discrete multi-step seems fine.\")\n\n def verify_discrete_single_step_except_rewards(self, df):\n \"\"\" expects a pandas dataframe \"\"\"\n self.assertEq(df[\"sequence_number\"], np.array([1, 2, 3, 4], dtype=\"int32\"))\n\n state_features_presence = np.array(\n [\n [True, False, False, False, False],\n [False, True, False, False, False],\n [False, False, True, False, False],\n 
[False, False, False, True, False],\n ],\n dtype=\"bool\",\n )\n self.assertEq(df[\"state_features_presence\"], state_features_presence)\n state_features = np.array(\n [\n [1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0],\n ],\n dtype=\"float32\",\n )\n self.assertEqWithPresence(\n df[\"state_features\"], state_features_presence, state_features\n )\n\n self.assertEq(df[\"action\"], np.array([0, 1, 2, 3]))\n self.assertEq(\n df[\"action_probability\"], np.array([0.3, 0.4, 0.5, 0.6], dtype=\"float32\")\n )\n self.assertEq(df[\"not_terminal\"], np.array([1, 1, 1, 0], dtype=\"bool\"))\n next_state_features_presence = np.array(\n [\n [False, True, False, False, False],\n [False, False, True, False, False],\n [False, False, False, True, False],\n [False, False, False, False, True],\n ],\n dtype=\"bool\",\n )\n self.assertEq(df[\"next_state_features_presence\"], next_state_features_presence)\n next_state_features = np.array(\n [\n [0.0, 1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0],\n ],\n dtype=\"float32\",\n )\n self.assertEqWithPresence(\n df[\"next_state_features\"], next_state_features_presence, next_state_features\n )\n\n self.assertEq(df[\"next_action\"], np.array([1, 2, 3, 4]))\n self.assertEq(df[\"time_diff\"], np.array([1, 3, 1, 1]))\n self.assertEq(df[\"step\"], np.array([1, 1, 1, 1]))\n self.assertEq(\n df[\"possible_actions_mask\"],\n np.array([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]]),\n )\n self.assertEq(\n df[\"possible_next_actions_mask\"],\n np.array([[0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]),\n )\n\n def verify_discrete_multi_steps_except_rewards(self, df):\n self.assertEq(df[\"sequence_number\"], np.array([1, 2, 3, 4], dtype=\"int32\"))\n\n state_features_presence = np.array(\n [\n [True, False, False, False, False],\n [False, True, False, False, False],\n [False, False, True, False, False],\n [False, False, False, True, False],\n ],\n dtype=\"bool\",\n )\n self.assertEq(df[\"state_features_presence\"], state_features_presence)\n state_features = np.array(\n [\n [1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0],\n ],\n dtype=\"float32\",\n )\n self.assertEqWithPresence(\n df[\"state_features\"], state_features_presence, state_features\n )\n\n self.assertEq(df[\"action\"], np.array([0, 1, 2, 3]))\n self.assertEq(\n df[\"action_probability\"], np.array([0.3, 0.4, 0.5, 0.6], dtype=\"float32\")\n )\n self.assertEq(df[\"not_terminal\"], np.array([1, 1, 0, 0], dtype=\"bool\"))\n\n next_state_features_presence = np.array(\n [\n [False, False, True, False, False],\n [False, False, False, True, False],\n [False, False, False, False, True],\n [False, False, False, False, True],\n ],\n dtype=\"bool\",\n )\n self.assertEq(df[\"next_state_features_presence\"], next_state_features_presence)\n next_state_features = np.array(\n [\n [0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.0],\n ],\n dtype=\"float32\",\n )\n self.assertEqWithPresence(\n df[\"next_state_features\"], next_state_features_presence, next_state_features\n )\n\n self.assertEq(df[\"next_action\"], np.array([2, 3, 4, 4]))\n self.assertEq(df[\"time_diff\"], np.array([1, 1, 1, 1]))\n self.assertEq(df[\"step\"], np.array([2, 2, 2, 1]))\n self.assertEq(\n df[\"possible_actions_mask\"],\n np.array([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]]),\n 
)\n self.assertEq(\n df[\"possible_next_actions_mask\"],\n np.array([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
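The multi-step branch of the test above expects rewards of [gamma*1, 1 + gamma*4, 4 + gamma*5, 5]. A minimal sketch of where those numbers come from, assuming a 2-step discounted return computed over the single-step rewards [0, 1, 4, 5] asserted earlier and truncated at the end of the episode:

```python
import numpy as np

gamma = 0.9
rewards = np.array([0.0, 1.0, 4.0, 5.0], dtype="float32")  # per-step rewards of the toy MDP

# 2-step discounted return: r_t + gamma * r_{t+1}, with the lookahead dropped at episode end
multi_step = np.array(
    [rewards[t] + (gamma * rewards[t + 1] if t + 1 < len(rewards) else 0.0) for t in range(len(rewards))],
    dtype="float32",
)
print(multi_step)  # [0.9, 4.6, 8.5, 5.0], matching the expected multi-step rewards above
```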
LianShuaiLong/deeplearning_tools | [
"5ce326c61efeb0b79c8afbb56a8256d3c38c9889"
] | [
"start_training_with_all_avaliable_gpus/start.py"
] | [
"#-*-coding:utf-8-*-\nimport sys\nimport os\nimport json\nimport numpy as np\nimport multiprocessing\nimport datetime\nimport pynvml\nfrom pynvml import *\n\nnvmlInit()\n\nMEMORY_THESHOLD = 15 # GB\ndef get_aviliable_gpus():\n print (\"Driver Version:\", nvmlSystemGetDriverVersion())\n deviceCount = nvmlDeviceGetCount()\n GPU_AVILIABLE=[]\n for i in range(deviceCount):\n handle = nvmlDeviceGetHandleByIndex(i)\n meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\n memo_total = meminfo.total/(1024*1024*1024)\n memo_used = meminfo.used/(1024*1024*1024)\n if memo_total>=MEMORY_THESHOLD and memo_used/memo_total<=0.2:\n GPU_AVILIABLE.append(i)\n if len(GPU_AVILIABLE)==0: \n print('No GPU Is Avilable!')\n sys.exit(0)\n else:\n print('Avilable GPUS:',GPU_AVILIABLE)\n return GPU_AVILIABLE\n\ndef get_search_space(cfg_path='search_space.json'):\n if not os.path.isfile(cfg_path):\n print('Cannot find search space file:{}'.format(cfg_path))\n sys.exit(0)\n search_space = json.loads(open(cfg_path,'r').read())\n print('search space\\n:',search_space)\n return search_space\n\ndef get_params(search_space):\n params={}\n for k,v in search_space.items():\n if v[\"type\"] == \"uniform\":\n value = np.random.uniform(v[\"value\"][0],v[\"value\"][1])\n elif v[\"type\"] == \"choice\":\n value = np.random.choice(v[\"value\"],1)\n params[k] = value[0] if isinstance(value,np.ndarray) else value\n return params\n \ndef start_running(*args,**kwargs):\n GPU_ID = int(args[0])\n print('start run train.py on GPU_ID:',GPU_ID)\n learning_rate = kwargs['learning_rate']\n batch_size = kwargs['batch_size']\n optimizer = kwargs['optimizer']\n max_number_of_steps = kwargs['max_number_of_steps']\n learning_rate_decay_type = kwargs['learning_rate_decay_type']\n print('pid:',os.getpid(),'running config:\\nlearning rate:',learning_rate,'batch_size:',batch_size,'optimizer:',optimizer,'max_num_of_steps:',max_number_of_steps,'learning_rate_decay_type:',learning_rate_decay_type)\n today = datetime.date.today()\n checkpoint_dir = '{}/{}'.format(today,os.getpid())\n try: \n os.makedirs(checkpoint_dir)\n except OSError:\n if not os.path.isdir(checkpoint_dir):\n raise\n os.popen('CUDA_VISIBLE_DEVICES={} python train.py --batch_size={} --max_number_of_steps={} --learning_rate={} --optimizer={} --checkpoint_dir={} --learning_rate_decay_type={}'.format(GPU_ID,batch_size,max_number_of_steps,learning_rate,optimizer,checkpoint_dir,learning_rate_decay_type),mode='w')\n\nif __name__=='__main__':\n\n GPU_AVILIABLE = get_aviliable_gpus()\n search_space = get_search_space('search_space.json')\n pp1=[multiprocessing.Process(target = start_running,args=(str(GPU_ID)),kwargs=get_params(search_space)) for i,GPU_ID in enumerate(GPU_AVILIABLE)]\n\n for p in pp1:\n p.start()\n\n for p in pp1:\n p.join()\n \n \n\n \n\n"
] | [
[
"numpy.random.uniform",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
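A minimal sketch of the pattern used in start.py above: sample one hyperparameter set per idle GPU from a JSON-style search space and launch one process per GPU. The search space, GPU list, and worker below are hypothetical stand-ins; note that `multiprocessing.Process` expects `args` to be a tuple, so the GPU id is passed as `(str(gpu_id),)` with a trailing comma rather than the bare string used in the original file.

```python
import multiprocessing
import numpy as np

# hypothetical stand-in for search_space.json
search_space = {
    "learning_rate": {"type": "uniform", "value": [1e-4, 1e-2]},
    "batch_size": {"type": "choice", "value": [32, 64, 128]},
}

def sample_params(space):
    """Draw one value per hyperparameter, mirroring get_params() above."""
    params = {}
    for name, spec in space.items():
        if spec["type"] == "uniform":
            params[name] = float(np.random.uniform(spec["value"][0], spec["value"][1]))
        elif spec["type"] == "choice":
            params[name] = np.random.choice(spec["value"], 1)[0]
    return params

def worker(gpu_id, **params):
    # stand-in for launching train.py pinned to one GPU
    print(f"GPU {gpu_id}: {params}")

if __name__ == "__main__":
    gpus = [0, 1]  # hypothetical list of idle GPUs
    procs = [
        multiprocessing.Process(target=worker, args=(str(g),), kwargs=sample_params(search_space))
        for g in gpus
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```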
GEbb4/matlab2py | [
"0522fc75c30d52dd724259df0f29f51a2b7219fe"
] | [
"src/matlab2py/figure_process.py"
] | [
"\"\"\"\nPlotting machinery\n\"\"\"\nimport ast\nimport logging\nimport queue\nimport sys\nimport threading\nimport tkinter as tk\nimport time\nfrom argparse import ArgumentParser\n\nimport matplotlib.pyplot as plt\nimport numpy as np # Needed for dynamic code.\nfrom matplotlib.font_manager import FontProperties\n\nfrom matlab2py.debug import create_filelog\n\nLOGGER = logging.getLogger(__name__)\n\n# Set the font family to Helvetica to match MATLAB.\nfont = FontProperties()\nfont.set_name(\"Helvetica\")\n\n\n\ndef rate_limited_true():\n \"\"\"Returns true after a given delay.\"\"\"\n time.sleep(0.05)\n return True\n\n\ndef parser():\n \"\"\"The argparser.\"\"\"\n parser = ArgumentParser()\n parser.add_argument(\"--num\", type=int)\n parser.add_argument(\"--color\", type=ast.literal_eval)\n parser.add_argument(\"--dpi\", type=float)\n return parser\n\n\ndef add_stdin_to_queue(input_queue):\n \"\"\"Listen to stdin for new data.\"\"\"\n while rate_limited_true():\n input_queue.put(sys.stdin.readline())\n print(input_queue)\n\n\ndef update_plot(input_queue, fig):\n \"\"\"Wait for things in the queue.\"\"\"\n while rate_limited_true():\n if not input_queue.empty():\n command = input_queue.get()\n if command:\n LOGGER.debug(f\"exec: {command}\")\n exec(command)\n\n\ndef make_thread(fn, args):\n \"\"\"Makes a new thread for this process.\"\"\"\n thread = threading.Thread(target=fn, args=args)\n thread.daemon = True\n thread.start()\n return thread\n\n\ndef main(args):\n fig = plt.figure(num=args.num, facecolor=args.color, dpi=args.dpi)\n\n # Set the toolbar to show at the top of the figure.\n # (I think this only works for Tk backends...)\n fig.canvas.toolbar.pack(side=tk.TOP, fill=tk.X)\n\n input_queue = queue.Queue()\n\n input_thread = make_thread(fn=add_stdin_to_queue, args=(input_queue,))\n plot_thread = make_thread(fn=update_plot, args=(input_queue, fig))\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(handlers=[create_filelog(\"figure\")])\n LOGGER.setLevel(logging.DEBUG)\n main(parser().parse_args())\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
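figure_process.py above keeps a matplotlib window responsive while one thread pushes commands from stdin into a queue and another thread executes them. A stripped-down sketch of that producer/consumer wiring, with a hard-coded command list standing in for stdin and plain `print` calls standing in for plotting:

```python
import queue
import threading
import time

def producer(q):
    # stand-in for add_stdin_to_queue(): push commands instead of reading sys.stdin
    for command in ["print('first command')", "print('second command')"]:
        q.put(command)
        time.sleep(0.05)

def consumer(q, timeout=1.0):
    # stand-in for update_plot(): poll the queue and exec whatever arrives
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            exec(q.get(timeout=0.1))
        except queue.Empty:
            pass

input_queue = queue.Queue()
threading.Thread(target=producer, args=(input_queue,), daemon=True).start()
consumer(input_queue)
```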
nluedema/kge | [
"0c7670692736af6d2073d32fab99c7361b66911f"
] | [
"kge/dataset.py"
] | [
"from __future__ import annotations\n\nimport os\nimport uuid\n\nimport torch\nfrom torch import Tensor\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport inspect\n\nfrom kge import Config, Configurable\nimport kge.indexing\nfrom kge.indexing import create_default_index_functions\nfrom kge.misc import kge_base_dir\n\nfrom typing import Dict, List, Any, Callable, Union, Optional\n\n\nclass Dataset(Configurable):\n \"\"\"Stores information about a dataset.\n\n This includes the number of entities, number of relations, splits containing tripels\n (e.g., to train, validate, test), indexes, and various metadata about these objects.\n Most of these objects can be lazy-loaded on first use.\n\n \"\"\"\n\n #: whether to about when an outdated cached dataset or index file is found\n _abort_when_cache_outdated = False\n\n def __init__(self, config, folder=None):\n \"\"\"Constructor for internal use.\n\n To load a dataset, use `Dataset.create()`.\"\"\"\n super().__init__(config, \"dataset\")\n\n #: directory in which dataset is stored\n self.folder = folder\n\n # read the number of entities and relations from the config, if present\n try:\n self._num_entities: Int = config.get(\"dataset.num_entities\")\n if self._num_entities < 0:\n self._num_entities = None\n except KeyError:\n self._num_entities: Int = None\n\n try:\n self._num_relations: Int = config.get(\"dataset.num_relations\")\n if self._num_relations < 0:\n self._num_relations = None\n except KeyError:\n self._num_relations: Int = None\n\n #: split-name to (n,3) int32 tensor\n self._triples: Dict[str, Tensor] = {}\n\n #: meta data that is part if this dataset. Indexed by key.\n self._meta: Dict[str, Any] = {}\n\n #: data derived automatically from the splits or meta data. Indexed by key.\n self._indexes: Dict[str, Any] = {}\n\n #: functions that compute and add indexes as needed; arguments are dataset and\n #: key. Index functions are expected to not recompute an index that is already\n #: present. 
Indexed by key (same key as in self._indexes)\n self.index_functions: Dict[str, Callable] = {}\n create_default_index_functions(self)\n\n ## LOADING ##########################################################################\n\n def ensure_available(self, key):\n \"\"\"Checks if key can be loaded\"\"\"\n if self.folder is None or not os.path.exists(self.folder):\n raise IOError(\n \"Dataset {} not found\".format(self.config.get(\"dataset.name\"))\n )\n filename = self.config.get(f\"dataset.files.{key}.filename\")\n if filename is None:\n raise IOError(\"Filename for key {} not specified in config\".format(key))\n if not os.path.exists(os.path.join(self.folder, filename)):\n raise IOError(\n \"File {} for key {} could not be found\".format(\n os.path.join(self.folder, filename), key\n )\n )\n\n @staticmethod\n def create(config: Config, preload_data: bool = True, folder: Optional[str] = None):\n \"\"\"Loads a dataset.\n\n If preload_data is set, loads entity and relation maps as well as all splits.\n Otherwise, this data is lazy loaded on first use.\n\n \"\"\"\n name = config.get(\"dataset.name\")\n if folder is None:\n folder = os.path.join(kge_base_dir(), \"data\", name)\n if os.path.isfile(os.path.join(folder, \"dataset.yaml\")):\n config.log(\"Loading configuration of dataset \" + name + \"...\")\n config.load(os.path.join(folder, \"dataset.yaml\"))\n\n dataset = Dataset(config, folder)\n if preload_data:\n dataset.entity_ids()\n dataset.relation_ids()\n for split in [\"train\", \"valid\", \"test\"]:\n dataset.split(split)\n return dataset\n\n @staticmethod\n def create_from(\n checkpoint: Dict,\n config: Config = None,\n dataset: Optional[Dataset] = None,\n preload_data=False,\n ) -> Dataset:\n \"\"\"Creates dataset based on a checkpoint.\n\n If a dataset is provided, only (!) its meta data will be updated with the values\n from the checkpoint. 
No further checks are performed.\n\n Args:\n checkpoint: loaded checkpoint\n config: config (should match the one of checkpoint if set)\n dataset: dataset to update\n preload_data: preload data\n\n Returns: created/updated dataset\n\n \"\"\"\n if config is None:\n config = Config.create_from(checkpoint)\n if dataset is None:\n dataset = Dataset.create(config, preload_data)\n if \"dataset\" in checkpoint:\n dataset_checkpoint = checkpoint[\"dataset\"]\n if (\n \"dataset.meta\" in dataset_checkpoint\n and dataset_checkpoint[\"meta\"] is not None\n ):\n dataset._meta.update(dataset_checkpoint[\"meta\"])\n dataset._num_entities = dataset_checkpoint[\"num_entities\"]\n dataset._num_relations = dataset_checkpoint[\"num_relations\"]\n return dataset\n\n def save_to(self, checkpoint: Dict, meta_keys: Optional[List[str]] = None) -> Dict:\n \"\"\"Adds meta data to a checkpoint\"\"\"\n dataset_checkpoint = {\n \"num_entities\": self.num_entities(),\n \"num_relations\": self.num_relations(),\n }\n checkpoint[\"dataset\"] = dataset_checkpoint\n if meta_keys is None:\n return checkpoint\n meta_checkpoint = {}\n for key in meta_keys:\n meta_checkpoint[key] = self.map_indexes(None, key)\n checkpoint[\"dataset\"][\"meta\"] = meta_checkpoint\n return checkpoint\n\n @staticmethod\n def _to_valid_filename(s):\n invalid_chars = \"\\n\\t\\\\/\"\n replacement_chars = \"ntbf\"\n trans = invalid_chars.maketrans(invalid_chars, replacement_chars)\n return s.translate(trans)\n\n @staticmethod\n def _load_triples(filename: str, delimiter=\"\\t\", use_pickle=False) -> Tensor:\n if use_pickle:\n # check if there is a pickled, up-to-date version of the file\n pickle_suffix = Dataset._to_valid_filename(f\"-{delimiter}.pckl\")\n pickle_filename = filename + pickle_suffix\n triples = Dataset._pickle_load_if_uptodate(None, pickle_filename, filename)\n if triples is not None:\n return triples\n\n # numpy loadtxt is very slow, use pandas instead\n triples = pd.read_csv(\n filename, sep=delimiter, dtype=np.int32, header=None, usecols=range(0, 3)\n ).to_numpy()\n triples = torch.from_numpy(triples)\n if use_pickle:\n Dataset._pickle_dump_atomic(triples, pickle_filename)\n return triples\n\n def load_triples(self, key: str) -> Tensor:\n \"Load or return the triples with the specified key.\"\n if key not in self._triples:\n self.ensure_available(key)\n filename = self.config.get(f\"dataset.files.{key}.filename\")\n filetype = self.config.get(f\"dataset.files.{key}.type\")\n if filetype != \"triples\":\n raise ValueError(\n \"Unexpected file type: \"\n f\"dataset.files.{key}.type='{filetype}', expected 'triples'\"\n )\n triples = Dataset._load_triples(\n os.path.join(self.folder, filename),\n use_pickle=self.config.get(\"dataset.pickle\"),\n )\n self.config.log(f\"Loaded {len(triples)} {key} triples\")\n self._triples[key] = triples\n\n return self._triples[key]\n\n @staticmethod\n def _load_map(\n filename: str,\n as_list: bool = False,\n delimiter: str = \"\\t\",\n ignore_duplicates=False,\n use_pickle=False,\n ) -> Union[List, Dict]:\n if use_pickle:\n # check if there is a pickled, up-to-date version of the file\n pickle_suffix = Dataset._to_valid_filename(\n f\"-{as_list}-{delimiter}-{ignore_duplicates}.pckl\"\n )\n pickle_filename = filename + pickle_suffix\n result = Dataset._pickle_load_if_uptodate(None, pickle_filename, filename)\n if result is not None:\n return result\n\n n = 0\n dictionary = {}\n warned_overrides = False\n duplicates = 0\n with open(filename, \"r\") as file:\n for line in file:\n key, value = 
line.split(delimiter, maxsplit=1)\n value = value.rstrip(\"\\n\")\n if as_list:\n key = int(key)\n n = max(n, key + 1)\n if key in dictionary:\n duplicates += 1\n if not ignore_duplicates:\n raise KeyError(f\"{filename} contains duplicated keys\")\n else:\n dictionary[key] = value\n if as_list:\n array = [None] * n\n for index, value in dictionary.items():\n array[index] = value\n result = (array, duplicates)\n else:\n result = (dictionary, duplicates)\n\n if use_pickle:\n Dataset._pickle_dump_atomic(result, pickle_filename)\n return result\n\n def load_map(\n self,\n key: str,\n as_list: bool = False,\n maptype=None,\n ids_key=None,\n ignore_duplicates=False,\n ) -> Union[List, Dict]:\n \"\"\"Load or return the map with the specified key.\n\n If `as_list` is set, the map is converted to an array indexed by the map's keys.\n\n If `maptype` is set ensures that the map being loaded has the specified type.\n Valid map types are `map` (keys are indexes) and `idmap` (keys are ids).\n\n If the map is of type `idmap`, its keys can be converted to indexes by setting\n `ids_key` to either `entity_ids` or `relation_ids` and `as_list` to `True`.\n\n If ignore_duplicates is set to `False` and the map contains duplicate keys,\n raise a `KeyError`. Otherwise, logs a warning and picks first occurrence of a\n key.\n\n \"\"\"\n if key not in self._meta:\n self.ensure_available(key)\n filename = self.config.get(f\"dataset.files.{key}.filename\")\n filetype = self.config.get(f\"dataset.files.{key}.type\")\n if (maptype and filetype != maptype) or (\n not maptype and filetype not in [\"map\", \"idmap\"]\n ):\n if not maptype:\n maptype = \"map' or 'idmap\"\n raise ValueError(\n \"Unexpected file type: \"\n f\"dataset.files.{key}.type='{filetype}', expected {maptype}\"\n )\n if filetype == \"idmap\" and as_list and ids_key:\n map_, duplicates = Dataset._load_map(\n os.path.join(self.folder, filename),\n as_list=False,\n ignore_duplicates=ignore_duplicates,\n use_pickle=self.config.get(\"dataset.pickle\"),\n )\n ids = self.load_map(ids_key, as_list=True)\n map_ = [map_.get(ids[i], None) for i in range(len(ids))]\n nones = map_.count(None)\n if nones > 0:\n self.config.log(\n f\"Warning: could not find {nones} ids in map {key}; \"\n \"filling with None.\"\n )\n else:\n map_, duplicates = Dataset._load_map(\n os.path.join(self.folder, filename),\n as_list=as_list,\n ignore_duplicates=ignore_duplicates,\n use_pickle=self.config.get(\"dataset.pickle\"),\n )\n\n if duplicates > 0:\n self.config.log(\n f\"Warning: map {key} contains {duplicates} duplicate keys, \"\n \"all which have been ignored\"\n )\n self.config.log(f\"Loaded {len(map_)} keys from map {key}\")\n self._meta[key] = map_\n\n return self._meta[key]\n\n def shallow_copy(self):\n \"\"\"Returns a dataset that shares the underlying splits and indexes.\n\n Changes to splits and indexes are also reflected on this and the copied dataset.\n \"\"\"\n copy = Dataset(self.config, self.folder)\n copy._num_entities = self.num_entities()\n copy._num_relations = self.num_relations()\n copy._triples = self._triples\n copy._meta = self._meta\n copy._indexes = self._indexes\n copy.index_functions = self.index_functions\n return copy\n\n def _get_newest_mtime(self, data_filenames=None):\n \"\"\"Return the timestamp of latest modification of relevant data files.\n\n If `data_filenames` is `None`, return latest modification of relevant modules or\n any of the dataset files given in the configuration.\n\n Otherwise, return latest modification of relevant modules or any of 
the\n specified files.\n\n \"\"\"\n newest_timestamp = max(\n os.path.getmtime(inspect.getfile(Dataset)),\n os.path.getmtime(inspect.getfile(kge.indexing)),\n )\n if data_filenames is None:\n data_filenames = []\n for key, entry in self.config.get(\"dataset.files\").items():\n filename = os.path.join(self.folder, entry[\"filename\"])\n data_filenames.append(filename)\n\n if isinstance(data_filenames, str):\n data_filenames = [data_filenames]\n\n for filename in data_filenames:\n if os.path.isfile(filename):\n timestamp = os.path.getmtime(filename)\n newest_timestamp = max(newest_timestamp, timestamp)\n\n return newest_timestamp\n\n def _pickle_load_if_uptodate(\n self, pickle_filename: str, data_filenames: List[str] = None\n ):\n \"\"\"Load the specified pickle file if it's up-to-date.\n\n The `data_filenames` argument is as specified in `_get_newest_mtime`. If\n `data_filenames` is not `None`, `self` can be `None`.\n\n Returns `None` if the pickled file is not present or if it is outdated.\n\n \"\"\"\n if os.path.isfile(pickle_filename):\n if os.path.getmtime(pickle_filename) > Dataset._get_newest_mtime(\n self, data_filenames\n ): # self may be None\n with open(pickle_filename, \"rb\") as f:\n return pickle.load(f)\n elif Dataset._abort_when_cache_outdated:\n pickle_filename = os.path.abspath(pickle_filename)\n pickle_dir = os.path.dirname(pickle_filename)\n raise ValueError(\n f\"\"\"Cached dataset file\n {pickle_filename}\nis outdated.\n\nIf unsure what to do, remove the command line option '--abort-when-cache-outdated' and\nrerun to recompute the outdated file.\n\nBEWARE: If you are an expert user who understands clearly why the file is outdated AND\nthat it does not need to be recomputed, you can update the timestamp of the filename as\nfollows:\n\n touch {pickle_filename}\n\nNOT RECOMMENDED: You can update the timestamp of all cached files using:\n\n touch {pickle_dir}/*.pckl\n\"\"\"\n )\n else:\n return None\n\n @staticmethod\n def _pickle_dump_atomic(data, pickle_filename):\n # first write to temporary file\n tmpfile = pickle_filename + str(uuid.uuid4()) + \".tmp\"\n with open(tmpfile, \"wb\") as f:\n pickle.dump(data, f)\n\n # then do an atomic replace\n os.replace(tmpfile, pickle_filename)\n\n ## ACCESS ###########################################################################\n\n def files_of_type(self, file_type: str) -> List[str]:\n \"Return all keys of files with the specified type.\"\n return [\n key\n for key, entry in self.config.get(\"dataset.files\").items()\n if entry[\"type\"] == file_type\n ]\n\n def num_entities(self) -> int:\n \"Return the number of entities in this dataset.\"\n if not self._num_entities:\n self._num_entities = len(self.entity_ids())\n return self._num_entities\n\n def num_relations(self) -> int:\n \"Return the number of relations in this dataset.\"\n if not self._num_relations:\n self._num_relations = len(self.relation_ids())\n return self._num_relations\n\n def split(self, split: str) -> Tensor:\n \"\"\"Return the split of the specified name.\n\n If the split is not yet loaded, load it. 
Returns an Nx3 IntTensor of\n spo-triples.\n\n \"\"\"\n return self.load_triples(split)\n\n def entity_ids(\n self, indexes: Optional[Union[int, Tensor]] = None\n ) -> Union[str, List[str], np.ndarray]:\n \"\"\"Decode indexes to entity ids.\n\n See `Dataset#map_indexes` for a description of the `indexes` argument.\n \"\"\"\n return self.map_indexes(indexes, \"entity_ids\")\n\n def relation_ids(\n self, indexes: Optional[Union[int, Tensor]] = None\n ) -> Union[str, List[str], np.ndarray]:\n \"\"\"Decode indexes to relation ids.\n\n See `Dataset#map_indexes` for a description of the `indexes` argument.\n \"\"\"\n return self.map_indexes(indexes, \"relation_ids\")\n\n def entity_strings(\n self, indexes: Optional[Union[int, Tensor]] = None\n ) -> Union[str, List[str], np.ndarray]:\n \"\"\"Decode indexes to entity strings.\n\n See `Dataset#map_indexes` for a description of the `indexes` argument.\n\n \"\"\"\n map_ = self.load_map(\n \"entity_strings\", as_list=True, ids_key=\"entity_ids\", ignore_duplicates=True\n )\n return self._map_indexes(indexes, map_)\n\n def relation_strings(\n self, indexes: Optional[Union[int, Tensor]] = None\n ) -> Union[str, List[str], np.ndarray]:\n \"\"\"Decode indexes to relation strings.\n\n See `Dataset#map_indexes` for a description of the `indexes` argument.\n\n \"\"\"\n map_ = self.load_map(\n \"relation_strings\",\n as_list=True,\n ids_key=\"relation_ids\",\n ignore_duplicates=True,\n )\n return self._map_indexes(indexes, map_)\n\n def meta(self, key: str) -> Any:\n \"\"\"Return metadata stored under the specified key.\"\"\"\n return self._meta[key]\n\n def index(self, key: str) -> Any:\n \"\"\"Return the index stored under the specified key.\n\n Index means any data structure that is derived from the dataset, including\n statistics and indexes.\n\n If the index has not yet been computed, computes it by calling the function\n specified in `self.index_functions`.\n\n See `kge.indexing.create_default_index_functions()` for the indexes available by\n default.\n\n \"\"\"\n if key not in self._indexes:\n use_pickle = self.config.get(\"dataset.pickle\")\n if use_pickle:\n pickle_filename = os.path.join(\n self.folder, Dataset._to_valid_filename(f\"index-{key}.pckl\")\n )\n index = self._pickle_load_if_uptodate(pickle_filename)\n if index is not None:\n self._indexes[key] = index\n # call index function solely to print log messages. It's\n # expected to note recompute the index (which we just loaded)\n if key in self.index_functions:\n self.index_functions[key](self)\n\n return self._indexes[key]\n\n self.index_functions[key](self)\n if use_pickle:\n Dataset._pickle_dump_atomic(self._indexes[key], pickle_filename)\n\n return self._indexes[key]\n\n @staticmethod\n def _map_indexes(indexes, values):\n \"Return the names corresponding to specified indexes\"\n if indexes is None:\n return values\n elif isinstance(indexes, int):\n return values[indexes]\n else:\n shape = indexes.shape\n indexes = indexes.view(-1)\n names = np.array(list(map(lambda i: values[i], indexes)), dtype=str)\n return names.reshape(shape)\n\n def map_indexes(\n self, indexes: Optional[Union[int, Tensor]], key: str\n ) -> Union[Any, List[Any], np.ndarray]:\n \"\"\"Maps indexes to values using the specified map.\n\n `key` refers to the key of a map file of the dataset, which associates a value\n with each numerical index. The map file is loaded automatically.\n\n If `indexes` is `None`, return all values. If `indexes` is an integer, return\n the corresponding value. 
If `indexes` is a Tensor, return an ndarray of the same\n shape holding the corresponding values.\n\n \"\"\"\n map_ = self.load_map(key, as_list=True)\n return Dataset._map_indexes(indexes, map_)\n"
] | [
[
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
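Dataset._load_triples() above deliberately reads triple files with pandas rather than np.loadtxt and then wraps the result with torch.from_numpy. A self-contained sketch of that load path, using a throwaway tab-separated file as a hypothetical example:

```python
import numpy as np
import pandas as pd
import torch

# hypothetical triples file: one (subject, predicate, object) index triple per line, tab-separated
with open("train.txt", "w") as f:
    f.write("0\t0\t1\n1\t1\t2\n2\t0\t3\n")

# pandas is used instead of np.loadtxt because it parses large triple files much faster
triples = pd.read_csv(
    "train.txt", sep="\t", dtype=np.int32, header=None, usecols=range(0, 3)
).to_numpy()
triples = torch.from_numpy(triples)  # zero-copy wrap of the int32 array
print(triples.shape)  # torch.Size([3, 3])
```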
Ferch42/PyDSRL | [
"bd9ea3e739c837db0db5052f7db23476fa21c472"
] | [
"dqn_main.py"
] | [
"'''Main module for the paper's algorithm'''\r\n\r\nimport argparse\r\nimport os\r\n\r\nfrom collections import deque\r\nfrom datetime import datetime\r\n\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tqdm\r\n\r\nfrom gym import logger\r\n\r\nimport cross_circle_gym\r\n\r\nfrom components.state_builder import StateRepresentationBuilder\r\nfrom components.agent import TabularAgent, DQNAgent\r\nfrom utils import prepare_training\r\n\r\n# Experiment Parameters\r\nparser = argparse.ArgumentParser(description=None)\r\nparser.add_argument('--experiment_name', type=str, default='default', help='Name of the experiment')\r\nparser.add_argument('--load', type=str, help='load existing model from filename provided')\r\nparser.add_argument('--image_dir', type=str, help='laod images from directory provided')\r\nparser.add_argument('--episodes', '-e', type=int, default=1000,\r\n help='number of DQN training episodes')\r\nparser.add_argument('--load-train', action='store_true',\r\n help='load existing model from filename provided and keep training')\r\nparser.add_argument('--new-images', action='store_true', help='make new set of training images')\r\nparser.add_argument('--enhancements', action='store_true',\r\n help='activate own improvements over original paper')\r\nparser.add_argument('--visualize', '--vis', action='store_true',\r\n help='plot autoencoder input & output')\r\nparser.add_argument('--save', type=str, help='save model to directory provided')\r\nparser.add_argument('--logdir',type=str,default='./logs', help='Log directory')\r\nparser.add_argument('--log_level',type=str,default='warn',help='Detail of logging output')\r\nparser.add_argument('--evaluation_frequency', type=int, default=100,\r\n help='How often to evaluate the agent')\r\nparser.add_argument('--tensorboard', action='store_true', default=False,\r\n help='Switch on tensorboard for the autoencoder training')\r\nparser.add_argument('--play', action='store_true', default=False,\r\n help='Choose the agents action for 20 timesteps to see what the autoencoder does')\r\n\r\n# Environment\r\nparser.add_argument('--random', action='store_true', default=False,\r\n help='Should the position of the entities be random')\r\nparser.add_argument('--double', action='store_true', default=False,\r\n help='Only negative objects (circles) or also positive ones (cross)')\r\nparser.add_argument('--n_entities', type=int, default=16,\r\n help='Number of entities in the environment')\r\nparser.add_argument('--entity_size', type=int, default=10, help='Size of the entities')\r\nparser.add_argument('--neighborhood_size', type=int, default=10,\r\n help='Size of the neighborhood')\r\nparser.add_argument('--step_size', type=float, default=1.0, help='Step-Size')\r\nparser.add_argument('--overlap_factor', type=float, default=0.01,\r\n help='How much must an gent overlap with an entitiy to collect it')\r\nparser.add_argument('--colour_state', action='store_true', default=False,\r\n help='Whether to use the colour image as a state or a one-channel black and white image')\r\n\r\n# Training parameters\r\nparser.add_argument('--alpha', type=float, default=0.01, help='Learning Rate')\r\nparser.add_argument('--epsilon_decay', type=float, default=0.99995,\r\n help='Decay rate of epsilon')\r\nparser.add_argument('--timesteps', type=int, default=100, help='Length of a training episode')\r\n\r\n# Autoencdoer\r\nparser.add_argument('--filter_size', default=10, type=int, help='Size of the filter')\r\n\r\n\r\nargs = parser.parse_args()\r\n\r\nnow = 
datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\r\nargs.logdir = os.path.join(args.logdir,args.experiment_name,now)\r\n\r\n# Choose environment\r\nif args.random and args.double:\r\n env_id = 'CrossCircle-MixedRand-v0'\r\nelif args.random and not args.double:\r\n env_id = 'CrossCircle-NegRand-v0'\r\nelif not args.random and args.double:\r\n env_id = 'CrossCircle-MixedGrid-v0'\r\nelse:\r\n env_id = 'CrossCircle-NegGrid-v0'\r\nargs.env_id = env_id\r\n\r\n# Set logger\r\nif args.log_level=='warn':\r\n logger.setLevel(logger.WARN)\r\nelif args.log_level=='info':\r\n logger.setLevel(logger.INFO)\r\nelse:\r\n raise NotImplementedError('Log-level not implemented')\r\nargs.logger = logger\r\n\r\n_ ,env = prepare_training(args)\r\n\r\n#state_builder = StateRepresentationBuilder(neighbor_radius=args.neighborhood_size)\r\naction_size = env.action_space.n\r\n\r\nstate_dim = env.reset().shape\r\nagent = DQNAgent(state_dim, action_size)\r\n\r\ndone = False\r\ntime_steps = args.timesteps\r\n\r\nnumber_of_evaluations = 0\r\nbuffered_rewards = deque(maxlen=200)\r\n\r\nsummary_writer = tf.summary.create_file_writer(args.logdir)\r\n\r\nfor e in tqdm.tqdm(range(args.episodes)):\r\n #state_builder.restart()\r\n state = env.reset()\r\n #state = state_builder.build_state(*autoencoder.get_entities(state))\r\n total_reward = 0\r\n\r\n for t in range(time_steps):\r\n action = agent.act(state)\r\n next_state, reward, done, _ = env.step(action)\r\n total_reward += reward\r\n #next_state = state_builder.build_state(*autoencoder.get_entities(next_state))\r\n agent.update(state, action, reward, next_state, done)\r\n state = next_state\r\n if done:\r\n break\r\n\r\n env.close()\r\n buffered_rewards.append(total_reward)\r\n\r\n with summary_writer.as_default():\r\n tf.summary.scalar('Averaged Reward',np.mean(buffered_rewards),e)\r\n tf.summary.scalar('Epsilon',agent.epsilon,e)\r\n\r\n\r\n if e % args.evaluation_frequency == 0:\r\n number_of_evaluations += 1\r\n agent.save(os.path.join(args.logdir,'dqn_agent.h5'))\r\n evaluation_reward = []\r\n with summary_writer.as_default():\r\n for i in range(10):\r\n done = False\r\n #state_builder.restart()\r\n image = env.reset()\r\n #state = state_builder.build_state(*autoencoder.get_entities(image))\r\n total_reward = 0\r\n for t in range(time_steps):\r\n action = agent.act(state,random_act=False)\r\n env.render()\r\n next_image, reward, done, _ = env.step(action)\r\n if i==0:\r\n tf.summary.image(f'Agent Behaviour {number_of_evaluations}',np.reshape(image,(1,)+image.shape),t)\r\n \r\n total_reward += reward\r\n #next_state = state_builder.build_state(*autoencoder.get_entities(next_image))\r\n state = next_image\r\n image = next_image\r\n evaluation_reward.append(total_reward)\r\n\r\n env.close()\r\n\r\n tf.summary.scalar('Evaluation Reward',np.mean(evaluation_reward),number_of_evaluations)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"numpy.reshape",
"tensorflow.summary.scalar",
"numpy.mean",
"tensorflow.summary.create_file_writer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
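The training loop above logs a running average of episode rewards to TensorBoard through tf.summary. A minimal sketch of that logging pattern, with a hypothetical log directory and random numbers standing in for episode returns:

```python
from collections import deque

import numpy as np
import tensorflow as tf

writer = tf.summary.create_file_writer("./logs/example")  # hypothetical log directory
buffered_rewards = deque(maxlen=200)

for episode in range(10):
    buffered_rewards.append(float(np.random.rand()))  # stand-in for a real episode return
    with writer.as_default():
        tf.summary.scalar("Averaged Reward", np.mean(buffered_rewards), step=episode)
```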
idiap/sentence-planner | [
"bafdef50043b97e28ae550e44e595dff3f4eb6ad"
] | [
"src/models/predictor.py"
] | [
"#\n# Original version by Yang Liu.\n# Modifications by Andreas Marfurt <[email protected]>\n#\n\n#!/usr/bin/env python\n\"\"\" Translator Class and builder \"\"\"\nfrom __future__ import print_function\nimport codecs\nimport os\nimport math\n\nimport torch\n\nfrom tensorboardX import SummaryWriter\n\nfrom others.utils import rouge_results_to_str, test_rouge, tile\nfrom translate.beam import GNMTGlobalScorer\nfrom python_rouge import RougeScores\n\ndef build_predictor(args, tokenizer, symbols, model, logger=None):\n scorer = GNMTGlobalScorer(args.alpha,length_penalty='wu')\n\n translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)\n return translator\n\n\nclass Translator(object):\n \"\"\"\n Uses a model to translate a batch of sentences.\n\n\n Args:\n model (:obj:`onmt.modules.NMTModel`):\n NMT model to use for translation\n fields (dict of Fields): data fields\n beam_size (int): size of beam to use\n n_best (int): number of translations produced\n max_length (int): maximum length output to produce\n global_scores (:obj:`GlobalScorer`):\n object to rescore final translations\n copy_attn (bool): use copy attention during translation\n cuda (bool): use cuda\n beam_trace (bool): trace beam search for debugging\n logger(logging.Logger): logger.\n \"\"\"\n\n def __init__(self,\n args,\n model,\n vocab,\n symbols,\n global_scorer=None,\n logger=None,\n dump_beam=\"\"):\n self.logger = logger\n self.cuda = args.visible_gpus != '-1'\n\n self.args = args\n self.model = model\n self.generator = self.model.generator\n self.vocab = vocab\n self.symbols = symbols\n self.start_token = symbols['BOS']\n self.end_token = symbols['EOS']\n\n self.global_scorer = global_scorer\n self.beam_size = args.beam_size\n self.min_length = args.min_length\n self.max_length = args.max_length\n\n self.dump_beam = dump_beam\n\n # for debugging\n self.beam_trace = self.dump_beam != \"\"\n self.beam_accum = None\n\n tensorboard_log_dir = os.path.dirname(args.log_file)\n\n self.tensorboard_writer = SummaryWriter(tensorboard_log_dir, comment=\"Unmt\")\n\n if self.beam_trace:\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n\n def _build_target_tokens(self, pred):\n # vocab = self.fields[\"tgt\"].vocab\n tokens = []\n for tok in pred:\n tok = int(tok)\n tokens.append(tok)\n if tokens[-1] == self.end_token:\n tokens = tokens[:-1]\n break\n tokens = [t for t in tokens if t < len(self.vocab)]\n tokens = self.vocab.DecodeIds(tokens).split(' ')\n return tokens\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert (len(translation_batch[\"gold_score\"]) ==\n len(translation_batch[\"predictions\"]))\n batch_size = batch.batch_size\n\n preds, pred_score, gold_score, tgt_str, src = translation_batch[\"predictions\"],translation_batch[\"scores\"],translation_batch[\"gold_score\"],batch.tgt_str, batch.src\n\n translations = []\n for b in range(batch_size):\n pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])\n pred_sents = ' '.join(pred_sents).replace(' ##','')\n gold_sent = ' '.join(tgt_str[b].split())\n # translation = Translation(fname[b],src[:, b] if src is not None else None,\n # src_raw, pred_sents,\n # attn[b], pred_score[b], gold_sent,\n # gold_score[b])\n # src = self.spm.DecodeIds([int(t) for t in translation_batch['batch'].src[0][5] if int(t) != len(self.spm)])\n raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]\n raw_src = ' '.join(raw_src)\n 
translation = (pred_sents, gold_sent, raw_src)\n # translation = (pred_sents[0], gold_sent)\n translations.append(translation)\n\n return translations\n\n def translate(self,\n data_iter, step,\n max_batches=0):\n\n self.model.eval()\n gold_path = self.args.result_path + '.%d.gold' % step\n can_path = self.args.result_path + '.%d.candidate' % step\n self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')\n self.can_out_file = codecs.open(can_path, 'w', 'utf-8')\n\n # raw_gold_path = self.args.result_path + '.%d.raw_gold' % step\n # raw_can_path = self.args.result_path + '.%d.raw_candidate' % step\n self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')\n self.can_out_file = codecs.open(can_path, 'w', 'utf-8')\n\n raw_src_path = self.args.result_path + '.%d.raw_src' % step\n self.src_out_file = codecs.open(raw_src_path, 'w', 'utf-8')\n\n # pred_results, gold_results = [], []\n ct = 0\n with torch.no_grad():\n for i, batch in enumerate(data_iter):\n if i >= max_batches > 0:\n break\n\n if(self.args.recall_eval):\n gold_tgt_len = batch.tgt.size(1)\n self.min_length = gold_tgt_len + 20\n self.max_length = gold_tgt_len + 60\n batch_data = self.translate_batch(batch)\n translations = self.from_batch(batch_data)\n\n for trans in translations:\n pred, gold, src = trans\n pred_str = pred.replace('[unused0]', '').replace('[unused3]', '').replace('[PAD]', '').replace('[unused1]', '').replace(r' +', ' ').replace(' [unused2] ', '<q>').replace('[unused2]', '').strip()\n gold_str = gold.strip()\n if(self.args.recall_eval):\n _pred_str = ''\n gap = 1e3\n for sent in pred_str.split('<q>'):\n can_pred_str = _pred_str+ '<q>'+sent.strip()\n can_gap = math.fabs(len(_pred_str.split())-len(gold_str.split()))\n # if(can_gap>=gap):\n if(len(can_pred_str.split())>=len(gold_str.split())+10):\n pred_str = _pred_str\n break\n else:\n gap = can_gap\n _pred_str = can_pred_str\n\n\n\n # pred_str = ' '.join(pred_str.split()[:len(gold_str.split())])\n # self.raw_can_out_file.write(' '.join(pred).strip() + '\\n')\n # self.raw_gold_out_file.write(' '.join(gold).strip() + '\\n')\n self.can_out_file.write(pred_str + '\\n')\n self.gold_out_file.write(gold_str + '\\n')\n self.src_out_file.write(src.strip() + '\\n')\n ct += 1\n self.can_out_file.flush()\n self.gold_out_file.flush()\n self.src_out_file.flush()\n\n self.can_out_file.close()\n self.gold_out_file.close()\n self.src_out_file.close()\n\n if (step != -1):\n rouges = self._report_rouge(gold_path, can_path)\n self.logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n if self.tensorboard_writer is not None:\n self.tensorboard_writer.add_scalar('test/rouge1-F', rouges['rouge_1_f_score'], step)\n self.tensorboard_writer.add_scalar('test/rouge2-F', rouges['rouge_2_f_score'], step)\n self.tensorboard_writer.add_scalar('test/rougeL-F', rouges['rouge_l_f_score'], step)\n return rouges['rouge_1_f_score'] * 100, rouges['rouge_2_f_score'] * 100, rouges['rouge_l_f_score'] * 100\n else:\n return -1, -1, -1\n\n def _report_rouge(self, gold_path, can_path):\n self.logger.info(\"Calculating Rouge\")\n # results_dict = test_rouge(self.args.temp_dir, can_path, gold_path)\n scores = RougeScores(use_stemmer=True)\n results_dict = scores.compute_scores(can_path, gold_path, newline_token='<q>')\n return results_dict\n\n def translate_batch(self, batch, fast=False):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast 
(bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.\n \"\"\"\n with torch.no_grad():\n return self._fast_translate_batch(\n batch,\n self.max_length,\n min_length=self.min_length)\n\n def _fast_translate_batch(self,\n batch,\n max_length,\n min_length=0):\n # TODO: faster code path for beam_size == 1.\n\n # TODO: support these blacklisted features.\n assert not self.dump_beam\n\n beam_size = self.beam_size\n batch_size = batch.batch_size\n src = batch.src\n segs = batch.segs\n mask_src = batch.mask_src\n\n if self.args.model == 'sentsumm':\n src_features = self.model.inference_init(src, segs, mask_src)\n else:\n src_features = self.model.bert(src, segs, mask_src)\n device = src_features.device\n\n # Tile states and memory beam_size times.\n if self.args.model == 'presumm':\n dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)\n dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim))\n\n if self.args.model != 'sentsumm':\n src_features = tile(src_features, beam_size, dim=0)\n batch_offset = torch.arange(\n batch_size, dtype=torch.long, device=device)\n beam_offset = torch.arange(\n 0,\n batch_size * beam_size,\n step=beam_size,\n dtype=torch.long,\n device=device)\n alive_seq = torch.full(\n [batch_size * beam_size, 1],\n self.start_token,\n dtype=torch.long,\n device=device)\n\n # Give full probability to the first beam on the first step.\n topk_log_probs = (\n torch.tensor([0.0] + [float(\"-inf\")] * (beam_size - 1),\n device=device).repeat(batch_size))\n\n # Structure that holds finished hypotheses.\n hypotheses = [[] for _ in range(batch_size)] # noqa: F812\n\n results = {}\n results[\"predictions\"] = [[] for _ in range(batch_size)] # noqa: F812\n results[\"scores\"] = [[] for _ in range(batch_size)] # noqa: F812\n results[\"gold_score\"] = [0] * batch_size\n results[\"batch\"] = batch\n\n for step in range(max_length):\n decoder_input = alive_seq[:, -1].view(1, -1)\n\n # Decoder forward.\n decoder_input = decoder_input.transpose(0,1)\n\n if self.args.model == 'presumm':\n dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step)\n elif self.args.model == 'sentsumm':\n mask_tgt = torch.ones(alive_seq.size(), dtype=torch.uint8, device=device)\n dec_out, _ = self.model.inference_step(src_features, mask_src, alive_seq, mask_tgt)\n dec_out = dec_out[:, -1:, :]\n else:\n raise ValueError('Unknown model: %s' % self.args.model)\n\n # Generator forward.\n log_probs = self.generator.forward(dec_out.transpose(0,1).squeeze(0))\n vocab_size = log_probs.size(-1)\n\n if step < min_length:\n log_probs[:, self.end_token] = -1e20\n\n # Multiply probs by the beam probability.\n log_probs += topk_log_probs.view(-1).unsqueeze(1)\n\n alpha = self.global_scorer.alpha\n length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha\n\n # Flatten probs into a list of possibilities.\n curr_scores = log_probs / length_penalty\n\n if(self.args.block_trigram):\n cur_len = alive_seq.size(1)\n if(cur_len>3):\n for i in range(alive_seq.size(0)):\n fail = False\n words = [int(w) for w in alive_seq[i]]\n words = [self.vocab.ids_to_tokens[w] for w in words]\n words = ' '.join(words).replace(' ##','').split()\n if(len(words)<=3):\n continue\n trigrams = [(words[i-1],words[i],words[i+1]) for i in range(1,len(words)-1)]\n trigram = tuple(trigrams[-1])\n if trigram in trigrams[:-1]:\n fail = True\n if fail:\n curr_scores[i] = -10e20\n\n curr_scores = curr_scores.reshape(-1, beam_size 
* vocab_size)\n topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)\n\n # Recover log probs.\n topk_log_probs = topk_scores * length_penalty\n\n # Resolve beam origin and true word ids.\n topk_beam_index = topk_ids.div(vocab_size)\n topk_ids = topk_ids.fmod(vocab_size)\n\n # Map beam_index to batch_index in the flat representation.\n batch_index = (\n topk_beam_index\n + beam_offset[:topk_beam_index.size(0)].unsqueeze(1))\n select_indices = batch_index.view(-1)\n\n # Append last prediction.\n alive_seq = torch.cat(\n [alive_seq.index_select(0, select_indices),\n topk_ids.view(-1, 1)], -1)\n\n is_finished = topk_ids.eq(self.end_token)\n if step + 1 == max_length:\n is_finished.fill_(1)\n # End condition is top beam is finished.\n end_condition = is_finished[:, 0].eq(1)\n # Save finished hypotheses.\n if is_finished.any():\n predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))\n for i in range(is_finished.size(0)):\n b = batch_offset[i]\n if end_condition[i]:\n is_finished[i].fill_(1)\n finished_hyp = is_finished[i].nonzero().view(-1)\n # Store finished hypotheses for this batch.\n for j in finished_hyp:\n hypotheses[b].append((\n topk_scores[i, j],\n predictions[i, j, 1:]))\n # If the batch reached the end, save the n_best hypotheses.\n if end_condition[i]:\n best_hyp = sorted(\n hypotheses[b], key=lambda x: x[0], reverse=True)\n score, pred = best_hyp[0]\n\n results[\"scores\"][b].append(score)\n results[\"predictions\"][b].append(pred)\n non_finished = end_condition.eq(0).nonzero().view(-1)\n # If all sentences are translated, no need to go further.\n if len(non_finished) == 0:\n break\n # Remove finished batches for the next step.\n topk_log_probs = topk_log_probs.index_select(0, non_finished)\n batch_index = batch_index.index_select(0, non_finished)\n batch_offset = batch_offset.index_select(0, non_finished)\n alive_seq = predictions.index_select(0, non_finished) \\\n .view(-1, alive_seq.size(-1))\n # Reorder states.\n select_indices = batch_index.view(-1)\n if self.args.model != 'sentsumm':\n src_features = src_features.index_select(0, select_indices)\n if self.args.model == 'presumm':\n dec_states.map_batch_fn(\n lambda state, dim: state.index_select(dim, select_indices))\n\n return results\n\n\nclass Translation(object):\n \"\"\"\n Container for a translated sentence.\n\n Attributes:\n src (`LongTensor`): src word ids\n src_raw ([str]): raw src words\n\n pred_sents ([[str]]): words from the n-best translations\n pred_scores ([[float]]): log-probs of n-best translations\n attns ([`FloatTensor`]) : attention dist for each translation\n gold_sent ([str]): words from gold translation\n gold_score ([float]): log-prob of gold translation\n\n \"\"\"\n\n def __init__(self, fname, src, src_raw, pred_sents,\n attn, pred_scores, tgt_sent, gold_score):\n self.fname = fname\n self.src = src\n self.src_raw = src_raw\n self.pred_sents = pred_sents\n self.attns = attn\n self.pred_scores = pred_scores\n self.gold_sent = tgt_sent\n self.gold_score = gold_score\n\n def log(self, sent_number):\n \"\"\"\n Log translation.\n \"\"\"\n\n output = '\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n output += 'PRED {}: {}\\n'.format(sent_number, pred_sent)\n output += \"PRED SCORE: {:.4f}\\n\".format(best_score)\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n output += 'GOLD {}: {}\\n'.format(sent_number, tgt_sent)\n output += (\"GOLD SCORE: 
{:.4f}\\n\".format(self.gold_score))\n if len(self.pred_sents) > 1:\n output += '\\nBEST HYP:\\n'\n for score, sent in zip(self.pred_scores, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n\n return output\n"
] | [
[
"torch.no_grad",
"torch.full",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
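In _fast_translate_batch() above, beam search keeps its state in a few flat tensors indexed by batch and beam. A minimal sketch of how those tensors are initialized and how the GNMT length penalty is applied, assuming a hypothetical BOS id of 101, beam_size=4, and alpha=0.6:

```python
import torch

batch_size, beam_size, alpha = 2, 4, 0.6
bos_id = 101  # hypothetical start token id
device = torch.device("cpu")

# one offset per example, and one offset per beam block in the flattened (batch * beam) layout
batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)
beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device)

# every beam starts from the BOS token
alive_seq = torch.full([batch_size * beam_size, 1], bos_id, dtype=torch.long, device=device)

# only the first beam of each example starts with non-zero probability mass
topk_log_probs = torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size)

step = 0
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha  # the Wu et al. penalty used to rescale scores
print(alive_seq.shape, topk_log_probs.shape, length_penalty)
```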
Jiayuan-Gu/habitat-lab | [
"5ce36a6c6502fe8e86d6732ba8bab9a5db471574"
] | [
"test/test_baseline_trainers.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport gc\nimport itertools\nimport math\nimport os\nimport random\nfrom copy import deepcopy\nfrom glob import glob\n\nimport pytest\n\nfrom habitat.core.vector_env import VectorEnv\n\ntry:\n import torch\n import torch.distributed\n\n from habitat_baselines.common.base_trainer import BaseRLTrainer\n from habitat_baselines.common.baseline_registry import baseline_registry\n from habitat_baselines.config.default import get_config\n from habitat_baselines.rl.ddppo.ddp_utils import find_free_port\n from habitat_baselines.run import execute_exp, run_exp\n from habitat_baselines.utils.common import (\n ObservationBatchingCache,\n batch_obs,\n )\n\n baseline_installed = True\nexcept ImportError:\n baseline_installed = False\n\n\ndef _powerset(s):\n return [\n combo\n for r in range(len(s) + 1)\n for combo in itertools.combinations(s, r)\n ]\n\n\[email protected](\n not baseline_installed, reason=\"baseline sub-module not installed\"\n)\[email protected](\n \"test_cfg_path,mode,gpu2gpu,observation_transforms\",\n list(\n itertools.product(\n glob(\"habitat_baselines/config/test/*\"),\n [\"train\", \"eval\"],\n [False],\n [\n [],\n [\n \"CenterCropper\",\n \"ResizeShortestEdge\",\n ],\n ],\n )\n )\n + list(\n itertools.product(\n [\"habitat_baselines/config/test/ppo_pointnav_test.yaml\"],\n [\"train\", \"eval\"],\n [True],\n [\n [],\n [\n \"CenterCropper\",\n \"ResizeShortestEdge\",\n ],\n ],\n )\n ),\n)\ndef test_trainers(test_cfg_path, mode, gpu2gpu, observation_transforms):\n # For testing with world_size=1\n os.environ[\"MAIN_PORT\"] = str(find_free_port())\n\n if gpu2gpu:\n try:\n import habitat_sim\n except ImportError:\n pytest.skip(\"GPU-GPU requires Habitat-Sim\")\n\n if not habitat_sim.cuda_enabled:\n pytest.skip(\"GPU-GPU requires CUDA\")\n\n run_exp(\n test_cfg_path,\n mode,\n [\n \"TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.GPU_GPU\",\n str(gpu2gpu),\n \"RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS\",\n str(tuple(observation_transforms)),\n ],\n )\n\n # Needed to destroy the trainer\n gc.collect()\n\n # Deinit processes group\n if torch.distributed.is_initialized():\n torch.distributed.destroy_process_group()\n\n\[email protected](\n not baseline_installed, reason=\"baseline sub-module not installed\"\n)\[email protected](\n \"test_cfg_path,mode\",\n [\n [\n \"habitat_baselines/config/test/ppo_pointnav_test.yaml\",\n \"train\",\n ],\n ],\n)\[email protected](\"camera\", [\"equirect\", \"fisheye\", \"cubemap\"])\[email protected](\"sensor_type\", [\"RGB\", \"DEPTH\"])\ndef test_cubemap_stiching(\n test_cfg_path: str, mode: str, camera: str, sensor_type: str\n):\n meta_config = get_config(config_paths=test_cfg_path)\n meta_config.defrost()\n config = meta_config.TASK_CONFIG\n CAMERA_NUM = 6\n orient = [\n [0, math.pi, 0], # Back\n [-math.pi / 2, 0, 0], # Down\n [0, 0, 0], # Front\n [0, math.pi / 2, 0], # Right\n [0, 3 / 2 * math.pi, 0], # Left\n [math.pi / 2, 0, 0], # Up\n ]\n sensor_uuids = []\n\n if f\"{sensor_type}_SENSOR\" not in config.SIMULATOR.AGENT_0.SENSORS:\n config.SIMULATOR.AGENT_0.SENSORS.append(f\"{sensor_type}_SENSOR\")\n sensor = getattr(config.SIMULATOR, f\"{sensor_type}_SENSOR\")\n for camera_id in range(CAMERA_NUM):\n camera_template = f\"{sensor_type}_{camera_id}\"\n camera_config = deepcopy(sensor)\n camera_config.ORIENTATION = orient[camera_id]\n camera_config.UUID = 
camera_template.lower()\n sensor_uuids.append(camera_config.UUID)\n setattr(config.SIMULATOR, camera_template, camera_config)\n config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)\n\n meta_config.TASK_CONFIG = config\n meta_config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS\n if camera == \"equirect\":\n meta_config.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.SENSOR_UUIDS = tuple(\n sensor_uuids\n )\n elif camera == \"fisheye\":\n meta_config.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.SENSOR_UUIDS = tuple(\n sensor_uuids\n )\n meta_config.freeze()\n if camera in [\"equirect\", \"fisheye\"]:\n execute_exp(meta_config, mode)\n # Deinit processes group\n if torch.distributed.is_initialized():\n torch.distributed.destroy_process_group()\n\n elif camera == \"cubemap\":\n # 1) Generate an equirect image from cubemap images.\n # 2) Generate cubemap images from the equirect image.\n # 3) Compare the input and output cubemap\n env_fn_args = []\n for split in [\"train\", \"val\"]:\n tmp_config = config.clone()\n tmp_config.defrost()\n tmp_config.DATASET[\"SPLIT\"] = split\n tmp_config.freeze()\n env_fn_args.append((tmp_config, None))\n\n with VectorEnv(env_fn_args=env_fn_args) as envs:\n observations = envs.reset()\n batch = batch_obs(observations)\n orig_batch = deepcopy(batch)\n\n # ProjectionTransformer\n obs_trans_to_eq = baseline_registry.get_obs_transformer(\n \"CubeMap2Equirect\"\n )\n cube2equirect = obs_trans_to_eq(sensor_uuids, (256, 512))\n obs_trans_to_cube = baseline_registry.get_obs_transformer(\n \"Equirect2CubeMap\"\n )\n equirect2cube = obs_trans_to_cube(\n cube2equirect.target_uuids, (256, 256)\n )\n\n # Cubemap to Equirect to Cubemap\n batch_eq = cube2equirect(batch)\n batch_cube = equirect2cube(batch_eq)\n\n # Extract input and output cubemap\n output_cube = batch_cube[cube2equirect.target_uuids[0]]\n input_cube = [orig_batch[key] for key in sensor_uuids]\n input_cube = torch.stack(input_cube, axis=1)\n input_cube = torch.flatten(input_cube, end_dim=1)\n\n # Apply blur to absorb difference (blur, etc.) 
caused by conversion\n if sensor_type == \"RGB\":\n output_cube = output_cube.float() / 255\n input_cube = input_cube.float() / 255\n output_cube = output_cube.permute((0, 3, 1, 2)) # NHWC => NCHW\n input_cube = input_cube.permute((0, 3, 1, 2)) # NHWC => NCHW\n apply_blur = torch.nn.AvgPool2d(5, 3, 2)\n output_cube = apply_blur(output_cube)\n input_cube = apply_blur(input_cube)\n\n # Calculate the difference\n diff = torch.abs(output_cube - input_cube)\n assert diff.mean().item() < 0.01\n else:\n raise ValueError(f\"Unknown camera name: {camera}\")\n\n\[email protected](\n not baseline_installed, reason=\"baseline sub-module not installed\"\n)\ndef test_eval_config():\n ckpt_opts = [\"VIDEO_OPTION\", \"[]\"]\n eval_opts = [\"VIDEO_OPTION\", \"['disk']\"]\n\n ckpt_cfg = get_config(None, ckpt_opts)\n assert ckpt_cfg.VIDEO_OPTION == []\n assert ckpt_cfg.CMD_TRAILING_OPTS == [\"VIDEO_OPTION\", \"[]\"]\n\n eval_cfg = get_config(None, eval_opts)\n assert eval_cfg.VIDEO_OPTION == [\"disk\"]\n assert eval_cfg.CMD_TRAILING_OPTS == [\"VIDEO_OPTION\", \"['disk']\"]\n\n trainer = BaseRLTrainer(get_config())\n assert trainer.config.VIDEO_OPTION == [\"disk\", \"tensorboard\"]\n returned_config = trainer._setup_eval_config(checkpoint_config=ckpt_cfg)\n assert returned_config.VIDEO_OPTION == []\n\n trainer = BaseRLTrainer(eval_cfg)\n returned_config = trainer._setup_eval_config(ckpt_cfg)\n assert returned_config.VIDEO_OPTION == [\"disk\"]\n\n\ndef __do_pause_test(num_envs, envs_to_pause):\n class PausableShim(VectorEnv):\n def __init__(self, num_envs):\n self._running = list(range(num_envs))\n\n @property\n def num_envs(self):\n return len(self._running)\n\n def pause_at(self, idx):\n self._running.pop(idx)\n\n envs = PausableShim(num_envs)\n test_recurrent_hidden_states = (\n torch.arange(num_envs).view(num_envs, 1, 1).expand(num_envs, 4, 512)\n )\n not_done_masks = torch.arange(num_envs).view(num_envs, 1)\n current_episode_reward = torch.arange(num_envs).view(num_envs, 1)\n prev_actions = torch.arange(num_envs).view(num_envs, 1)\n batch = {\n k: torch.arange(num_envs)\n .view(num_envs, 1, 1, 1)\n .expand(num_envs, 3, 256, 256)\n for k in [\"a\", \"b\"]\n }\n rgb_frames = [[idx] for idx in range(num_envs)]\n\n (\n envs,\n test_recurrent_hidden_states,\n not_done_masks,\n current_episode_reward,\n prev_actions,\n batch,\n rgb_frames,\n ) = BaseRLTrainer._pause_envs(\n envs_to_pause,\n envs,\n test_recurrent_hidden_states,\n not_done_masks,\n current_episode_reward,\n prev_actions,\n batch,\n rgb_frames,\n )\n\n expected = sorted(set(range(num_envs)) - set(envs_to_pause))\n\n assert envs._running == expected\n\n assert list(test_recurrent_hidden_states.size()) == [len(expected), 4, 512]\n assert test_recurrent_hidden_states[:, 0, 0].numpy().tolist() == expected\n\n assert not_done_masks[:, 0].numpy().tolist() == expected\n assert current_episode_reward[:, 0].numpy().tolist() == expected\n assert prev_actions[:, 0].numpy().tolist() == expected\n assert [v[0] for v in rgb_frames] == expected\n\n for _, v in batch.items():\n assert list(v.size()) == [len(expected), 3, 256, 256]\n assert v[:, 0, 0, 0].numpy().tolist() == expected\n\n\[email protected](\n not baseline_installed, reason=\"baseline sub-module not installed\"\n)\ndef test_pausing():\n random.seed(0)\n for _ in range(100):\n num_envs = random.randint(1, 13)\n envs_to_pause = list(range(num_envs))\n\n random.shuffle(envs_to_pause)\n envs_to_pause = envs_to_pause[: random.randint(0, num_envs)]\n # envs_to_pause is assumed to be sorted in the 
function\n envs_to_pause = sorted(envs_to_pause)\n\n __do_pause_test(num_envs, envs_to_pause)\n\n num_envs = 8\n __do_pause_test(num_envs, [])\n __do_pause_test(num_envs, list(range(num_envs)))\n\n\n@pytest.mark.skipif(\n not baseline_installed, reason=\"baseline sub-module not installed\"\n)\n@pytest.mark.parametrize(\n \"sensor_device,batched_device\",\n [(\"cpu\", \"cpu\"), (\"cpu\", \"cuda\"), (\"cuda\", \"cuda\")],\n)\ndef test_batch_obs(sensor_device, batched_device):\n if (\n \"cuda\" in (sensor_device, batched_device)\n and not torch.cuda.is_available()\n ):\n pytest.skip(\"CUDA not avaliable\")\n\n sensor_device = torch.device(sensor_device)\n batched_device = torch.device(batched_device)\n\n numpy_if = lambda t: t.numpy() if sensor_device.type == \"cpu\" else t\n\n cache = ObservationBatchingCache()\n sensors = [\n {\n f\"{s}\": numpy_if(torch.randn(128, 128, device=sensor_device))\n for s in range(4)\n }\n for _ in range(4)\n ]\n\n _ = batch_obs(sensors, device=batched_device, cache=cache)\n"
] | [
[
"torch.abs",
"torch.randn",
"torch.distributed.is_initialized",
"torch.arange",
"torch.nn.AvgPool2d",
"torch.distributed.destroy_process_group",
"torch.flatten",
"torch.device",
"torch.cuda.is_available",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lastmeta/Satori | [
"cb321ee53a15fe8cba8fcdd483eeb6acc8dab3ea"
] | [
"satori/lib/spoof/streamr.py"
] | [
"# > python satori\\spoof\\streamr.py \n\nimport time \nimport json\nimport requests\nimport datetime as dt\nimport pandas as pd\nfrom satori import config \nfrom satori.lib.apis import disk\n\nclass Streamr():\n def __init__(self, sourceId:str=None, streamId:str=None):\n self.sourceId = sourceId or 'streamrSpoof'\n self.streamId = streamId or 'simpleEURCleaned'\n df = pd.read_csv(config.root('lib', 'spoof', f'{streamId}.csv'))\n existing = disk.Api(source=self.sourceId, stream=self.streamId).read()\n past = existing.shape[0] if existing is not None else 0\n self.past = df.iloc[:past]\n self.future = df.iloc[past:]\n self.port = config.flaskPort()\n self.incremental = self.getNewData()\n\n def getNewData(self): # -> pd.DataFrame:\n ''' incrementally returns mock future data to simulate the passage of time '''\n for i in self.future.index:\n yield pd.DataFrame(self.future.loc[i]).T\n\n def providePast(self):\n ''' provides the past as json '''\n return self.past.T.to_json()\n\n def provideIncremental(self):\n ''' observation with row id '''\n return next(self.incremental).T.to_json()\n \n def provideObservation(self): # -> int, string:\n d = next(self.incremental).T.to_dict()\n index = list(d.keys())[0]\n return index, json.dumps(d[index])\n\n def provideIncrementalWithId(self):\n # todo: in the real stream, if the observationId is obviously\n # a datetime in UTC, we could use that as the observed\n # time, otherwise, we'll just use our own on update.\n key, content = self.provideObservation()\n return (\n '{'\n f'\"source-id\":\"{self.sourceId}\",'\n f'\"stream-id\":\"{self.streamId}\",'\n '\"observed-time\":\"' + str(dt.datetime.utcnow()) + '\",'\n '\"observation-id\":' + str(key) + ','\n '\"content\":' + content + '}')\n\n def run(self):\n while True:\n time.sleep(3)\n x = self.provideIncrementalWithId()\n response = requests.post(\n url=f'http://127.0.0.1:{self.port}/subscription/update', \n json=x)\n response.raise_for_status()\n\n'''\nfrom satori.lib.engine.structs import Observation\nJSON = (\n '{'\n '\"source-id\":\"streamrSpoof\",'\n '\"stream-id\":\"simpleEURCleaned\",'\n '\"observed-time\":\"2022-04-14 13:53:37.186105\",'\n '\"observation-id\":3675,'\n '\"content\":{'\n '\"High\": 0.81856,'\n '\"Low\": 0.81337,'\n '\"Close\": 0.81512}}')\n'''"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
yuehaixiao/models | [
"3a14ee7ded00162c416b1bc84de3f2a158ec3278"
] | [
"PaddleCV/image_classification/reader_cv2.py"
] | [
"#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport os\nimport math\nimport random\nimport functools\nimport numpy as np\nimport cv2\nimport io\n\nimport paddle\nimport paddle.fluid as fluid\n\nrandom.seed(0)\nnp.random.seed(0)\n\nDATA_DIM = 224\n\nTHREAD = 8\nBUF_SIZE = 2048\n\nDATA_DIR = './data/ILSVRC2012'\n\nimg_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\nimg_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n\n\ndef rotate_image(img):\n \"\"\" rotate_image \"\"\"\n (h, w) = img.shape[:2]\n center = (w / 2, h / 2)\n angle = np.random.randint(-10, 11)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(img, M, (w, h))\n return rotated\n\n\ndef random_crop(img, size, settings, scale=None, ratio=None):\n \"\"\" random_crop \"\"\"\n lower_scale = settings.lower_scale\n lower_ratio = settings.lower_ratio\n upper_ratio = settings.upper_ratio\n scale = [lower_scale, 1.0] if scale is None else scale\n ratio = [lower_ratio, upper_ratio] if ratio is None else ratio\n\n aspect_ratio = math.sqrt(np.random.uniform(*ratio))\n w = 1. * aspect_ratio\n h = 1. / aspect_ratio\n\n bound = min((float(img.shape[0]) / img.shape[1]) / (h**2),\n (float(img.shape[1]) / img.shape[0]) / (w**2))\n\n scale_max = min(scale[1], bound)\n scale_min = min(scale[0], bound)\n\n target_area = img.shape[0] * img.shape[1] * np.random.uniform(scale_min,\n scale_max)\n target_size = math.sqrt(target_area)\n w = int(target_size * w)\n h = int(target_size * h)\n i = np.random.randint(0, img.shape[0] - h + 1)\n j = np.random.randint(0, img.shape[1] - w + 1)\n\n img = img[i:i + h, j:j + w, :]\n\n resized = cv2.resize(\n img,\n (size, size)\n #, interpolation=cv2.INTER_LANCZOS4\n )\n return resized\n\n\ndef distort_color(img):\n return img\n\n\ndef resize_short(img, target_size):\n \"\"\" resize_short \"\"\"\n percent = float(target_size) / min(img.shape[0], img.shape[1])\n resized_width = int(round(img.shape[1] * percent))\n resized_height = int(round(img.shape[0] * percent))\n resized = cv2.resize(\n img,\n (resized_width, resized_height),\n #interpolation=cv2.INTER_LANCZOS4\n )\n return resized\n\n\ndef crop_image(img, target_size, center):\n \"\"\" crop_image \"\"\"\n height, width = img.shape[:2]\n size = target_size\n if center == True:\n w_start = (width - size) // 2\n h_start = (height - size) // 2\n else:\n w_start = np.random.randint(0, width - size + 1)\n h_start = np.random.randint(0, height - size + 1)\n w_end = w_start + size\n h_end = h_start + size\n img = img[h_start:h_end, w_start:w_end, :]\n return img\n\n\ndef create_mixup_reader(settings, rd):\n class context:\n tmp_mix = []\n tmp_l1 = []\n tmp_l2 = []\n tmp_lam = []\n\n batch_size = settings.batch_size\n alpha = settings.mixup_alpha\n\n def fetch_data():\n\n data_list = []\n for i, item in enumerate(rd()):\n data_list.append(item)\n if i % batch_size == batch_size - 1:\n yield data_list\n data_list = []\n\n def mixup_data():\n\n for data_list in 
fetch_data():\n if alpha > 0.:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.\n l1 = np.array(data_list)\n l2 = np.random.permutation(l1)\n mixed_l = [\n l1[i][0] * lam + (1 - lam) * l2[i][0] for i in range(len(l1))\n ]\n yield mixed_l, l1, l2, lam\n\n def mixup_reader():\n\n for context.tmp_mix, context.tmp_l1, context.tmp_l2, context.tmp_lam in mixup_data(\n ):\n for i in range(len(context.tmp_mix)):\n mixed_l = context.tmp_mix[i]\n l1 = context.tmp_l1[i]\n l2 = context.tmp_l2[i]\n lam = context.tmp_lam\n yield mixed_l, l1[1], l2[1], lam\n\n return mixup_reader\n\n\ndef process_image(sample,\n settings,\n mode,\n color_jitter,\n rotate,\n crop_size=224,\n mean=None,\n std=None):\n \"\"\" process_image \"\"\"\n\n mean = [0.485, 0.456, 0.406] if mean is None else mean\n std = [0.229, 0.224, 0.225] if std is None else std\n\n img_path = sample[0]\n img = cv2.imread(img_path)\n\n if mode == 'train':\n if rotate:\n img = rotate_image(img)\n if crop_size > 0:\n img = random_crop(img, crop_size, settings)\n if color_jitter:\n img = distort_color(img)\n if np.random.randint(0, 2) == 1:\n img = img[:, ::-1, :]\n else:\n if crop_size > 0:\n target_size = settings.resize_short_size\n img = resize_short(img, target_size)\n\n img = crop_image(img, target_size=crop_size, center=True)\n\n img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255\n img_mean = np.array(mean).reshape((3, 1, 1))\n img_std = np.array(std).reshape((3, 1, 1))\n img -= img_mean\n img /= img_std\n\n if mode == 'train' or mode == 'val':\n return (img, sample[1])\n elif mode == 'test':\n return (img, )\n\n\ndef image_mapper(**kwargs):\n \"\"\" image_mapper \"\"\"\n return functools.partial(process_image, **kwargs)\n\n\ndef process_batch_data(input_data, settings, mode, color_jitter, rotate):\n batch_data = []\n for sample in input_data:\n batch_data.append(\n process_image(sample, settings, mode, color_jitter, rotate))\n return batch_data\n\n\ndef _reader_creator(settings,\n file_list,\n batch_size,\n mode,\n shuffle=False,\n color_jitter=False,\n rotate=False,\n data_dir=DATA_DIR,\n shuffle_seed=0):\n def reader():\n def read_file_list():\n with open(file_list) as flist:\n full_lines = [line.strip() for line in flist]\n if shuffle:\n if shuffle_seed is not None:\n np.random.seed(shuffle_seed)\n np.random.shuffle(full_lines)\n batch_data = []\n for line in full_lines:\n img_path, label = line.split()\n img_path = os.path.join(data_dir, img_path)\n batch_data.append([img_path, int(label)])\n if len(batch_data) == batch_size:\n if mode == 'train' or mode == 'val' or mode == 'test':\n yield batch_data\n\n batch_data = []\n\n return read_file_list\n\n data_reader = reader()\n num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))\n if mode == 'train' and num_trainers > 1:\n assert shuffle_seed is not None, \\\n \"If num_trainers > 1, the shuffle_seed must be set, because \" \\\n \"the order of batch data generated by reader \" \\\n \"must be the same in the respective processes.\"\n data_reader = fluid.contrib.reader.distributed_batch_reader(data_reader)\n\n mapper = functools.partial(\n process_batch_data,\n settings=settings,\n mode=mode,\n color_jitter=color_jitter,\n rotate=rotate)\n\n return paddle.reader.xmap_readers(\n mapper, data_reader, THREAD, BUF_SIZE, order=False)\n\n\ndef train(settings, batch_size, data_dir=DATA_DIR, shuffle_seed=0):\n file_list = os.path.join(data_dir, 'train_list.txt')\n reader = _reader_creator(\n settings,\n file_list,\n batch_size,\n 'train',\n shuffle=True,\n 
color_jitter=False,\n rotate=False,\n data_dir=data_dir,\n shuffle_seed=shuffle_seed)\n if settings.use_mixup == True:\n reader = create_mixup_reader(settings, reader)\n return reader\n\n\ndef val(settings, batch_size, data_dir=DATA_DIR):\n file_list = os.path.join(data_dir, 'val_list.txt')\n return _reader_creator(\n settings,\n file_list,\n batch_size,\n 'val',\n shuffle=False,\n data_dir=data_dir)\n\n\ndef test(settings, batch_size, data_dir=DATA_DIR):\n file_list = os.path.join(data_dir, 'val_list.txt')\n return _reader_creator(\n settings,\n file_list,\n batch_size,\n 'test',\n shuffle=False,\n data_dir=data_dir)\n"
] | [
[
"numpy.random.beta",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.random.permutation",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
egoolish/cuml | [
"fab74ca94fdbc5b49281660ce32a48cfd3d66f46"
] | [
"python/cuml/test/dask/utils.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport dask.dataframe as dd\nimport pandas as pd\n\nimport cudf\n\nimport dask_cudf\n\nfrom dask.distributed import default_client, wait\n\nfrom sklearn.datasets import make_blobs\n\nimport numpy as np\n\nimport random\nimport math\n\n\ndef create_df(f, m, n, centers, cluster_std, random_state, r):\n X, y = make_blobs(m, n, centers=centers, cluster_std=cluster_std,\n random_state=random_state)\n ret = pd.DataFrame(X)\n return ret\n\n\ndef get_meta(df):\n ret = df.iloc[:0]\n return ret\n\n\ndef to_cudf(df, r):\n return cudf.from_pandas(df)\n\n\ndef dask_make_blobs(nrows, ncols, n_centers=8, cluster_std=1.0,\n center_box=(-10, 10), random_state=None, verbose=False):\n\n client = default_client()\n\n workers = client.has_what().keys()\n\n centers = np.random.uniform(center_box[0], center_box[1],\n size=(n_centers, ncols)).astype(np.float32)\n\n if verbose:\n print(\"Generating %d samples on %d workers (total=%d samples)\" %\n (math.ceil(nrows/len(workers)), len(workers), nrows))\n\n # Create dfs on each worker (gpu)\n dfs = [client.submit(create_df, n, math.ceil(nrows/len(workers)), ncols,\n centers, cluster_std,\n random_state, random.random(), workers=[worker])\n for worker, n in list(zip(workers, list(range(len(workers)))))]\n # Wait for completion\n wait(dfs)\n\n ddfs = [client.submit(to_cudf, df, random.random()) for df in dfs]\n # Wait for completion\n wait(ddfs)\n\n meta_ddf = client.submit(get_meta, dfs[0]).result()\n meta_cudf = client.submit(get_meta, ddfs[0]).result()\n\n d_df = dd.from_delayed(dfs, meta=meta_ddf)\n d_cudf = dask_cudf.from_delayed(ddfs, meta=meta_cudf)\n\n return d_df, d_cudf\n"
] | [
[
"numpy.random.uniform",
"sklearn.datasets.make_blobs",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jkurdek/TensorFlowASR | [
"a1999ac7f1eb5112c9557ca8043152828b9e5815"
] | [
"examples/conformer/inference/run_saved_model.py"
] | [
"# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\n\nfrom tensorflow_asr.utils import env_util\n\nlogger = env_util.setup_environment()\nimport tensorflow as tf\n\nDEFAULT_YAML = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"config.yml\")\n\ntf.keras.backend.clear_session()\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--saved_model\", type=str, default=None, help=\"The file path of saved model\")\n\nparser.add_argument(\"filename\", type=str, default=None, help=\"Audio file path\")\n\nargs = parser.parse_args()\n\nfrom tensorflow_asr.featurizers.speech_featurizers import read_raw_audio\n\nmodule = tf.saved_model.load(export_dir=args.saved_model)\n\nsignal = read_raw_audio(args.filename)\ntranscript = module.pred(signal)\n\nprint(\"Transcript: \", \"\".join([chr(u) for u in transcript]))\n"
] | [
[
"tensorflow.keras.backend.clear_session",
"tensorflow.saved_model.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Islanderrobotics/string_to_date_time | [
"d84bef720b586ce730c413b72d5f50b59a3d7960"
] | [
"code/stringtodatetime.py"
] | [
"import pandas as pd\nimport datetime\n\nclass StringToDateTime:\n ''' this class is designed to make converting strings to datetime more accessable\n this is done by creating an instance of the class StringToDateTime(df, column_names)\n df is where you will define the pandas dataframe that you will work with\n column_names is when you have a column names for columns you wish have converted to datetime\n that is not not [\"date\",\"dates\",\"starttime\",\"start_time\",\"start time\"], to use this input argument\n successfully you must pass in a list'''\n\n def __init__(self,df, column_names = []):\n if isinstance(column_names,list) is False:\n raise AttributeError(\"column_names needs to be a list\")\n self.df = df\n self.copy_df = df.copy()\n self.time_spot = {}\n self.time_list = [\"date\",\"dates\",\"starttime\",\"start_time\",\"start time\"]\n self.possible = [\" \" ,\",\",\"/\",\"-\", \":\"]\n self.greates = [i*0 for i in range(len(self.possible))]\n if (len(column_names)>0):\n for i in column_names:\n self.time_list.append(i)\n\n def _finding_the_columns(self):\n count = 0\n # time_spot = {}\n for i in self.df.columns:\n if i.lower() in set(self.time_list):\n self.time_spot[i] = count\n count+=1\n\n def _making_the_changes(self):\n for j in self.time_spot.keys():\n for i in self.df[j]:\n self.greates[0] = self.greates[0]+i.count(self.possible[0])\n self.greates[1] = self.greates[1]+i.count(self.possible[1])\n self.greates[2] = self.greates[2]+i.count(self.possible[2])\n self.greates[3] = self.greates[3]+i.count(self.possible[3])\n self.greates[4] = self.greates[4]+i.count(self.possible[4])\n for i in range(len(self.greates)):\n if self.greates[i] == max(self.greates):\n most_used_char = self.possible[i]\n for w in self.df[j]:\n rep = \"\"\n for l in w:\n try:\n int(l)\n rep = rep+l\n except ValueError:\n rep =rep+most_used_char\n\n self.df[j].replace(to_replace={w:rep}, inplace=True)\n # data[\"Date\"] = pd.to_datetime(data[\"Date\"],infer_datetime_format=True)\n self.df[j] = pd.to_datetime(self.df[j],infer_datetime_format=True)\n self.greates = [i*0 for i in range(len(self.possible))]\n def check(self):\n self._finding_the_columns()\n self._making_the_changes()\n return self.df\n def resource(self):\n links ={}\n links[\"youtube\"] = \"\"\n links[\"github\"] = \"\"\n for i in links.keys():\n print(f\"the link for {i} is {links[i]}\") \n\nif __name__ ==\"__main__\":\n import pandas as pd\n import datetime\n from stringtodatetime import StringToDateTime\n data = pd.read_csv(\"landslide_data3.csv\")\n# data = pd.read_csv('https://raw.githubusercontent.com/jldbc/coffee-quality-database/master/data/arabica_data_cleaned.csv')\n# data = pd.read_csv(\"travel_times.csv\"\n\n stringtodate = StringToDateTime(df = data)\n data = stringtodate.check()"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
haikusw/jaqalpaq | [
"d507e894cb897756a1e51c99582b736254995b4e"
] | [
"jaqalpaq/emulator/_validator.py"
] | [
"# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains\n# certain rights in this software.\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom jaqalpaq import JaqalError\nfrom jaqalpaq.parser import JaqalParseError\n\nfrom .noiseless import run_jaqal_string\n\n\ndef assertAlmostEqual(a, b):\n if np.isclose(a, b):\n return\n\n raise ValueError(f\"{a} and {b} differ by {a-b}\")\n\n\ndef assertEqual(a, b):\n if a == b:\n return\n\n raise ValueError(f\"{a} != {b}\")\n\n\ndef assertisinstance(a, b):\n if isinstance(a, b):\n return\n\n raise TypeError(f\"{type(a)} is not an instance of {b}\")\n\n\ndef generate_jaqal_validation(exe):\n \"\"\"[undocumented] Generate a description of the execution of a circuit\n\n :param exe: the ExecutionResult object to describe\n :return: a string that can appended to a Jaqal program and validated\n\n \"\"\"\n output = []\n emit = output.append\n\n emit(\"// EXPECTED READOUTS\")\n emit(\n \"\\n\".join(\n \" \".join((\"//\", mr.as_str, str(mr.as_int), str(mr.subcircuit.index),))\n for mr in exe.readouts\n )\n )\n\n emit(\"\\n// EXPECTED PROBABILITIES\")\n\n for sc_index, se in enumerate(exe.subcircuits):\n emit(f\"// SUBCIRCUIT {sc_index}\")\n for (n, ((s, ps), p)) in enumerate(\n zip(se.probability_by_str.items(), se.probability_by_int)\n ):\n assert ps == p\n emit(f\"// {s} {n} {p}\")\n\n return \"\\n\".join(output)\n\n\ndef parse_jaqal_validation(txt):\n \"\"\"[undocumented] parse Jaqal validation comments\n\n :param txt: a full Jaqal program, possibly with validation comments\n :return: a dictionary describing the validation\n\n \"\"\"\n section = None\n expected = {}\n s_idx = -1\n\n for line in txt.split(\"\\n\"):\n line = line.strip()\n\n # Resest on non-comments\n if not line.startswith(\"//\"):\n section = None\n s_idx = -1\n continue\n\n line = line[2:].strip()\n\n # Resest on empty comments\n if len(line) == 0:\n section = None\n s_idx = -1\n continue\n\n if section == \"meas\":\n true_str, true_int, subcirc = line.split()\n true_str_list.append(true_str)\n true_int_list.append(int(true_int))\n subcirc_list.append(int(subcirc))\n elif section == \"prob\":\n if line.startswith(\"SUBCIRCUIT \"):\n s_idx_n = int(line[11:].strip())\n if s_idx_n != s_idx + 1:\n raise ValueError(\"Malformed validation.\")\n\n s_idx = s_idx_n\n\n str_prob[s_idx] = OrderedDict()\n int_prob[s_idx] = OrderedDict()\n continue\n\n key_str, key_int, val = line.split()\n val = float(val)\n str_prob[s_idx][key_str] = val\n int_prob[s_idx][int(key_int)] = val\n elif section == \"error\":\n exc_name, *exc_message = line.split(\": \", 1)\n if exc_name == \"jaqalpaq.error.JaqalError\":\n exc = JaqalError\n elif exc_name == \"jaqalpaq.parser.tree.JaqalParseError\":\n exc = JaqalParseError\n else:\n raise NotImplementedError(f\"Unwhitelisted exception {exc_name}\")\n expected[\"error\"] = exc, exc_message\n else:\n if section is not None:\n raise ValueError(\"Malformed validation.\")\n\n if line == \"EXPECTED READOUTS\":\n section = \"meas\"\n true_str_list = expected[\"true_str_list\"] = []\n true_int_list = expected[\"true_int_list\"] = []\n subcirc_list = expected[\"subcirc_list\"] = []\n elif line == \"EXPECTED PROBABILITIES\":\n section = \"prob\"\n str_prob = expected[\"str_prob\"] = {}\n int_prob = expected[\"int_prob\"] = {}\n elif line == \"EXPECTED ERROR\":\n section = \"error\"\n\n return expected\n\n\ndef validate_jaqal_string(txt):\n 
\"\"\"[undocumented] validate a Jaqal program with validation comments\n\n :param txt: a full Jaqal program, possibly with validation comments\n :return: a list of validations performed\n\n \"\"\"\n expected = parse_jaqal_validation(txt)\n\n if \"error\" in expected:\n exc, exc_message = expected[\"error\"]\n try:\n exe = run_jaqal_string(txt)\n except Exception as e:\n assertisinstance(e, exc)\n if len(exc_message) > 0:\n assertEqual(exc_message[0], str(e))\n else:\n raise ValueError(\"Expected an exception, but none thrown.\")\n return [\"raised expected exception\"]\n\n exe = run_jaqal_string(txt)\n\n validated = []\n if \"true_str_list\" in expected:\n true_str_list = expected[\"true_str_list\"]\n true_int_list = expected[\"true_int_list\"]\n subcirc_list = expected[\"subcirc_list\"]\n\n assertEqual(true_str_list, [a.as_str for a in exe.readouts])\n assertEqual(true_int_list, [a.as_int for a in exe.readouts])\n\n for n, t_str in enumerate(true_str_list):\n assertEqual(t_str, exe.readouts[n].as_str)\n assertEqual(true_int_list[n], exe.readouts[n].as_int)\n assertEqual(subcirc_list[n], exe.readouts[n].subcircuit.index)\n validated.append(\"measurements agree\")\n\n if \"str_prob\" in expected:\n str_prob = expected[\"str_prob\"]\n for n, act_P in enumerate(exe.subcircuits):\n exp_P = str_prob[n]\n for (ka, va), (kb, vb) in zip(\n exp_P.items(), act_P.probability_by_str.items()\n ):\n assertEqual(ka, kb)\n assertAlmostEqual(va, vb)\n\n int_prob = expected[\"int_prob\"]\n for n, act_P in enumerate(exe.subcircuits):\n exp_P = int_prob[n]\n for (ka, va), (kb, vb) in zip(\n exp_P.items(), enumerate(act_P.probability_by_int),\n ):\n assertEqual(ka, kb)\n assertAlmostEqual(va, vb)\n\n validated.append(\"probabilities agree\")\n\n return validated\n"
] | [
[
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aboerzel/ANPR-keras | [
"c49fe4031752227d99d4acccccb35f6cb896db68"
] | [
"pyimagesearch/io/hdf5datasetloader.py"
] | [
"import h5py\nimport numpy as np\n\n\nclass Hdf5DatasetLoader:\n def __init__(self, preprocessors=None):\n self.preprocessors = preprocessors\n\n # if the preprocessors are None, initialize them as an empty list\n if self.preprocessors is None:\n self.preprocessors = []\n\n def load(self, db_path, shuffle=False, max_items=np.inf):\n\n db = h5py.File(db_path, 'r')\n images = np.array(db[\"images\"])\n labels = np.array(db[\"labels\"])\n db.close()\n\n if shuffle:\n randomized_indexes = np.arange(len(images))\n np.random.shuffle(randomized_indexes)\n images = images[randomized_indexes]\n labels = labels[randomized_indexes]\n\n if max_items == np.inf or max_items > len(images):\n max_items = len(images)\n\n images = images[0:max_items]\n labels = labels[0:max_items]\n\n # preprocess images\n for i, (image, label) in enumerate(zip(images, labels)):\n\n for p in self.preprocessors:\n image = p.preprocess(image)\n images[i] = image\n\n return images, labels\n"
] | [
[
"numpy.array",
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ColfaxResearch/qarbo | [
"bc905ecba7aa1abf04d79db7ef3e2c9178ba7acf"
] | [
"qarpo/demoutils.py"
] | [
"from IPython.core.display import HTML\nimport threading\nfrom IPython.display import display, Image\nimport ipywidgets as widgets\nimport time\nimport queue\nimport subprocess\nimport datetime\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom IPython.display import set_matplotlib_formats\nimport os \nimport warnings\nfrom .disclaimer import *\n#Global variable PROG_START\nPROG_START = 0\n\ndef videoHTML(title, videos_list, stats=None):\n '''\n title: string, h2 size\n video: source to videos to display in form of list []\n stats: path to txt file to display any metrics for the output\n '''\n if stats:\n with open(stats) as f:\n time = f.readline()\n frames = f.readline()\n stats_line = \"<p>{frames} frames processed in {time} seconds</p>\".format(frames=frames, time=time)\n\n else:\n stats_line = \"\"\n video_string = \"\"\n height = '480' if len(videos_list) == 1 else '240'\n for x in range(len(videos_list)):\n video_string += \"<video alt=\\\"\\\" controls autoplay muted height=\\\"\"+height+\"\\\"><source src=\\\"\"+videos_list[x]+\"\\\" type=\\\"video/mp4\\\" /></video>\"\n return HTML('''<h2>{title}</h2>\n {stats_line}\n {videos}\n '''.format(title=title, videos=video_string, stats_line=stats_line))\n\n\n\ndef outputHTML(title, result_path, output_type, stats=None):\n\t\t'''\n\t\tdevice: tuple of edge and accelerator\n\t\t'''\n\t\top_list = []\n\t\tstats = result_path+'/stats.txt'\n\t\tfor vid in os.listdir(result_path):\n\t\t\tif vid.endswith(output_type):\n\t\t\t\top_list.append(result_path+'/'+vid)\n\t\tif os.path.isfile(stats):\n\t\t\twith open(stats) as f:\n\t\t\t\ttime = f.readline()\n\t\t\t\tframes = f.readline()\n\t\t\t\ttext = f.readline()\n\t\t\tif text:\n\t\t\t\tstats_line = text\n\t\t\telse:\n\t\t\t\tstats_line = \"<p>{frames} frames processed in {time} seconds</p>\".format(frames=frames, time=time)\n\t\telse:\n\t\t\tstats_line = \"\"\n\t\top_string = \"\"\n\t\theight = '480' if len(op_list) == 1 else '120'\n\t\tif output_type == \".mp4\":\n\t\t\tfor x in range(len(op_list)):\n\t\t\t\top_string += \"<video alt=\\\"\\\" controls autoplay muted height=\\\"\"+height+\"\\\"><source src=\\\"\"+op_list[x]+\"\\\" type=\\\"video/mp4\\\" /></video>\"\n\t\telif output_type == \".png\":\n\t\t\tfor x in range(len(op_list)):\n\t\t\t\top_string += \"<img src='{img}' width='783' height='{height}'>\".format(img=op_list[x], height=height)\n\t\treturn HTML('''<h2>{title}</h2>\n \t\t\t\t{stats_line}\n \t\t\t\t{op}\n \t\t\t\t'''.format(title=title, op=op_string, stats_line=stats_line))\n\n\n\n\ndef summaryPlot(results_list, x_axis, y_axis, title, plot, colors=None, disclaimer=None):\n ''' Bar plot input:\n\tresults_dict: dictionary of path to result file and label {path_to_result:label}\n\tx_axis: label of the x axis\n\ty_axis: label of the y axis\n\ttitle: title of the graph\n '''\n warnings.filterwarnings('ignore')\n if plot=='time':\n clr = 'xkcd:blue'\n else:\n clr = 'xkcd:azure'\n if colors is not None:\n clr = colors\n # If the disclaimer is not specified, it defaults to None\n # and then replaced with the default text below:\n if disclaimer is None:\n disclaimer=defaultDisclaimer()\n\n plt.figure(figsize=(15, 8))\n plt.title(title , fontsize=28, color='black', fontweight='bold')\n plt.ylabel(y_axis, fontsize=16, color=clr)\n plt.xlabel(x_axis, fontsize=16, color=clr)\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n val = []\n arch = []\n diff = 0\n for path, hw in results_list:\n #Check if the stat file exist and not empty\n if os.path.isfile(path) and 
os.path.getsize(path) != 0:\n f = open(path, \"r\")\n l1_time = float(f.readline())\n l2_count = float(f.readline())\n if plot==\"time\":\n val.append(l1_time)\n elif plot==\"fps\":\n val.append((l2_count/l1_time))\n f.close()\n else:\n val.append(0)\n arch.append(hw)\n\n offset = max(val)/100\n for v in val:\n if v == 0:\n data = 'N/A'\n y = 0\n else:\n precision = 2 \n if v >= pow(10, precision):\n data = '{:.0f}'.format(round(v/pow(10, precision+1), precision)*pow(10, precision+1))\n else:\n data = '{{:.{:d}g}}'.format(round(precision)).format(v)\n y = v + offset \n plt.text(diff, y, data, fontsize=14, multialignment=\"center\",horizontalalignment=\"center\", verticalalignment=\"bottom\", color='black')\n diff += 1\n plt.ylim(top=(max(val)+10*offset))\n plt.bar(arch, val, width=0.8, align='center', color=clr)\n plt.pause(1)\n # to disable the disclaimer display, supply \"disclaimer=False\" argument\n if disclaimer:\n display(widgets.HTML(value=disclaimer))\n\n\n\ndef summaryPlotWithURL(results_list, x_axis, y_axis, title, plot, colors=None, disclaimer=None):\n ''' Bar plot input:\n\tresults_dict: dictionary of path to result file and label {path_to_result:label}\n\tx_axis: label of the x axis\n\ty_axis: label of the y axis\n\ttitle: title of the graph\n plot: plot type (time, fps)\n '''\n warnings.filterwarnings('ignore')\n set_matplotlib_formats(\"svg\")\n if plot=='time':\n clr = 'xkcd:blue'\n else:\n clr = 'xkcd:azure'\n if colors is not None:\n clr = colors\n # If the disclaimer is not specified, it defaults to None\n # and then replaced with the default text below:\n if disclaimer is None:\n disclaimer=defaultDisclaimer()\n\n plt.figure(figsize=(11, 7))\n plt.title(title , fontsize=24, color='black', fontweight='bold')\n plt.ylabel(y_axis, fontsize=12, color=clr)\n plt.xlabel(x_axis, fontsize=12, color=clr, labelpad=60, verticalalignment='top')\n #plt.xticks(fontsize=16)\n plt.xticks([])\n plt.yticks(fontsize=12)\n\n val = []\n arch = []\n xlab = []\n URLs = []\n xv = 0\n diff = 0\n \n for path, hw, url in results_list:\n #Check if the stat file exist and not empty\n if os.path.isfile(path) and os.path.getsize(path) != 0:\n f = open(path, \"r\")\n l1_time = float(f.readline())\n l2_count = float(f.readline())\n if plot==\"time\":\n val.append(l1_time)\n elif plot==\"fps\":\n val.append((l2_count/l1_time))\n f.close()\n else:\n val.append(0)\n arch.append(hw)\n URLs.append(url)\n xlab.append(xv)\n xv += 1\n\n offset = max(val)/100\n \n for v in val:\n if v == 0:\n data = 'N/A'\n y = 0\n else:\n precision = 2 \n if v >= pow(10, precision):\n data = '{:.0f}'.format(round(v/pow(10, precision+1), precision)*pow(10, precision+1))\n else:\n data = '{{:.{:d}g}}'.format(round(precision)).format(v)\n y = v + offset \n plt.text(diff, y, data, fontsize=12, multialignment=\"center\",horizontalalignment=\"center\", verticalalignment=\"bottom\", color='black')\n diff += 1\n plt.ylim(top=(max(val)+10*offset))\n \n plt.bar(xlab, val, width=0.8, align='center', color=clr)\n d = -0.35\n for name, link in zip(arch, URLs):\n plt.annotate(name, xy=(2,2), xytext=(d, -3.0),url=link, color='tab:blue', fontsize=12, verticalalignment='top', bbox=dict(color='w', alpha=1e-6, url=link))\n d += 1\n plt.pause(1)\n # to disable the disclaimer display, supply \"disclaimer=False\" argument\n if disclaimer:\n display(widgets.HTML(value=disclaimer))\n\n\n\ndef liveQstat():\n cmd = ['qstat']\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output,_ = p.communicate()\n now = 
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n qstat = widgets.Output(layout=widgets.Layout(width='100%', height='200px', border='1px solid gray', overflow_y='auto'))\n stop_signal_q = queue.Queue()\n\n def _work(qstat,stop_signal_q):\n while stop_signal_q.empty():\n cmd = ['qstat']\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output,_ = p.communicate()\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n qstat.outputs = tuple()\n qstat.append_stdout(now+'\\n\\n'+output.decode()+'\\n\\n\\n')\n time.sleep(10.0)\n qstat.clear_output(wait=False)\n print('liveQstat stopped')\n thread = threading.Thread(target=_work, args=(qstat, stop_signal_q))\n\n thread.start()\n sb = widgets.Button(description='Stop')\n def _stop_qstat(evt):\n stop_signal_q.put(True)\n sb.on_click(_stop_qstat)\n display(qstat)\n display(sb)\n\n \ndef progressIndicator(path, file_name , title, min_, max_):\n '''\n\tProgress indicator reads first line in the file \"path\" \n\tpath: path to the progress file\n file_name: file with data to track\n\ttitle: description of the bar\n\tmin_: min_ value for the progress bar\n\tmax_: max value in the progress bar\n\n '''\n style = {'description_width': 'initial'}\n progress_bar = widgets.FloatProgress(\n value=0.0,\n min=min_,\n max=max_,\n description=title,\n bar_style='info',\n orientation='horizontal',\n style=style\n)\n remain_time = widgets.HTML(\n value='0',\n placeholder='0',\n description='Remaining:',\n style=style\n)\n est_time = widgets.HTML(\n value='0',\n placeholder='0',\n description='Total Estimated:',\n style=style\n)\n\n progress_bar.value=min_\n\n #Check if results directory exists, if not create it and create the progress data file\n if not os.path.isdir(path):\n os.makedirs(path, exist_ok=True)\n f = open(path+'/'+file_name, \"w\")\n f.close()\n \n def _work(progress_bar, est_time,remain_time, path):\n box_layout = widgets.Layout(display='flex', flex_flow='column', align_items='stretch', border='ridge', width='70%', height='')\n box = widgets.HBox([progress_bar, est_time, remain_time], layout=box_layout)\n display(box)\n # progress\n last_status = 0.0\n remain_val = '0'\n est_val = '0'\n output_file = path\n while last_status < 100:\n if os.path.isfile(output_file):\n with open(output_file, \"r\") as fh:\n line1 = fh.readline() \t#Progress \n line2 = fh.readline() \t#Remaining time\n line3 = fh.readline() \t#Estimated total time\n if line1 and line2 and line3:\n last_status = float(line1)\n remain_val = line2\n est_val = line3\n progress_bar.value = last_status\n remain_time.value = remain_val+' seconds' \n est_time.value = est_val+' seconds' \n else:\n cmd = ['ls']\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output,_ = p.communicate()\n remain_time.value = '0'+' seconds' \n time.sleep(1)\n os.remove(output_file)\n\n\n thread = threading.Thread(target=_work, args=(progress_bar, est_time, remain_time, os.path.join(path, file_name)))\n thread.start()\n time.sleep(0.1)\n\n\ndef simpleProgressUpdate(file_name, current_time, estimated_time):\n progress = round(100*current_time/estimated_time, 1)\n remaining_time = round(estimated_time-current_time, 1)\n estimated_time = round(estimated_time, 1)\n with open(file_name, \"w\") as progress_file:\n progress_file.write(str(progress)+'\\n')\n progress_file.write(str(remaining_time)+'\\n')\n progress_file.write(str(estimated_time)+'\\n')\n\n\nclass ProgressUpdate:\n\n def __init__(self):\n self.progress_data = []\n self.latest_update = []\n self.main_thread = threading.current_thread()\n 
def _writeToFile(progress, latest_update):\n more_data = True\n while more_data:\n for id_, (new_data, latest) in enumerate(zip(self.progress_data, self.latest_update)):\n if not self.main_thread.is_alive():\n more_data = False\n file_name, time_diff, frame_count, video_len = new_data\n file_name, last_c = latest\n if last_c == frame_count:\n continue\n else:\n self.latest_update[id_] = [file_name, frame_count]\n progress = round(100*(frame_count/video_len), 1)\n remaining_time = round((time_diff/frame_count)*(video_len-frame_count), 1)\n estimated_time = round((time_diff/frame_count)*video_len, 1)\n with open(file_name, \"w+\") as progress_file:\n progress_file.write(str(progress)+'\\n')\n progress_file.write(str(remaining_time)+'\\n')\n progress_file.write(str(estimated_time)+'\\n')\n time.sleep(1)\n \n\n self.thread = threading.Thread(target=_writeToFile, args=(self.progress_data, self.latest_update))\n if not self.thread.is_alive():\n self.thread.start()\n\n\n def progress(self, file_name, time_diff, frame_count, video_len):\n for id_, item in enumerate(self.progress_data):\n file_, _, _, _ = item\n if file_name == file_:\n self.progress_data[id_] = [file_name, time_diff, frame_count, video_len]\n return\n self.progress_data.append([file_name, time_diff, frame_count, video_len])\n self.latest_update.append([file_name, -1])\n \n \n\ndef progressUpdate(file_name, time_diff, frame_count, video_len):\n global PROG_START\n if not isinstance(PROG_START, ProgressUpdate):\n print(\"Create progress tracker\")\n PROG_START = ProgressUpdate()\n PROG_START.progress(file_name, time_diff, frame_count, video_len)\n\n"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sc420/pygame-rl | [
"f81da559385876616d99c74b43e4345f53d086d2"
] | [
"pygame_rl/scenario/soccer/envs/soccer_v0.py"
] | [
"# Third-party modules\nimport gym\nimport numpy as np\n\n# Project modules\nfrom pygame_rl.scenario.soccer.actions import Actions\nfrom pygame_rl.scenario.soccer.agent_modes import AgentModes\nfrom pygame_rl.scenario.soccer.ai_modes import AiModes\nfrom pygame_rl.scenario.soccer.map_data import MapData\nfrom pygame_rl.scenario.soccer.options import Options\nfrom pygame_rl.scenario.soccer.renderer import Renderer\nfrom pygame_rl.scenario.soccer.state import State\nfrom pygame_rl.scenario.soccer.teams import Teams\n\n\nclass SoccerV0(gym.Env):\n \"\"\"Soccer environment following OpenAI Gym API.\n \"\"\"\n ### Gym Attributes ###\n\n # Metadata\n metadata = {'render.modes': ['rgb_array']}\n # Observation space\n observation_space = None\n # Action space\n action_space = None\n\n ### Environment Attributes ###\n\n # Environment options\n options = None\n # Renderer options\n renderer_options = None\n # Map data\n map_data = None\n # Renderer\n renderer = None\n\n ### State ###\n\n # State\n state = None\n # Numpy random state\n random_state = None\n # Cached action\n cached_action = None\n # Lazy loading of renderer\n renderer_loaded = False\n\n ### Gym Methods ###\n\n def seed(self, seed=None):\n self.random_state = np.random.RandomState(seed)\n self.state.update_random_state(self.random_state)\n return self.random_state\n\n def step(self, action):\n # Cache the actions\n self.cached_action = action\n # Update agent actions\n self._update_agent_actions()\n # Get the intended positions\n intended_pos = self._get_intended_pos(self.cached_action)\n # Update the agent positions\n self._update_agent_pos(intended_pos)\n # Update taken actions\n self._update_taken_actions()\n # Update frame skipping index\n self._update_frame_skip_index()\n # Update time step\n self._update_time_step()\n # Get the reward\n reward = self._get_reward()\n # Check terminal\n done = self.state.is_terminal()\n # Return the state, reward, done, and info\n gym_state = self._gym_state()\n return gym_state, reward, done, {}\n\n def reset(self):\n self.state.reset()\n # Return the state\n gym_state = self._gym_state()\n return gym_state\n\n def render(self, mode='rgb_array'):\n # Lazy load the renderer\n if not self.renderer_loaded:\n self.renderer.load()\n self.renderer_loaded = True\n # Render\n self.renderer.render()\n # Return renderer screenshot\n return self.renderer.get_screenshot()\n\n ### Initialization Methods ###\n\n def __init__(self):\n # Use default random state\n self.random_state = np.random.RandomState(0)\n\n def load(self):\n # Save or create environment options\n self.options = self.options or Options()\n # Load map data\n self.map_data = MapData(self.options.map_path)\n # Initialize the state\n self.state = State(self, self.options,\n self.map_data, self.random_state)\n # Initialize renderer\n self.renderer = Renderer(\n self.options.map_path, self, self.renderer_options)\n # Initialize observation space\n self._init_obs_space()\n # Initialize action space\n self._init_action_space()\n\n def _init_obs_space(self):\n map_size = self.map_data.map_size\n map_len = np.prod(map_size)\n agent_size = len(Teams) * self.options.team_size\n player_goal_size = len(self.map_data.goals['PLAYER'])\n computer_goal_size = len(self.map_data.goals['COMPUTER'])\n low_map_bound = [-map_size[0] + 1, -map_size[0] + 1]\n high_map_bound = [map_size[0] - 1, map_size[1] - 1]\n # Map, agent positions, relative player goals, relative computer goals,\n # other agent positions, ball possessions, modes, actions\n low = map_len * [0] + 
\\\n agent_size * [0, 0] + \\\n agent_size * player_goal_size * low_map_bound + \\\n agent_size * computer_goal_size * low_map_bound + \\\n agent_size * (agent_size - 1) * low_map_bound + \\\n agent_size * [0] + \\\n agent_size * [0] + \\\n agent_size * [0]\n high = map_len * [3] + \\\n agent_size * high_map_bound + \\\n agent_size * player_goal_size * high_map_bound + \\\n agent_size * computer_goal_size * high_map_bound + \\\n agent_size * (agent_size - 1) * high_map_bound + \\\n agent_size * [1] + \\\n agent_size * [len(AgentModes) - 1] + \\\n agent_size * [len(Actions) - 1]\n self.observation_space = gym.spaces.Box(\n low=np.array(low), high=np.array(high), dtype=np.uint8)\n\n def _init_action_space(self):\n agent_size = len(Teams) * self.options.team_size\n nvec = [len(Actions)] * agent_size\n self.action_space = gym.spaces.MultiDiscrete(nvec)\n\n def _gym_state(self):\n map_size = self.map_data.map_size\n state = self.state.get_gym_state(map_size)\n return state\n\n def _update_agent_actions(self):\n for team_name in Teams:\n for team_agent_index in range(self.options.team_size):\n agent_index = self.get_agent_index(team_name, team_agent_index)\n # Skip and update if the cached action has been specified\n agent_action = self.cached_action[agent_index]\n if agent_action != Actions.NOOP:\n self.cached_action[agent_index] = Actions(agent_action)\n continue\n # Select the previous action if it's frame skipping\n if self.state.get_agent_frame_skip_index(agent_index) > 0:\n action = self.state.get_agent_action(agent_index)\n else:\n action = self._get_ai_action(team_name, team_agent_index)\n # Update the cached action\n self.cached_action[agent_index] = action\n\n def get_agent_index(self, team_name, team_agent_index):\n # Map the team name to the group index\n if team_name == Teams.PLAYER:\n group_index = 0\n elif team_name == Teams.COMPUTER:\n group_index = 1\n else:\n raise KeyError('Unknown team name {}'.format(team_name))\n # Calculate the agent index\n return self.options.team_size * group_index + team_agent_index\n\n def get_opponent_team_name(self, team_name):\n if team_name == Teams.PLAYER:\n return Teams.COMPUTER\n elif team_name == Teams.COMPUTER:\n return Teams.PLAYER\n else:\n raise KeyError('Unknown team name {}'.format(team_name))\n\n def get_team_agent_index(self, agent_index):\n return agent_index % self.options.team_size\n\n def _init_cached_action(self):\n self.cached_action = {}\n for team_name in Teams:\n for team_agent_index in range(self.options.team_size):\n agent_index = self.get_agent_index(team_name, team_agent_index)\n self.cached_action[agent_index] = None\n\n def _get_agent_actions(self, player_action):\n # Build a dict of the agent index to the actions\n actions = {}\n for team_name in Teams:\n for team_agent_index in range(self.options.team_size):\n agent_index = self.get_agent_index(team_name, team_agent_index)\n # Choose the action by the team and agent index\n if team_name == Teams.PLAYER:\n if team_agent_index <= 0:\n # The action only takes effect on the first agent in the\n # team\n agent_action = player_action\n else:\n # The collaborators have the same AI as the opponents\n agent_action = self._get_ai_action(\n team_name, team_agent_index)\n elif team_name == Teams.COMPUTER:\n agent_action = self._get_ai_action(\n team_name, team_agent_index)\n else:\n raise KeyError('Unknown team name {}'.format(team_name))\n actions[agent_index] = agent_action\n return actions\n\n def _get_walkable_moved_pos(self, pos, action):\n # Get the moved position\n 
moved_pos = self.get_moved_pos(pos, action)\n # Use the moved position if it's in the walkable area\n if moved_pos in self.map_data.walkable:\n return moved_pos\n else:\n return pos\n\n def _update_agent_pos(self, intended_pos):\n # Detect the overlapping positions and switch the ball\n detecting_overlap = True\n has_switched = False\n while detecting_overlap:\n # Get the overlapping position to agent index mapping\n overlapping_pos_to_agent = self._get_overlapping_pos_to_agent(\n intended_pos)\n # Update the positions\n detecting_overlap = False\n for (_, agent_index_list) in overlapping_pos_to_agent.items():\n if len(agent_index_list) > 1:\n # Update the ball possession only once\n if not has_switched:\n switch = self._update_ball_possession(agent_index_list)\n has_switched = has_switched or switch\n # Use the old positions\n for agent_index in agent_index_list:\n intended_pos[agent_index] = self.state.get_agent_pos(\n agent_index)\n # Indicate the process should continue\n detecting_overlap = True\n # Update the non-overlapping positions\n for (agent_index, pos) in intended_pos.items():\n self.state.set_agent_pos(agent_index, pos)\n\n def _update_taken_actions(self):\n for team_name in Teams:\n for team_agent_index in range(self.options.team_size):\n agent_index = self.get_agent_index(team_name, team_agent_index)\n action = self.cached_action[agent_index]\n self.state.set_agent_action(agent_index, action)\n\n def _update_frame_skip_index(self):\n for team_name in Teams:\n for team_agent_index in range(self.options.team_size):\n agent_index = self.get_agent_index(team_name, team_agent_index)\n self.state.increase_frame_skip_index(\n agent_index, self.options.ai_frame_skip)\n\n def _update_time_step(self):\n self.state.increase_time_step()\n\n def _update_ball_possession(self, agent_index_list):\n # Get the ball possessions of the agents\n has_ball_agent_index = None\n no_ball_agent_list = []\n for agent_index in agent_index_list:\n has_ball = self.state.get_agent_ball(agent_index)\n if has_ball:\n has_ball_agent_index = agent_index\n else:\n no_ball_agent_list.append(agent_index)\n # Only switch the ball possession when one agent has the ball in the\n # list\n if not has_ball_agent_index is None:\n # Randomly switch the ball\n rand_idx = self.random_state.randint(len(no_ball_agent_list))\n switch_agent_index = no_ball_agent_list[rand_idx]\n self.state.switch_ball(has_ball_agent_index, switch_agent_index)\n # Indicate the switching has occurred\n return True\n # Indicate no switch\n return False\n\n def _get_intended_pos(self, actions):\n # Build a dict of the agent index to the intended moved position\n intended_pos = {}\n for team_name in Teams:\n for team_agent_index in range(self.options.team_size):\n agent_index = self.get_agent_index(team_name, team_agent_index)\n # Get the action\n action = actions[agent_index]\n # Get the original position\n pos = self.state.get_agent_pos(agent_index)\n # Save the walkable position\n intended_pos[agent_index] = self._get_walkable_moved_pos(\n pos, action)\n return intended_pos\n\n def _get_ai_action(self, team_name, team_agent_index):\n # Get the opponent team name\n opponent_team_name = self.get_opponent_team_name(team_name)\n # Get the agent info\n agent_index = self.get_agent_index(team_name, team_agent_index)\n agent_pos = self.state.get_agent_pos(agent_index)\n agent_ball = self.state.get_agent_ball(agent_index)\n agent_mode = self.state.get_agent_mode(agent_index)\n agent_frame_skip_index = self.state.get_agent_frame_skip_index(\n 
agent_index)\n # Select the previous action if it's frame skipping\n if agent_frame_skip_index > 0:\n return self.state.get_agent_action(agent_index)\n # Get the position of the nearest opponent\n nearest_opponent_index = self._get_nearest_opponent_index(\n team_name, team_agent_index)\n nearest_opponent_pos = self.state.get_agent_pos(nearest_opponent_index)\n # Get the position of the defensive target\n defensive_target_agent_index = self._get_defensive_agent_index(\n team_name, team_agent_index)\n defensive_target_agent_pos = self.state.get_agent_pos(\n defensive_target_agent_index)\n # Calculate the target position and the strategic mode\n if agent_mode == AgentModes.DEFENSIVE:\n if agent_ball:\n target_pos = nearest_opponent_pos\n strategic_mode = AiModes.AVOID\n else:\n # Calculate the distance from the agent\n goals = self.map_data.goals[opponent_team_name.name]\n distances = [self.get_pos_distance(goal_pos,\n defensive_target_agent_pos)\n for goal_pos in goals]\n # Select the minimum distance\n min_distance_index = np.argmin(distances)\n target_pos = goals[min_distance_index]\n strategic_mode = AiModes.APPROACH\n elif agent_mode == AgentModes.OFFENSIVE:\n if agent_ball:\n # Calculate the distance from the opponent\n goals = self.map_data.goals[team_name.name]\n distances = [self.get_pos_distance(goal_pos,\n nearest_opponent_pos)\n for goal_pos in goals]\n # Select the maximum distance\n max_distance_index = np.argmax(distances)\n target_pos = goals[max_distance_index]\n strategic_mode = AiModes.APPROACH\n else:\n target_pos = defensive_target_agent_pos\n strategic_mode = AiModes.INTERCEPT\n else:\n raise KeyError('Unknown agent mode {}'.format(agent_mode))\n # Get the strategic action\n action = self._get_strategic_action(\n agent_pos, target_pos, strategic_mode)\n return action\n\n def _get_nearest_opponent_index(self, team_name, team_agent_index):\n # Get the opponent team name\n opponent_team_name = self.get_opponent_team_name(team_name)\n # Get the agent position\n agent_index = self.get_agent_index(team_name, team_agent_index)\n agent_pos = self.state.get_agent_pos(agent_index)\n # Find the nearest opponent position\n nearest_opponent_index = None\n nearest_dist = np.inf\n for opponent_team_agent_index in range(self.options.team_size):\n opponent_index = self.get_agent_index(\n opponent_team_name, opponent_team_agent_index)\n opponent_pos = self.state.get_agent_pos(opponent_index)\n # Calculate the distance\n dist = self.get_pos_distance(agent_pos, opponent_pos)\n if dist < nearest_dist:\n nearest_opponent_index = opponent_index\n nearest_dist = dist\n return nearest_opponent_index\n\n def _get_defensive_agent_index(self, team_name, team_agent_index):\n # Get the ball possession status\n ball_possession = self.state.get_ball_possession()\n has_ball_agent_index = ball_possession['agent_index']\n has_ball_team_name = ball_possession['team_name']\n if has_ball_team_name != team_name:\n # Defend the opponent who possesses the ball\n return has_ball_agent_index\n else:\n # Defend the nearest opponent\n return self._get_nearest_opponent_index(team_name, team_agent_index)\n\n def _get_strategic_action(self, source_pos, target_pos, mode):\n # Calculate the original Euclidean distance\n orig_dist = self.get_pos_distance(source_pos, target_pos)\n # Find the best action\n rand_idx = self.random_state.randint(len(Actions) - 1)\n best_action = Actions(rand_idx + 1)\n best_dist = orig_dist\n # Shuffle the actions except NOOP\n rand_idxs = self.random_state.choice(\n len(Actions) - 1, 
len(Actions) - 1, replace=False)\n shuffled_actions = [Actions(i + 1) for i in rand_idxs]\n # Find the best action\n for action in shuffled_actions:\n # Get the moved position after doing the action\n moved_pos = self.get_moved_pos(source_pos, action)\n # Check whether the moved position is walkable\n if not moved_pos in self.map_data.walkable:\n continue\n # Calculate the new Euclidean distance\n moved_dist = self.get_pos_distance(moved_pos, target_pos)\n if mode == AiModes.APPROACH:\n if moved_dist < best_dist:\n best_action = action\n best_dist = moved_dist\n elif mode == AiModes.AVOID:\n if moved_dist > best_dist:\n best_action = action\n best_dist = moved_dist\n elif mode == AiModes.INTERCEPT:\n if moved_dist < best_dist and moved_dist >= 1.0:\n best_action = action\n best_dist = moved_dist\n else:\n raise KeyError('Unknown mode {}'.format(mode))\n return best_action\n\n def _get_overlapping_pos_to_agent(self, intended_pos):\n overlapping_pos_to_agent = {}\n for (agent_index, pos) in intended_pos.items():\n # Use the old position if the new position is not walkable\n if not pos in self.map_data.walkable:\n pos = self.state.get_agent_pos(agent_index)\n # Use the tuple as the key\n pos_tuple = tuple(pos)\n if pos_tuple in overlapping_pos_to_agent:\n overlapping_pos_to_agent[pos_tuple].append(agent_index)\n else:\n overlapping_pos_to_agent[pos_tuple] = [agent_index]\n return overlapping_pos_to_agent\n\n def _get_reward(self):\n if self.state.is_team_win(Teams.PLAYER):\n return 1.0\n elif self.state.is_team_win(Teams.COMPUTER):\n return -1.0\n else:\n return 0.0\n\n @staticmethod\n def get_moved_pos(pos, action):\n # Copy the position\n pos = list(pos)\n # Move to the 4-direction grid\n if action == Actions.MOVE_RIGHT:\n pos[0] += 1\n elif action == Actions.MOVE_UP:\n pos[1] -= 1\n elif action == Actions.MOVE_LEFT:\n pos[0] -= 1\n elif action == Actions.MOVE_DOWN:\n pos[1] += 1\n elif action == Actions.STAND:\n pass\n else:\n raise KeyError('Unknown action {}'.format(action))\n return pos\n\n @staticmethod\n def get_pos_distance(pos1, pos2):\n return np.hypot(pos2[0] - pos1[0], pos2[1] - pos1[1])\n"
] | [
[
"numpy.random.RandomState",
"numpy.argmax",
"numpy.argmin",
"numpy.prod",
"numpy.array",
"numpy.hypot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cwdt-collective/eclipse | [
"922f8ea7781bbf5ffd9ac31f6af89826ae1970cd"
] | [
"eclipse/model.py"
] | [
"from collections import OrderedDict\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass Bottleneck(nn.Module):\n\texpansion = 4\n\n\tdef __init__(self, inplanes, planes, stride=1):\n\t\tsuper().__init__()\n\n\t\t# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1\n\t\tself.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)\n\t\tself.bn1 = nn.BatchNorm2d(planes)\n\n\t\tself.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)\n\t\tself.bn2 = nn.BatchNorm2d(planes)\n\n\t\tself.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()\n\n\t\tself.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)\n\t\tself.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.downsample = None\n\t\tself.stride = stride\n\n\t\tif stride > 1 or inplanes != planes * Bottleneck.expansion:\n\t\t\t# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1\n\t\t\tself.downsample = nn.Sequential(\n\t\t\t OrderedDict([(\"-1\", nn.AvgPool2d(stride)),\n\t\t\t (\"0\", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),\n\t\t\t (\"1\", nn.BatchNorm2d(planes * self.expansion))]))\n\n\tdef forward(self, x: torch.Tensor):\n\t\tidentity = x\n\n\t\tout = self.relu(self.bn1(self.conv1(x)))\n\t\tout = self.relu(self.bn2(self.conv2(out)))\n\t\tout = self.avgpool(out)\n\t\tout = self.bn3(self.conv3(out))\n\n\t\tif self.downsample is not None:\n\t\t\tidentity = self.downsample(x)\n\n\t\tout += identity\n\t\tout = self.relu(out)\n\t\treturn out\n\n\nclass AttentionPool2d(nn.Module):\n\n\tdef __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):\n\t\tsuper().__init__()\n\t\tself.positional_embedding = nn.Parameter(\n\t\t torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)\n\t\tself.k_proj = nn.Linear(embed_dim, embed_dim)\n\t\tself.q_proj = nn.Linear(embed_dim, embed_dim)\n\t\tself.v_proj = nn.Linear(embed_dim, embed_dim)\n\t\tself.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\n\t\tself.num_heads = num_heads\n\n\tdef forward(self, x):\n\t\tx = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0,\n\t\t 1) # NCHW -> (HW)NC\n\t\tx = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\n\t\tx = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\n\t\tx, _ = F.multi_head_attention_forward(\n\t\t query=x,\n\t\t key=x,\n\t\t value=x,\n\t\t embed_dim_to_check=x.shape[-1],\n\t\t num_heads=self.num_heads,\n\t\t q_proj_weight=self.q_proj.weight,\n\t\t k_proj_weight=self.k_proj.weight,\n\t\t v_proj_weight=self.v_proj.weight,\n\t\t in_proj_weight=None,\n\t\t in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),\n\t\t bias_k=None,\n\t\t bias_v=None,\n\t\t add_zero_attn=False,\n\t\t dropout_p=0,\n\t\t out_proj_weight=self.c_proj.weight,\n\t\t out_proj_bias=self.c_proj.bias,\n\t\t use_separate_proj_weight=True,\n\t\t training=self.training,\n\t\t need_weights=False)\n\n\t\treturn x[0]\n\n\nclass ModifiedResNet(nn.Module):\n\t\"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention 
instead of an average pool\n \"\"\"\n\n\tdef __init__(self, layers, output_dim, heads, input_resolution=224, width=64):\n\t\tsuper().__init__()\n\t\tself.output_dim = output_dim\n\t\tself.input_resolution = input_resolution\n\n\t\t# the 3-layer stem\n\t\tself.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)\n\t\tself.bn1 = nn.BatchNorm2d(width // 2)\n\t\tself.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)\n\t\tself.bn2 = nn.BatchNorm2d(width // 2)\n\t\tself.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)\n\t\tself.bn3 = nn.BatchNorm2d(width)\n\t\tself.avgpool = nn.AvgPool2d(2)\n\t\tself.relu = nn.ReLU(inplace=True)\n\n\t\t# residual layers\n\t\tself._inplanes = width # this is a *mutable* variable used during construction\n\t\tself.layer1 = self._make_layer(width, layers[0])\n\t\tself.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n\t\tself.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n\t\tself.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n\t\tembed_dim = width * 32 # the ResNet feature dimension\n\t\tself.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)\n\n\tdef _make_layer(self, planes, blocks, stride=1):\n\t\tlayers = [Bottleneck(self._inplanes, planes, stride)]\n\n\t\tself._inplanes = planes * Bottleneck.expansion\n\t\tfor _ in range(1, blocks):\n\t\t\tlayers.append(Bottleneck(self._inplanes, planes))\n\n\t\treturn nn.Sequential(*layers)\n\n\tdef forward(self, x):\n\n\t\tdef stem(x):\n\t\t\tfor conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:\n\t\t\t\tx = self.relu(bn(conv(x)))\n\t\t\tx = self.avgpool(x)\n\t\t\treturn x\n\n\t\tx = x.type(self.conv1.weight.dtype)\n\t\tx = stem(x)\n\t\tx = self.layer1(x)\n\t\tx = self.layer2(x)\n\t\tx = self.layer3(x)\n\t\tx = self.layer4(x)\n\t\tx = self.attnpool(x)\n\n\t\treturn x\n\n\nclass LayerNorm(nn.LayerNorm):\n\t\"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n\n\tdef forward(self, x: torch.Tensor):\n\t\torig_type = x.dtype\n\t\tret = super().forward(x.type(torch.float32))\n\t\treturn ret.type(orig_type)\n\n\nclass QuickGELU(nn.Module):\n\n\tdef forward(self, x: torch.Tensor):\n\t\treturn x * torch.sigmoid(1.702 * x)\n\n\nclass ResidualAttentionBlock(nn.Module):\n\n\tdef __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):\n\t\tsuper().__init__()\n\n\t\tself.attn = nn.MultiheadAttention(d_model, n_head)\n\t\tself.ln_1 = LayerNorm(d_model)\n\t\tself.mlp = nn.Sequential(\n\t\t OrderedDict([(\"c_fc\", nn.Linear(d_model, d_model * 4)), (\"gelu\", QuickGELU()),\n\t\t (\"c_proj\", nn.Linear(d_model * 4, d_model))]))\n\t\tself.ln_2 = LayerNorm(d_model)\n\t\tself.attn_mask = attn_mask\n\n\tdef attention(self, x: torch.Tensor):\n\t\tself.attn_mask = self.attn_mask.to(dtype=x.dtype,\n\t\t device=x.device) if self.attn_mask is not None else None\n\t\treturn self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]\n\n\tdef forward(self, x: torch.Tensor):\n\t\tx = x + self.attention(self.ln_1(x))\n\t\tx = x + self.mlp(self.ln_2(x))\n\t\treturn x\n\n\nclass Transformer(nn.Module):\n\n\tdef __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):\n\t\tsuper().__init__()\n\t\tself.width = width\n\t\tself.layers = layers\n\t\tself.resblocks = nn.Sequential(\n\t\t *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])\n\n\tdef forward(self, x: torch.Tensor):\n\t\treturn 
self.resblocks(x)\n\n\nclass VisionTransformer(nn.Module):\n\n\tdef __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int,\n\t output_dim: int):\n\t\tsuper().__init__()\n\t\tself.input_resolution = input_resolution\n\t\tself.output_dim = output_dim\n\t\tself.conv1 = nn.Conv2d(in_channels=3,\n\t\t out_channels=width,\n\t\t kernel_size=patch_size,\n\t\t stride=patch_size,\n\t\t bias=False)\n\n\t\tscale = width**-0.5\n\t\tself.class_embedding = nn.Parameter(scale * torch.randn(width))\n\t\tself.positional_embedding = nn.Parameter(scale * torch.randn(\n\t\t (input_resolution // patch_size)**2 + 1, width))\n\t\tself.ln_pre = LayerNorm(width)\n\n\t\tself.transformer = Transformer(width, layers, heads)\n\n\t\tself.ln_post = LayerNorm(width)\n\t\tself.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n\tdef forward(self, x: torch.Tensor):\n\t\tx = self.conv1(x) # shape = [*, width, grid, grid]\n\t\tx = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n\t\tx = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\t\tx = torch.cat([\n\t\t self.class_embedding.to(x.dtype) +\n\t\t torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x\n\t\t],\n\t\t dim=1) # shape = [*, grid ** 2 + 1, width]\n\t\tx = x + self.positional_embedding.to(x.dtype)\n\t\tx = self.ln_pre(x)\n\n\t\tx = x.permute(1, 0, 2) # NLD -> LND\n\t\tx = self.transformer(x)\n\t\tx = x.permute(1, 0, 2) # LND -> NLD\n\n\t\tx = self.ln_post(x[:, 0, :])\n\n\t\tif self.proj is not None:\n\t\t\tx = x @ self.proj\n\n\t\treturn x\n\n\nclass CLIP(nn.Module):\n\n\tdef __init__(\n\t self,\n\t embed_dim: int,\n\t # vision\n\t image_resolution: int,\n\t vision_layers: Union[Tuple[int, int, int, int], int],\n\t vision_width: int,\n\t vision_patch_size: int,\n\t # text\n\t context_length: int,\n\t vocab_size: int,\n\t transformer_width: int,\n\t transformer_heads: int,\n\t transformer_layers: int):\n\t\tsuper().__init__()\n\n\t\tself.context_length = context_length\n\n\t\tif isinstance(vision_layers, (tuple, list)):\n\t\t\tvision_heads = vision_width * 32 // 64\n\t\t\tself.visual = ModifiedResNet(layers=vision_layers,\n\t\t\t output_dim=embed_dim,\n\t\t\t heads=vision_heads,\n\t\t\t input_resolution=image_resolution,\n\t\t\t width=vision_width)\n\t\telse:\n\t\t\tvision_heads = vision_width // 64\n\t\t\tself.visual = VisionTransformer(input_resolution=image_resolution,\n\t\t\t patch_size=vision_patch_size,\n\t\t\t width=vision_width,\n\t\t\t layers=vision_layers,\n\t\t\t heads=vision_heads,\n\t\t\t output_dim=embed_dim)\n\n\t\tself.transformer = Transformer(width=transformer_width,\n\t\t layers=transformer_layers,\n\t\t heads=transformer_heads,\n\t\t attn_mask=self.build_attention_mask())\n\n\t\tself.vocab_size = vocab_size\n\t\tself.token_embedding = nn.Embedding(vocab_size, transformer_width)\n\t\tself.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))\n\t\tself.ln_final = LayerNorm(transformer_width)\n\n\t\tself.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))\n\t\tself.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n\t\tself.initialize_parameters()\n\n\tdef initialize_parameters(self):\n\t\tnn.init.normal_(self.token_embedding.weight, std=0.02)\n\t\tnn.init.normal_(self.positional_embedding, std=0.01)\n\n\t\tif isinstance(self.visual, ModifiedResNet):\n\t\t\tif self.visual.attnpool is not None:\n\t\t\t\tstd = 
self.visual.attnpool.c_proj.in_features**-0.5\n\t\t\t\tnn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)\n\t\t\t\tnn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)\n\t\t\t\tnn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)\n\t\t\t\tnn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)\n\n\t\t\tfor resnet_block in [\n\t\t\t self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4\n\t\t\t]:\n\t\t\t\tfor name, param in resnet_block.named_parameters():\n\t\t\t\t\tif name.endswith(\"bn3.weight\"):\n\t\t\t\t\t\tnn.init.zeros_(param)\n\n\t\tproj_std = (self.transformer.width**-0.5) * ((2 * self.transformer.layers)**-0.5)\n\t\tattn_std = self.transformer.width**-0.5\n\t\tfc_std = (2 * self.transformer.width)**-0.5\n\t\tfor block in self.transformer.resblocks:\n\t\t\tnn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n\t\t\tnn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n\t\t\tnn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n\t\t\tnn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n\t\tif self.text_projection is not None:\n\t\t\tnn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)\n\n\tdef build_attention_mask(self):\n\t\t# lazily create causal attention mask, with full attention between the vision tokens\n\t\t# pytorch uses additive attention mask; fill with -inf\n\t\tmask = torch.empty(self.context_length, self.context_length)\n\t\tmask.fill_(float(\"-inf\"))\n\t\tmask.triu_(1) # zero out the lower diagonal\n\t\treturn mask\n\n\t@property\n\tdef dtype(self):\n\t\treturn self.visual.conv1.weight.dtype\n\n\tdef encode_image(self, image):\n\t\treturn self.visual(image.type(self.dtype))\n\n\tdef encode_text(self, text):\n\t\tx = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]\n\n\t\tx = x + self.positional_embedding.type(self.dtype)\n\t\tx = x.permute(1, 0, 2) # NLD -> LND\n\t\tx = self.transformer(x)\n\t\tx = x.permute(1, 0, 2) # LND -> NLD\n\t\tx = self.ln_final(x).type(self.dtype)\n\n\t\t# x.shape = [batch_size, n_ctx, transformer.width]\n\t\t# take features from the eot embedding (eot_token is the highest number in each sequence)\n\t\tx = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n\n\t\treturn x\n\n\tdef forward(self, image, text):\n\t\timage_features = self.encode_image(image)\n\t\ttext_features = self.encode_text(text)\n\n\t\t# normalized features\n\t\timage_features = image_features / image_features.norm(dim=-1, keepdim=True)\n\t\ttext_features = text_features / text_features.norm(dim=-1, keepdim=True)\n\n\t\t# cosine similarity as logits\n\t\tlogit_scale = self.logit_scale.exp()\n\t\tlogits_per_image = logit_scale * image_features @ text_features.t()\n\t\tlogits_per_text = logits_per_image.t()\n\n\t\t# shape = [global_batch_size, global_batch_size]\n\t\treturn logits_per_image, logits_per_text\n\n\ndef convert_weights(model: nn.Module):\n\t\"\"\"Convert applicable model parameters to fp16\"\"\"\n\n\tdef _convert_weights_to_fp16(l):\n\t\tif isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n\t\t\tl.weight.data = l.weight.data.half()\n\t\t\tif l.bias is not None:\n\t\t\t\tl.bias.data = l.bias.data.half()\n\n\t\tif isinstance(l, nn.MultiheadAttention):\n\t\t\tfor attr in [\n\t\t\t *[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"\n\t\t\t]:\n\t\t\t\ttensor = getattr(l, attr)\n\t\t\t\tif tensor is not None:\n\t\t\t\t\ttensor.data = tensor.data.half()\n\n\t\tfor name in 
[\"text_projection\", \"proj\"]:\n\t\t\tif hasattr(l, name):\n\t\t\t\tattr = getattr(l, name)\n\t\t\t\tif attr is not None:\n\t\t\t\t\tattr.data = attr.data.half()\n\n\tmodel.apply(_convert_weights_to_fp16)\n\n\ndef build_model(state_dict: dict):\n\tvit = \"visual.proj\" in state_dict\n\n\tif vit:\n\t\tvision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n\t\tvision_layers = len([\n\t\t k for k in state_dict.keys()\n\t\t if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")\n\t\t])\n\t\tvision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n\t\tgrid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1)**0.5)\n\t\timage_resolution = vision_patch_size * grid_size\n\telse:\n\t\tcounts: list = [\n\t\t len(set(k.split(\".\")[2]\n\t\t for k in state_dict\n\t\t if k.startswith(f\"visual.layer{b}\")))\n\t\t for b in [1, 2, 3, 4]\n\t\t]\n\t\tvision_layers = tuple(counts)\n\t\tvision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n\t\toutput_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1)**0.5)\n\t\tvision_patch_size = None\n\t\tassert output_width**2 + 1 == state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n\t\timage_resolution = output_width * 32\n\n\tembed_dim = state_dict[\"text_projection\"].shape[1]\n\tcontext_length = state_dict[\"positional_embedding\"].shape[0]\n\tvocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n\ttransformer_width = state_dict[\"ln_final.weight\"].shape[0]\n\ttransformer_heads = transformer_width // 64\n\ttransformer_layers = len(\n\t set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"transformer.resblocks\")))\n\n\tmodel = CLIP(embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size,\n\t context_length, vocab_size, transformer_width, transformer_heads, transformer_layers)\n\n\tfor key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n\t\tif key in state_dict:\n\t\t\tdel state_dict[key]\n\n\tconvert_weights(model)\n\tmodel.load_state_dict(state_dict)\n\treturn model.eval()\n"
] | [
[
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.ones",
"torch.nn.MultiheadAttention",
"torch.randn",
"torch.arange",
"torch.nn.Sequential",
"torch.sigmoid",
"numpy.log",
"torch.empty",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.nn.BatchNorm2d",
"torch.nn.Identity",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
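The row above records an OpenAI-CLIP-style model definition (ModifiedResNet / VisionTransformer image encoders plus a text transformer and a build_model loader). To show how the CLIP class recorded there is driven end to end, here is a hypothetical smoke test that is not part of the dataset row: it assumes the row's code has been saved locally as clip_model.py, and every hyperparameter value is invented purely to keep the forward pass tiny.

    import torch
    from clip_model import CLIP  # assumption: the code from the row above was saved as clip_model.py

    # Tiny, made-up hyperparameters; passing an int for vision_layers selects the VisionTransformer branch.
    model = CLIP(
        embed_dim=64,
        image_resolution=32,
        vision_layers=2,
        vision_width=64,
        vision_patch_size=8,
        context_length=16,
        vocab_size=1000,
        transformer_width=64,
        transformer_heads=2,
        transformer_layers=2,
    ).eval()

    images = torch.randn(4, 3, 32, 32)
    texts = torch.randint(0, 999, (4, 16))
    texts[:, 5] = 999  # the highest token id marks end-of-text; encode_text picks its position via argmax

    with torch.no_grad():
        logits_per_image, logits_per_text = model(images, texts)
    print(logits_per_image.shape, logits_per_text.shape)  # torch.Size([4, 4]) for both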
uricohen/google-research | [
"dec85093d61be11c58d5862ddd8f814a1140ef29"
] | [
"demogen/total_variation_util.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for computing the total variation.\n\nThis file contains function for computing the total variation\nof hidden layer over the entire training set.\n\n Typical usage example:\n\n root_dir = # directory where the models are\n data_dir = # directory where the dataset is\n model_config = ModelConfig(...)\n input_fn = data_util.get_input(data_dir,\n data=model_config.dataset, data_format=model_config.data_format)\n h1_total_variation = compute_total_variation(input_fn, root_dir, \n model_config, 'h1')\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n\nIMG_HEIGHT = 32\nIMG_WIDTH = 32\nIMG_CHANNELS = 3\n\n\ndef compute_total_variation(\n input_fn, root_dir, model_config, layer='inputs', sess=None,\n batchsize=50, dataset_size=50000):\n \"\"\"Compute the total variation of a hidden layer on all input data.\n\n Loads a given model from given directory and load the parameters in the given\n scope. Iterates over the entire training dataset and computes the total\n variation of the layer over the entire training set.\n\n Args:\n input_fn: function that produces the input and label tensors\n root_dir: the directory where the dataset is at\n model_config: a ModelConfig object that specifies the model\n layer: name of the hidden layer at which the total variation is computed.\n Only 1 layer at a time due to memory constraints. 
Available options\n include inputs, h1, h2, and h3.\n sess: optional tensorflow session\n batchsize: batch size with which the margin is computed\n dataset_size: number of data points in the dataset\n\n Returns:\n A scalar that is the total variation at the specified layer.\n \"\"\"\n #param_path = model_config.get_model_dir_name(root_dir)\n param_path = model_config.get_checkpoint_path(root_dir)\n model_fn = model_config.get_model_fn()\n\n if not sess:\n sess = tf.Session()\n\n data_format = model_config.data_format\n image_iter, label_iter = input_fn()\n if data_format == 'HWC':\n img_dim = [None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]\n else:\n img_dim = [None, IMG_CHANNELS, IMG_HEIGHT, IMG_WIDTH]\n image = tf.placeholder(tf.float32, shape=img_dim, name='image')\n label = tf.placeholder(\n tf.float32, shape=[None, model_config.num_class], name='label')\n\n loss_layers = [layer]\n end_points_collection = {}\n _ = model_fn(image, False, end_points_collection=end_points_collection)\n\n layer_activations = [end_points_collection[l] for l in loss_layers]\n\n # load model parameters\n sess.run(tf.global_variables_initializer())\n model_config.load_parameters(param_path, sess)\n\n count = 0\n all_activation = []\n while count < dataset_size:\n try:\n count += batchsize\n image_batch, label_batch = sess.run([image_iter, label_iter])\n label_batch = np.reshape(label_batch, [-1, model_config.num_class])\n fd = {image: image_batch, label: label_batch.astype(np.float32)}\n activation = np.squeeze(list(sess.run(layer_activations, feed_dict=fd)))\n all_activation.append(activation)\n except tf.errors.OutOfRangeError:\n print('reached the end of the data (%d)'%count)\n break\n\n all_activation = np.concatenate(all_activation, axis=0)\n response_flat = all_activation.reshape([all_activation.shape[0], -1])\n response_std = np.std(response_flat, axis=0)\n total_variation_unnormalized = (np.sum(response_std ** 2)) ** 0.5\n return total_variation_unnormalized / all_activation.shape[0]\n"
] | [
[
"numpy.reshape",
"tensorflow.placeholder",
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"numpy.std",
"tensorflow.Session",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
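To make the statistic returned by compute_total_variation in the row above concrete, here is a NumPy-only sketch of its final reduction; the layer shape and example count are invented, whereas the real function gathers the activations from a TensorFlow 1.x session loop before applying exactly this computation.

    import numpy as np

    activations = np.random.randn(1000, 8, 8, 16)          # (examples, H, W, C), made-up layer shape
    flat = activations.reshape(activations.shape[0], -1)    # one row per example
    per_unit_std = np.std(flat, axis=0)                     # std of every hidden unit over the data
    total_variation = np.sqrt(np.sum(per_unit_std ** 2))    # L2 norm of the per-unit stds
    print(total_variation / activations.shape[0])           # same normalization as the function above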
alexlib/vampy | [
"b8639f13cbe9ab42d961a2b12f42a4cc37e410ca"
] | [
"vampy/lax_wendroff.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport sys\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nimport vampy.utils as utils\n\n\nclass LaxWendroff(object):\n \"\"\"\n Class implementing Richtmyer's 2 step Lax-Wendroff method.\n \"\"\"\n \n \n def __init__(self, theta, gamma, nx):\n \"\"\"\n Constructor for LaxWendroff class.\n \n :param theta: factor for flux vector\n :param gamma: factor for source vector\n :param nx: number of spatial points\n \"\"\"\n self._theta = theta\n self._gamma = gamma\n self._nx = nx\n \n\n def solve(self, U0, U_in, U_out, F, S):\n \"\"\"\n Solver implementing Richtmyer's two-step Lax-Wendroff method [1,2].\n \n [1] R. D. Richtmyer. A Survey of Difference Methods for Non-Steady Fluid Dynamics. NCAR Technical Notes, 63(2), 1963.\n [2] R. J. LeVeque. Numerical Methods for Conservation Laws. Birkhauser Verlag, Basel, Switzerland, 2nd edition, 1992.\n \n :param U0: solution from previous time step\n :param U_in: inlet boundary condition\n :param U_out: outlet boundary condition\n :param F: flux function (see [2])\n :param S: source function (see [2])\n \"\"\"\n # U0: previous timestep, U1 current timestep\n U1 = np.zeros((2,self.nx))\n # apply boundary conditions\n U1[:,0] = U_in\n U1[:,-1] = U_out\n # calculate half steps\n U_np_mp = (U0[:,2:]+U0[:,1:-1])/2 -\\\n self.theta*(F(U0[:,2:], j=2, k=self.nx)-F(U0[:,1:-1], j=1, k=-1))/2 +\\\n self.gamma*(S(U0[:,2:], j=2, k=self.nx)+S(U0[:,1:-1], j=1, k=-1))/2\n U_np_mm = (U0[:,1:-1]+U0[:,0:-2])/2 -\\\n self.theta*(F(U0[:,1:-1], j=1, k=-1)-F(U0[:,0:-2], j=0, k=-2))/2 +\\\n self.gamma*(S(U0[:,1:-1], j=1, k=-1)+S(U0[:,0:-2], j=0, k=-2))/2\n # calculate full step\n U1[:,1:-1] = U0[:,1:-1] -\\\n self.theta*(F(U_np_mp, j=1, k=-1)-F(U_np_mm, j=1, k=-1)) +\\\n self.gamma*(S(U_np_mp, j=1, k=-1)+S(U_np_mm, j=1, k=-1))\n return U1\n \n \n @property \n def theta(self):\n \"\"\"\n dt/dx\n \"\"\"\n return self._theta\n \n @property \n def gamma(self):\n \"\"\"\n dt/2\n \"\"\"\n return self._gamma\n \n @property \n def nx(self):\n \"\"\"\n Number of spatial steps\n \"\"\" \n return self._nx"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
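As a usage illustration for the LaxWendroff class in the row above, the sketch below steps a constant state once with placeholder flux and source callables; it assumes the vampy package from that row is importable, and the flux/source lambdas are stand-ins rather than the physical definitions used by the solver.

    import numpy as np
    from vampy.lax_wendroff import LaxWendroff  # assumption: the package from the row above is installed

    nx = 5
    lw = LaxWendroff(theta=0.1, gamma=0.005, nx=nx)   # theta = dt/dx, gamma = dt/2 (arbitrary values)

    U0 = np.ones((2, nx))                  # previous time step: two conserved variables per grid point
    F = lambda U, **kw: U                  # placeholder flux, only to exercise the stepping
    S = lambda U, **kw: np.zeros_like(U)   # placeholder source
    U1 = lw.solve(U0, U_in=U0[:, 0], U_out=U0[:, -1], F=F, S=S)
    print(U1.shape)  # (2, 5)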
jarobyte91/post_ocr_correction | [
"bae2e601c838a23cc31a82e10ed5cd1b10ccdac6"
] | [
"train/launch_experiments_en.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\nimport torch\nimport torch.utils.data as tud\nimport torch.nn as nn\nimport pickle\nimport datetime\nimport argparse\nfrom random import randint as r\nfrom random import choice\nimport sys\nimport pandas as pd\nsys.path.append(\"/home/jarobyte/guemes/lib/\")\nfrom pytorch_decoding.seq2seq import Transformer\n# from metrics import levenshtein\nfrom timeit import default_timer as t\nfrom ocr_correction import evaluate_model\n \nparser = argparse.ArgumentParser(description = \"Launch experiments\")\nparser.add_argument(\"--experiment_id\", type = str)\nparser.add_argument('--full', action = \"store_true\")\nparser.add_argument('--random', action = \"store_true\")\n\nmain_folder = \"/home/jarobyte/scratch/guemes/icdar/en/\"\noutput_folder = \"baseline\"\n\nprint(f\"main folder:{main_folder}\\noutput folder:{output_folder}\\n\")\n\ndevice = torch.device(\"cuda\")\nargs = parser.parse_args()\nexperiment_id = args.experiment_id\n\n# fit parameters\n \nlearning_rate = 10**-4\nbatch_size = 100\n\nif args.full:\n epochs = 50\n train_size = 1000000\n dev_size = 1000000\nelse:\n epochs = 10\n train_size = 1000\n dev_size = 100\n \n\n# model hyperparameters\n\nif args.random:\n encoder_layers = 2\n decoder_layers = encoder_layers\n attention_heads = 8\n embedding_dimension = 256\n feedforward_dimension = embedding_dimension * 4\n dropout = 0.0\n weight_decay = 0.0\nelse:\n encoder_layers = 4\n decoder_layers = 4\n attention_heads = 8\n embedding_dimension = 512\n feedforward_dimension = 2048\n dropout = 0.1 \n\n# loading data\n \ninput_vocabulary = pickle.load(open(main_folder + \"data/char2i.pkl\", \"rb\"))\ntrain_source = torch.load(main_folder + \"data/train_source.pt\")[:train_size].to(device)\ndev_source = torch.load(main_folder + \"data/dev_source.pt\")[:dev_size].to(device)\n\noutput_vocabulary = pickle.load(open(main_folder + \"data/i2char.pkl\", \"rb\"))\ntrain_target = torch.load(main_folder + \"data/train_target.pt\")[:train_size].to(device)\ndev_target = torch.load(main_folder + \"data/dev_target.pt\")[:dev_size].to(device)\n \n# creating the model\n \nnet = Transformer(in_vocabulary = input_vocabulary, \n out_vocabulary = output_vocabulary, \n embedding_dimension = embedding_dimension,\n encoder_layers = encoder_layers,\n decoder_layers = decoder_layers,\n attention_heads = attention_heads,\n feedforward_dimension = feedforward_dimension,\n dropout = dropout,\n max_sequence_length = 110)\n\nnet.to(device)\n\n# fitting the model\n\nperformance = net.fit(X_train = train_source,\n Y_train = train_target,\n X_dev = dev_source,\n Y_dev = dev_target,\n epochs = epochs,\n batch_size = batch_size,\n learning_rate = learning_rate, \n weight_decay = weight_decay, \n progress_bar = 0, \n save_path = f\"{main_folder}{output_folder}/checkpoints/{experiment_id}.pt\")\n\n\n# saving the log and the model architecture\n\nperformance\\\n.assign(encoder_tokens = len(input_vocabulary), \n decoder_tokens = len(output_vocabulary),\n experiment_id = experiment_id)\\\n.to_csv(f\"{main_folder}{output_folder}/experiments/{experiment_id}.csv\", index = False)\n\nnet.save_architecture(f\"{main_folder}{output_folder}/models/{experiment_id}.arch\")\n\n# computing performance\n\nprint(\"\\nEvaluating model..\")\nnet.load_state_dict(torch.load(f\"{main_folder}{output_folder}/checkpoints/{experiment_id}.pt\"))\nwith open(main_folder + \"data/vocabulary.pkl\", \"rb\") as file:\n vocabulary = pickle.load(file)\ndev = pd.read_pickle(main_folder + \"data/dev.pkl\")\nif args.full:\n 
window_size = 50\nelse:\n window_size = 5\nevaluation = evaluate_model(raw = dev.ocr_to_input,\n gs = dev.gs_aligned,\n model = net,\n vocabulary = vocabulary,\n window_size = window_size)\nevaluation = evaluation.assign(experiment_id = experiment_id)[[\"experiment_id\", \"improvement\"] + list(evaluation.columns)[:-1]]\nprint(evaluation)\nevaluation.to_csv(f\"{main_folder}{output_folder}/evaluation/{experiment_id}.csv\", index = False)\n"
] | [
[
"torch.device",
"pandas.read_pickle",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
flaviovdf/pyseries | [
"59c8a321790d2398d71305710b7d322ce2d8eaaf"
] | [
"pyseries/data/tests/test_tsio.py"
] | [
"# -*- coding: utf8\nfrom __future__ import division, print_function\n'''\nTests for the io module\n'''\n\nfrom numpy.testing import assert_equal\nfrom pyseries.testing import YOUTUBE_1K\n\nfrom pyseries.data import tsio\n\ndef test_from_mat():\n dataset = tsio.from_id_row_mat(YOUTUBE_1K)\n assert_equal(1000, dataset.num_series)\n \n for series in dataset:\n assert_equal(100, len(series))"
] | [
[
"numpy.testing.assert_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AnaZou/ssd_keras | [
"44c923e6e062eb522b92ed4c1421dc3082e493f2"
] | [
"ssd_training.py"
] | [
"\"\"\"SSD training utils.\"\"\"\n\nimport tensorflow as tf\n\n\nclass MultiboxLoss(object):\n \"\"\"Multibox loss with some helper functions.\n\n # Arguments\n num_classes: Number of classes including background.\n alpha: Weight of L1-smooth loss.\n neg_pos_ratio: Max ratio of negative to positive boxes in loss.\n background_label_id: Id of background label.\n negatives_for_hard: Number of negative boxes to consider\n it there is no positive boxes in batch.\n\n # References\n https://arxiv.org/abs/1512.02325\n\n # TODO\n Add possibility for background label id be not zero\n \"\"\"\n def __init__(self, num_classes, alpha=1.0, neg_pos_ratio=3.0,\n background_label_id=0, negatives_for_hard=100.0):\n self.num_classes = num_classes\n self.alpha = alpha\n self.neg_pos_ratio = neg_pos_ratio\n if background_label_id != 0:\n raise Exception('Only 0 as background label id is supported')\n self.background_label_id = background_label_id\n self.negatives_for_hard = negatives_for_hard\n\n def _l1_smooth_loss(self, y_true, y_pred):\n \"\"\"Compute L1-smooth loss.\n\n # Arguments\n y_true: Ground truth bounding boxes,\n tensor of shape (?, num_boxes, 4).\n y_pred: Predicted bounding boxes,\n tensor of shape (?, num_boxes, 4).\n\n # Returns\n l1_loss: L1-smooth loss, tensor of shape (?, num_boxes).\n\n # References\n https://arxiv.org/abs/1504.08083\n \"\"\"\n abs_loss = tf.abs(y_true - y_pred)\n sq_loss = 0.5 * (y_true - y_pred)**2\n l1_loss = tf.select(tf.less(abs_loss, 1.0), sq_loss, abs_loss - 0.5)\n return tf.reduce_sum(l1_loss, -1)\n\n def _softmax_loss(self, y_true, y_pred):\n \"\"\"Compute softmax loss.\n\n # Arguments\n y_true: Ground truth targets,\n tensor of shape (?, num_boxes, num_classes).\n y_pred: Predicted logits,\n tensor of shape (?, num_boxes, num_classes).\n\n # Returns\n softmax_loss: Softmax loss, tensor of shape (?, num_boxes).\n \"\"\"\n y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)\n softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred),\n reduction_indices=-1)\n return softmax_loss\n\n def compute_loss(self, y_true, y_pred):\n \"\"\"Compute mutlibox loss.\n\n # Arguments\n y_true: Ground truth targets,\n tensor of shape (?, num_boxes, 4 + num_classes + 8),\n priors in ground truth are fictitious,\n y_true[:, :, -8] has 1 if prior should be penalized\n or in other words is assigned to some ground truth box,\n y_true[:, :, -7:] are all 0.\n y_pred: Predicted logits,\n tensor of shape (?, num_boxes, 4 + num_classes + 8).\n\n # Returns\n loss: Loss for prediction, tensor of shape (?,).\n \"\"\"\n batch_size = tf.shape(y_true)[0]\n num_boxes = tf.to_float(tf.shape(y_true)[1])\n\n # loss for all priors\n conf_loss = self._softmax_loss(y_true[:, :, 4:-8],\n y_pred[:, :, 4:-8])\n loc_loss = self._l1_smooth_loss(y_true[:, :, :4],\n y_pred[:, :, :4])\n\n # get positives loss\n num_pos = tf.reduce_sum(y_true[:, :, -8], reduction_indices=-1)\n pos_loc_loss = tf.reduce_sum(loc_loss * y_true[:, :, -8],\n reduction_indices=1)\n pos_conf_loss = tf.reduce_sum(conf_loss * y_true[:, :, -8],\n reduction_indices=1)\n\n # get negatives loss, we penalize only confidence here\n num_neg = tf.minimum(self.neg_pos_ratio * num_pos,\n num_boxes - num_pos)\n pos_num_neg_mask = tf.greater(num_neg, 0)\n has_min = tf.to_float(tf.reduce_any(pos_num_neg_mask))\n num_neg = tf.concat(0, [num_neg,\n [(1 - has_min) * self.negatives_for_hard]])\n num_neg_batch = tf.reduce_min(tf.boolean_mask(num_neg,\n tf.greater(num_neg, 0)))\n num_neg_batch = tf.to_int32(num_neg_batch)\n confs_start = 4 + 
self.background_label_id + 1\n confs_end = confs_start + self.num_classes - 1\n max_confs = tf.reduce_max(y_pred[:, :, confs_start:confs_end],\n reduction_indices=2)\n _, indices = tf.nn.top_k(max_confs * (1 - y_true[:, :, -8]),\n k=num_neg_batch)\n batch_idx = tf.expand_dims(tf.range(0, batch_size), 1)\n batch_idx = tf.tile(batch_idx, (1, num_neg_batch))\n full_indices = (tf.reshape(batch_idx, [-1]) * tf.to_int32(num_boxes) +\n tf.reshape(indices, [-1]))\n # full_indices = tf.concat(2, [tf.expand_dims(batch_idx, 2),\n # tf.expand_dims(indices, 2)])\n # neg_conf_loss = tf.gather_nd(conf_loss, full_indices)\n neg_conf_loss = tf.gather(tf.reshape(conf_loss, [-1]),\n full_indices)\n neg_conf_loss = tf.reshape(neg_conf_loss,\n [batch_size, num_neg_batch])\n neg_conf_loss = tf.reduce_sum(neg_conf_loss, reduction_indices=1)\n\n # loss is sum of positives and negatives\n total_loss = pos_conf_loss + neg_conf_loss\n total_loss /= (num_pos + tf.to_float(num_neg_batch))\n num_pos = tf.select(tf.not_equal(num_pos, 0), num_pos,\n tf.ones_like(num_pos))\n total_loss += (self.alpha * pos_loc_loss) / num_pos\n return total_loss\n"
] | [
[
"tensorflow.not_equal",
"tensorflow.reduce_max",
"tensorflow.concat",
"tensorflow.range",
"tensorflow.greater",
"tensorflow.less",
"tensorflow.reduce_sum",
"tensorflow.shape",
"tensorflow.minimum",
"tensorflow.reshape",
"tensorflow.reduce_any",
"tensorflow.ones_like",
"tensorflow.nn.top_k",
"tensorflow.to_float",
"tensorflow.log",
"tensorflow.to_int32",
"tensorflow.tile",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
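The _l1_smooth_loss method in the row above is the Huber-style smooth L1 with threshold 1 (the file targets a pre-1.0 TensorFlow API, e.g. tf.select and the old tf.concat argument order). A quick NumPy check of the element-wise formula on made-up residuals:

    import numpy as np

    diff = np.array([-2.0, -0.4, 0.0, 0.7, 3.0])               # y_true - y_pred, invented values
    abs_d = np.abs(diff)
    smooth_l1 = np.where(abs_d < 1.0, 0.5 * diff ** 2, abs_d - 0.5)
    print(smooth_l1)  # [1.5, 0.08, 0.0, 0.245, 2.5]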
researchmm/WSOD2 | [
"fd6f99401013ed5a66e39cee71a6c2b35580008e"
] | [
"mmdet/models/roi_heads/bbox_heads/oicr_head.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.runner import auto_fp16, force_fp32\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import build_bbox_coder, multi_apply, WeaklyMulticlassNMS, multiclass_nms\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.losses import accuracy\n\[email protected]_module()\nclass OICRHead(nn.Module):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively.\"\"\"\n\n def __init__(self,\n roi_feat_size=7,\n in_channels=256,\n hidden_channels=1024,\n bbox_coder=None,\n num_classes=20):\n super(OICRHead, self).__init__()\n self.roi_feat_size = _pair(roi_feat_size)\n self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.fp16_enabled = False\n\n in_channels *= self.roi_feat_area\n\n self.fc1 = nn.Linear(in_channels, hidden_channels)\n self.dropout1 = nn.Dropout()\n self.fc2 = nn.Linear(hidden_channels, hidden_channels)\n self.dropout2 = nn.Dropout()\n\n self.fc_cls1 = nn.Linear(hidden_channels, num_classes)\n self.fc_cls2 = nn.Linear(hidden_channels, num_classes)\n\n self.fc_refine1 = nn.Linear(hidden_channels, num_classes + 1)\n self.fc_refine2 = nn.Linear(hidden_channels, num_classes + 1)\n self.fc_refine3 = nn.Linear(hidden_channels, num_classes + 1)\n\n self.with_bbox = False\n if bbox_coder is not None:\n self.fc_bbox = nn.Linear(hidden_channels, 4)\n self.bbox_coder = build_bbox_coder(bbox_coder)\n self.with_bbox = True\n\n self.weakly_multiclass_nms = WeaklyMulticlassNMS(20)\n\n def init_weights(self):\n nn.init.normal_(self.fc_cls1.weight, 0, 0.01)\n nn.init.constant_(self.fc_cls1.bias, 0)\n nn.init.normal_(self.fc_cls2.weight, 0, 0.01)\n nn.init.constant_(self.fc_cls2.bias, 0)\n nn.init.normal_(self.fc_refine1.weight, 0, 0.01)\n nn.init.constant_(self.fc_refine1.bias, 0)\n nn.init.normal_(self.fc_refine2.weight, 0, 0.01)\n nn.init.constant_(self.fc_refine2.bias, 0)\n nn.init.normal_(self.fc_refine3.weight, 0, 0.01)\n nn.init.constant_(self.fc_refine3.bias, 0)\n if self.with_bbox:\n nn.init.normal_(self.fc_bbox.weight, 0, 0.001)\n nn.init.constant_(self.fc_bbox.bias, 0)\n\n @auto_fp16()\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.dropout1(F.relu(self.fc1(x)))\n x = self.dropout2(F.relu(self.fc2(x)))\n cls1 = self.fc_cls1(x)\n cls2 = self.fc_cls2(x)\n\n cls1 = F.softmax(cls1, dim=1)\n cls2 = F.softmax(cls2, dim=0)\n cls = cls1 * cls2\n\n refine1 = F.softmax(self.fc_refine1(x), dim=1)\n refine2 = F.softmax(self.fc_refine2(x), dim=1)\n refine3 = F.softmax(self.fc_refine3(x), dim=1)\n\n if self.with_bbox:\n bbox = self.fc_bbox(x)\n else:\n bbox = None\n\n return cls, refine1, refine2, refine3, bbox\n\n @force_fp32(apply_to=('cls'))\n def loss_wsddn(self, cls, labels):\n cls = cls.sum(dim=0)\n cls = torch.clamp(cls, 0., 1.)\n loss_wsddn = F.binary_cross_entropy(cls, labels.float(), reduction='sum')\n return loss_wsddn\n\n @force_fp32(apply_to=('cls'))\n def loss_oicr(self, cls, labels, weights):\n labels += 1\n labels = F.one_hot(labels, self.num_classes + 1)\n #loss_oicr = F.cross_entropy(cls, labels, reduction='none')\n loss_oicr = (- labels * (cls + 1e-6).log()).sum(dim=1)\n loss_oicr = (loss_oicr * weights).mean()\n return loss_oicr\n\n @force_fp32(apply_to=('bbox'))\n def loss_bbox(self, bbox, targets, labels, weights):\n pos_idx = labels.nonzero().squeeze(1)\n bbox = bbox[pos_idx]\n targets = targets[pos_idx]\n weights = weights[pos_idx]\n\n 
loss_bbox = F.smooth_l1_loss(bbox, targets, reduction='none')\n weights = weights.view(-1, 1)\n loss_bbox = 30 * (loss_bbox * weights).mean()\n return loss_bbox\n\n\n @force_fp32(apply_to=('bbox')) \n def get_targets(self, bbox, gt):\n bbox_targets = self.bbox_coder.encode(bbox, gt)\n return bbox_targets\n\n\n @force_fp32(apply_to=('cls'))\n def get_bboxes(self,\n rois,\n scores,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None):\n\n scores_pad = torch.zeros((scores.shape[0], 1), dtype=torch.float32).to(device=scores.device)\n scores = torch.cat([scores, scores_pad], dim=1)\n\n if bbox_pred is not None:\n bboxes = self.bbox_coder.decode(\n rois[:, 1:], bbox_pred, max_shape=img_shape)\n else:\n bboxes = rois[:, 1:].clone()\n if img_shape is not None:\n bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])\n bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])\n\n if rescale and bboxes.size(0) > 0:\n if isinstance(scale_factor, float):\n bboxes /= scale_factor\n else:\n scale_factor = bboxes.new_tensor(scale_factor)\n bboxes = (bboxes.view(bboxes.size(0), -1, 4) /\n scale_factor).view(bboxes.size()[0], -1)\n\n if cfg is None:\n return bboxes, scores\n else:\n #det_bboxes, det_labels = self.weakly_multiclass_nms(bboxes, scores, cfg.nms, cfg.max_per_img)\n det_bboxes, det_labels = multiclass_nms(bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img)\n\n return det_bboxes, det_labels\n\n\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.init.constant_",
"torch.zeros",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.functional.smooth_l1_loss",
"torch.nn.modules.utils._pair",
"torch.nn.functional.one_hot",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
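The loss_oicr term in the row above is a weighted negative log-likelihood of softmax proposal scores against hard pseudo-labels, with one extra background class. The plain-PyTorch sketch below mirrors that formula on made-up tensors; the labels are shown after the +1 shift applied inside loss_oicr, so index 0 stands for background, and the shapes and values are assumptions for illustration only.

    import torch
    import torch.nn.functional as F

    num_classes = 20
    scores = torch.softmax(torch.randn(6, num_classes + 1), dim=1)  # one refinement branch, 6 proposals
    labels = torch.tensor([0, 4, 4, 8, 0, 20])                       # pseudo-labels after the +1 shift
    weights = torch.rand(6)                                          # per-proposal weights

    one_hot = F.one_hot(labels, num_classes + 1)
    loss = (-(one_hot * (scores + 1e-6).log()).sum(dim=1) * weights).mean()
    print(loss.item())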
Lornatang/PyTorch-WGANGP | [
"5fde831f7509dd9136b1eb6fa1b772a0ea08b67d"
] | [
"gan_mnist.py"
] | [
"import argparse\nimport os\nimport random\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\n\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', required=True, help='path to dataset')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--batchSize', type=int, default=64, help='inputs batch size')\nparser.add_argument('--imageSize', type=int, default=28, help='the height / width of the inputs image to network')\nparser.add_argument('--nz', type=int, default=128, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--niter', type=int, default=50, help='number of epochs to train for')\nparser.add_argument(\"--n_critic\", type=int, default=5, help=\"number of training steps for discriminator per iter\")\nparser.add_argument('--lr', type=float, default=0.0001, help='learning rate, default=0.0002')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--outf', default='./checkpoints', help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--model', type=str, default='train', help='GAN train models.default: \\'train\\'. other: gen')\n\nopt = parser.parse_args()\n\n\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndataset = dset.MNIST(root=opt.dataroot, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ]))\n\n\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\ndevice = torch.device(\"cuda:0\" if opt.cuda else \"cpu\")\nngpu = int(opt.ngpu)\nnz = int(opt.nz)\nngf = int(opt.ngf)\nndf = int(opt.ndf)\nnc = 1\n\n# Loss weight for gradient penalty\nlambda_gp = 10\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass Generator(nn.Module):\n def __init__(self, gpus):\n super(Generator, self).__init__()\n self.ngpu = gpus\n self.main = nn.Sequential(\n # inputs is Z, going into a convolution\n nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. 
(ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf*2) x 14 x 14\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n # state size. (ngf) x 28 x 28\n )\n\n def forward(self, inputs):\n if inputs.is_cuda and self.ngpu > 1:\n outputs = nn.parallel.data_parallel(self.main, inputs, range(self.ngpu))\n else:\n outputs = self.main(inputs)\n return outputs\n\n\nnetG = Generator(ngpu).to(device)\nnetG.apply(weights_init)\n\nif opt.netG != '':\n netG = torch.load(opt.netG)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, gpus):\n super(Discriminator, self).__init__()\n self.ngpu = gpus\n self.main = nn.Sequential(\n # inputs is (nc) x 28 x 28\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 14 x 14\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 8 x 8\n nn.Conv2d(ndf * 2, ndf * 4, 3, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 4 x 4\n nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False),\n )\n\n def forward(self, inputs):\n if inputs.is_cuda and self.ngpu > 1:\n outputs = nn.parallel.data_parallel(self.main, inputs, range(self.ngpu))\n else:\n outputs = self.main(inputs)\n\n return outputs.view(-1, 1).squeeze(1)\n\n\nnetD = Discriminator(ngpu).to(device)\nnetD.apply(weights_init)\n\nif opt.netD != '':\n netD = torch.load(opt.netD)\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.9))\noptimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.9))\n\n\ndef compute_gradient_penalty(net, real_samples, fake_samples):\n \"\"\"Calculates the gradient penalty loss for WGAN GP\"\"\"\n # Random weight term for interpolation between real and fake samples\n alpha = torch.randn(real_samples.size(0), 1, 1, 1, device=device)\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n d_interpolates = net(interpolates)\n fake = torch.full((real_samples.size(0), ), 1, device=device)\n # Get gradient w.r.t. 
interpolates\n gradients = autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1)\n gradient_penaltys = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambda_gp\n return gradient_penaltys\n\n\ndef train():\n for epoch in range(opt.niter):\n for i, (real_imgs, _) in enumerate(dataloader):\n\n # configure input\n real_imgs = real_imgs.to(device)\n\n # Get real imgs batch size\n batch_size = real_imgs.size(0)\n\n # -----------------\n # Train Discriminator\n # -----------------\n\n netD.zero_grad()\n\n # Sample noise as generator input\n noise = torch.randn(batch_size, nz, 1, 1, device=device)\n\n # Generate a batch of images\n fake_imgs = netG(noise)\n\n # Real images\n real_validity = netD(real_imgs)\n # Fake images\n fake_validity = netD(fake_imgs)\n # Gradient penalty\n gradient_penalty = compute_gradient_penalty(netD, real_imgs.data, fake_imgs.data)\n\n # Loss measures generator's ability to fool the discriminator\n errD = -torch.mean(real_validity) + torch.mean(fake_validity) + gradient_penalty\n\n errD.backward()\n optimizerD.step()\n\n optimizerG.zero_grad()\n\n # Train the generator every n_critic iterations\n if i % opt.n_critic == 0:\n\n # ---------------------\n # Train Generator\n # ---------------------\n\n # Generate a batch of images\n fake_imgs = netG(noise)\n # Adversarial loss\n errG = -torch.mean(netD(fake_imgs))\n\n errG.backward()\n optimizerG.step()\n\n print(f'[{epoch + 1}/{opt.niter}][{i}/{len(dataloader)}] '\n f'Loss_D: {errD.item():.4f} '\n f'Loss_G: {errG.item():.4f}.', end=\"\\r\")\n\n if epoch % 5 == 0:\n vutils.save_image(real_imgs,\n f'{opt.outf}/real_samples.png',\n normalize=True)\n vutils.save_image(netG(noise).detach(),\n f'{opt.outf}/fake_samples_epoch_{epoch}.png',\n normalize=True)\n\n # do checkpointing\n torch.save(netG, f'{opt.outf}/netG_epoch_{epoch + 1}.pth')\n torch.save(netD, f'{opt.outf}/netD_epoch_{epoch + 1}.pth')\n\n\nif __name__ == '__main__':\n train()\n"
] | [
[
"torch.mean",
"torch.nn.ConvTranspose2d",
"torch.load",
"torch.manual_seed",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.device",
"torch.nn.ReLU",
"torch.autograd.grad",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
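compute_gradient_penalty in the row above implements the WGAN-GP term: interpolate between real and fake samples, differentiate the critic output with respect to the interpolates, and push the gradient norm towards 1. Below is a self-contained sketch of that term with a hypothetical tiny critic; note the mixing weights here are drawn with torch.rand (the standard uniform formulation), whereas the script above draws them with torch.randn.

    import torch
    import torch.nn as nn

    critic = nn.Sequential(nn.Flatten(), nn.Linear(4, 1))   # stand-in critic for 1x2x2 inputs
    real = torch.randn(8, 1, 2, 2)
    fake = torch.randn(8, 1, 2, 2)

    alpha = torch.rand(8, 1, 1, 1)                                    # per-sample mixing weights
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    scores = critic(interp)
    grads = torch.autograd.grad(outputs=scores, inputs=interp,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True)[0]
    grads = grads.view(grads.size(0), -1)
    gradient_penalty = ((grads.norm(2, dim=1) - 1) ** 2).mean() * 10  # lambda_gp = 10
    print(gradient_penalty.item())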
iTakeshi/dgl | [
"25c9221b835b775f40983c844e482a1242095756"
] | [
"python/dgl/dataloading/cluster_gcn.py"
] | [
"\"\"\"Cluster-GCN samplers.\"\"\"\nimport os\nimport pickle\nimport numpy as np\n\nfrom .. import backend as F\nfrom ..base import DGLError\nfrom ..partition import metis_partition_assignment\nfrom .base import set_node_lazy_features, set_edge_lazy_features\n\nclass ClusterGCNSampler(object):\n \"\"\"Cluster-GCN sampler.\n\n This sampler first partitions the graph with METIS partitioning, then it caches the nodes of\n each partition to a file within the given cache directory.\n\n This is used in conjunction with :class:`dgl.dataloading.DataLoader`.\n\n Notes\n -----\n The graph must be homogeneous and on CPU.\n\n Parameters\n ----------\n g : DGLGraph\n The original graph.\n k : int\n The number of partitions.\n cache_path : str\n The path to the cache directory for storing the partition result.\n \"\"\"\n def __init__(self, g, k, balance_ntypes=None, balance_edges=False, mode='k-way',\n prefetch_node_feats=None, prefetch_edge_feats=None, output_device=None,\n cache_path='cluster_gcn.pkl'):\n if os.path.exists(cache_path):\n try:\n with open(cache_path, 'rb') as f:\n self.partition_offset, self.partition_node_ids = pickle.load(f)\n except (EOFError, TypeError, ValueError):\n raise DGLError(\n f'The contents in the cache file {cache_path} is invalid. '\n f'Please remove the cache file {cache_path} or specify another path.')\n if len(self.partition_offset) != k + 1:\n raise DGLError(\n f'Number of partitions in the cache does not match the value of k. '\n f'Please remove the cache file {cache_path} or specify another path.')\n if len(self.partition_node_ids) != g.num_nodes():\n raise DGLError(\n f'Number of nodes in the cache does not match the given graph. '\n f'Please remove the cache file {cache_path} or specify another path.')\n else:\n partition_ids = metis_partition_assignment(\n g, k, balance_ntypes=balance_ntypes, balance_edges=balance_edges, mode=mode)\n partition_ids = F.asnumpy(partition_ids)\n partition_node_ids = np.argsort(partition_ids)\n partition_size = F.zerocopy_from_numpy(np.bincount(partition_ids, minlength=k))\n partition_offset = F.zerocopy_from_numpy(np.insert(np.cumsum(partition_size), 0, 0))\n partition_node_ids = F.zerocopy_from_numpy(partition_ids)\n with open(cache_path, 'wb') as f:\n pickle.dump((partition_offset, partition_node_ids), f)\n self.partition_offset = partition_offset\n self.partition_node_ids = partition_node_ids\n\n self.prefetch_node_feats = prefetch_node_feats or []\n self.prefetch_edge_feats = prefetch_edge_feats or []\n self.output_device = output_device\n\n def sample(self, g, partition_ids):\n \"\"\"Samples a subgraph given a list of partition IDs.\"\"\"\n node_ids = F.cat([\n self.partition_node_ids[self.partition_offset[i]:self.partition_offset[i+1]]\n for i in F.asnumpy(partition_ids)], 0)\n sg = g.subgraph(node_ids, relabel_nodes=True, output_device=self.output_device)\n set_node_lazy_features(sg, self.prefetch_node_feats)\n set_edge_lazy_features(sg, self.prefetch_edge_feats)\n return sg\n"
] | [
[
"numpy.argsort",
"numpy.cumsum",
"numpy.bincount"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
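The METIS bookkeeping in the ClusterGCNSampler above boils down to grouping node ids by partition and recording per-partition offsets. A tiny NumPy sketch of that indexing, with a made-up assignment of 7 nodes to 3 partitions:

    import numpy as np

    partition_ids = np.array([1, 0, 2, 0, 1, 2, 0])    # partition assignment per node (invented)
    node_ids = np.argsort(partition_ids)                # node ids grouped by partition
    sizes = np.bincount(partition_ids, minlength=3)     # nodes per partition -> [3 2 2]
    offsets = np.insert(np.cumsum(sizes), 0, 0)         # [0 3 5 7]
    print(node_ids[offsets[1]:offsets[2]])               # node ids falling in partition 1 -> [0 4]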
clbarnes/imageio | [
"73dadda9d93f9e2c4724095f91a2acd4ecd13c4a"
] | [
"tests/test_grab.py"
] | [
"import sys\n\nimport numpy as np\n\nfrom pytest import raises\nfrom imageio.testing import run_tests_if_main\n\nimport imageio\n\n\ndef test_grab_plugin_load():\n \n imageio.plugins.grab.BaseGrabFormat._ImageGrab = FakeImageGrab\n imageio.plugins.grab.BaseGrabFormat._pillow_imported = True\n _plat = sys.platform\n sys.platform = 'win32'\n \n try:\n \n reader = imageio.get_reader('<screen>')\n assert reader.format.name == 'SCREENGRAB'\n \n reader = imageio.get_reader('<clipboard>')\n assert reader.format.name == 'CLIPBOARDGRAB'\n \n with raises(ValueError):\n imageio.get_writer('<clipboard>')\n with raises(ValueError):\n imageio.get_writer('<screen>')\n \n finally:\n sys.platform = _plat\n imageio.plugins.grab.BaseGrabFormat._ImageGrab = None\n imageio.plugins.grab.BaseGrabFormat._pillow_imported = False\n\n\nclass FakeImageGrab:\n \n has_clipboard = True\n \n @classmethod\n def grab(cls):\n return np.zeros((8, 8, 3), np.uint8)\n \n @classmethod\n def grabclipboard(cls):\n if cls.has_clipboard:\n return np.zeros((9, 9, 3), np.uint8)\n else:\n return None\n\n\ndef test_grab_simulated():\n # Hard to test for real, if only because its only fully suppored on\n # Windows, but we can monkey patch so we can test all the imageio bits.\n \n imageio.plugins.grab.BaseGrabFormat._ImageGrab = FakeImageGrab\n imageio.plugins.grab.BaseGrabFormat._pillow_imported = True\n _plat = sys.platform\n sys.platform = 'win32'\n \n try:\n \n im = imageio.imread('<screen>')\n assert im.shape == (8, 8, 3)\n \n reader = imageio.get_reader('<screen>')\n im1 = reader.get_data(0)\n im2 = reader.get_data(0)\n im3 = reader.get_data(1)\n assert im1.shape == (8, 8, 3)\n assert im2.shape == (8, 8, 3)\n assert im3.shape == (8, 8, 3)\n \n im = imageio.imread('<clipboard>')\n assert im.shape == (9, 9, 3)\n \n reader = imageio.get_reader('<clipboard>')\n im1 = reader.get_data(0)\n im2 = reader.get_data(0)\n im3 = reader.get_data(1)\n assert im1.shape == (9, 9, 3)\n assert im2.shape == (9, 9, 3)\n assert im3.shape == (9, 9, 3)\n \n # Grabbing from clipboard can fail if there is no image data to grab\n FakeImageGrab.has_clipboard = False\n with raises(RuntimeError):\n im = imageio.imread('<clipboard>')\n \n finally:\n sys.platform = _plat\n imageio.plugins.grab.BaseGrabFormat._ImageGrab = None\n imageio.plugins.grab.BaseGrabFormat._pillow_imported = False\n FakeImageGrab.has_clipboard = True\n\n\nrun_tests_if_main()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FinchZHU/uai-sdk | [
"78e06bebba2d18233ce6dcb5be619e940f7a7ef3",
"78e06bebba2d18233ce6dcb5be619e940f7a7ef3"
] | [
"examples/tensorflow/inference/im2txt/code/inference_utils/inference_wrapper_base.py",
"examples/tensorflow/inference/tf-serving/mnist/inference.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base wrapper class for performing inference with an image-to-text model.\n\nSubclasses must implement the following methods:\n\n build_model():\n Builds the model for inference and returns the model object.\n\n feed_image():\n Takes an encoded image and returns the initial model state, where \"state\"\n is a numpy array whose specifics are defined by the subclass, e.g.\n concatenated LSTM state. It's assumed that feed_image() will be called\n precisely once at the start of inference for each image. Subclasses may\n compute and/or save per-image internal context in this method.\n\n inference_step():\n Takes a batch of inputs and states at a single time-step. Returns the\n softmax output corresponding to the inputs, and the new states of the batch.\n Optionally also returns metadata about the current inference step, e.g. a\n serialized numpy array containing activations from a particular model layer.\n\nClient usage:\n 1. Build the model inference graph via build_graph_from_config() or\n build_graph_from_proto().\n 2. Call the resulting restore_fn to load the model checkpoint.\n 3. 
For each image in a batch of images:\n a) Call feed_image() once to get the initial state.\n b) For each step of caption generation, call inference_step().\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\n\n\nimport tensorflow as tf\n\n# pylint: disable=unused-argument\n\n\nclass InferenceWrapperBase(object):\n \"\"\"Base wrapper class for performing inference with an image-to-text model.\"\"\"\n\n def __init__(self):\n pass\n\n def build_model(self, model_config):\n \"\"\"Builds the model for inference.\n\n Args:\n model_config: Object containing configuration for building the model.\n\n Returns:\n model: The model object.\n \"\"\"\n tf.logging.fatal(\"Please implement build_model in subclass\")\n\n def _create_restore_fn(self, checkpoint_path, saver):\n \"\"\"Creates a function that restores a model from checkpoint.\n\n Args:\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n saver: Saver for restoring variables from the checkpoint file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n\n Raises:\n ValueError: If checkpoint_path does not refer to a checkpoint file or a\n directory containing a checkpoint file.\n \"\"\"\n if tf.gfile.IsDirectory(checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_path:\n raise ValueError(\"No checkpoint file found in: %s\" % checkpoint_path)\n\n def _restore_fn(sess):\n\t\n tf.logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n saver.restore(sess, checkpoint_path)\n tf.logging.info(\"Successfully loaded checkpoint: %s\",\n os.path.basename(checkpoint_path))\n\n return _restore_fn\n\n def build_graph_from_config(self, model_config, checkpoint_path):\n \"\"\"Builds the inference graph from a configuration object.\n\n Args:\n model_config: Object containing configuration for building the model.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n \"\"\"\n tf.logging.info(\"Building model from: \" + checkpoint_path)\n self.build_model(model_config)\n saver = tf.train.Saver()\n\n return self._create_restore_fn(checkpoint_path, saver)\n\n def build_graph_from_proto(self, graph_def_file, saver_def_file,\n checkpoint_path):\n \"\"\"Builds the inference graph from serialized GraphDef and SaverDef protos.\n\n Args:\n graph_def_file: File containing a serialized GraphDef proto.\n saver_def_file: File containing a serialized SaverDef proto.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n \"\"\"\n # Load the Graph.\n tf.logging.info(\"Loading GraphDef from file: %s\", graph_def_file)\n graph_def = tf.GraphDef()\n with tf.gfile.FastGFile(graph_def_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name=\"\")\n\n # Load the Saver.\n tf.logging.info(\"Loading SaverDef from file: %s\", saver_def_file)\n saver_def = tf.train.SaverDef()\n with tf.gfile.FastGFile(saver_def_file, \"rb\") as f:\n saver_def.ParseFromString(f.read())\n saver = tf.train.Saver(saver_def=saver_def)\n\n return self._create_restore_fn(checkpoint_path, saver)\n\n def feed_image(self, sess, encoded_image):\n 
\"\"\"Feeds an image and returns the initial model state.\n\n See comments at the top of file.\n\n Args:\n sess: TensorFlow Session object.\n encoded_image: An encoded image string.\n\n Returns:\n state: A numpy array of shape [1, state_size].\n \"\"\"\n tf.logging.fatal(\"Please implement feed_image in subclass\")\n\n def inference_step(self, sess, input_feed, state_feed):\n \"\"\"Runs one step of inference.\n\n Args:\n sess: TensorFlow Session object.\n input_feed: A numpy array of shape [batch_size].\n state_feed: A numpy array of shape [batch_size, state_size].\n\n Returns:\n softmax_output: A numpy array of shape [batch_size, vocab_size].\n new_state: A numpy array of shape [batch_size, state_size].\n metadata: Optional. If not None, a string containing metadata about the\n current inference step (e.g. serialized numpy array containing\n activations from a particular model layer.).\n \"\"\"\n tf.logging.fatal(\"Please implement inference_step in subclass\")\n\n# pylint: enable=unused-argument\n",
"# Copyright 2017 The UAI-SDK Authors. All Rights Reserved. \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\" A very simple MNIST inferencer.\n The model that loaded was saved by SavedModelBuilder.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nimport json\nfrom uai.arch.tf_serving import TFServingAiUcloudModel\n\nclass MnistModel(TFServingAiUcloudModel):\n \"\"\" Mnist example model\n \"\"\"\n def __init__(self, conf):\n super(MnistModel, self).__init__(conf)\n\n def load_model(self):\n super(MnistModel, self).load_model()\n\n def preprocess(self, data):\n im = Image.open(data).resize((28, 28)).convert('L')\n im = np.array(im)\n im = im.reshape(784)\n im = im.astype(np.float32)\n im = np.multiply(im, 1.0 / 255.0)\n return im\n\n def execute(self, data, batch_size):\n # call TFServingAiUcloudModel.execute to do the inference\n output_tensor = super(MnistModel, self).execute(data, batch_size)\n\n ret = []\n for i in range(batch_size):\n scores_arr = output_tensor[0]\n ret_val = np.array_str(np.argmax(scores_arr[i])) + '\\n'\n ret.append(ret_val)\n\n return ret\n"
] | [
[
"tensorflow.import_graph_def",
"tensorflow.train.latest_checkpoint",
"tensorflow.gfile.FastGFile",
"tensorflow.train.SaverDef",
"tensorflow.logging.info",
"tensorflow.train.Saver",
"tensorflow.gfile.IsDirectory",
"tensorflow.logging.fatal",
"tensorflow.GraphDef"
],
[
"numpy.array",
"numpy.argmax",
"numpy.multiply"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pyvista/vista | [
"c49a6abae7cc62d242f12ec45a6b22b524db1ec8"
] | [
"pyvista/plotting/widgets.py"
] | [
"\"\"\"Module dedicated to widgets.\"\"\"\n\nimport numpy as np\n\nimport pyvista\nfrom pyvista import _vtk\nfrom pyvista.utilities import (\n NORMALS,\n generate_plane,\n get_array,\n get_array_association,\n try_callback,\n)\n\nfrom .colors import Color\n\n\nclass WidgetHelper:\n \"\"\"An internal class to manage widgets.\n\n It also manages and other helper methods involving widgets.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize widget helper.\"\"\"\n super().__init__(*args, **kwargs)\n self.camera_widgets = []\n self.box_widgets = []\n self.box_clipped_meshes = []\n self.plane_widgets = []\n self.plane_clipped_meshes = []\n self.plane_sliced_meshes = []\n self.line_widgets = []\n self.slider_widgets = []\n self.threshold_meshes = []\n self.isovalue_meshes = []\n self.spline_widgets = []\n self.spline_sliced_meshes = []\n self.sphere_widgets = []\n self.button_widgets = []\n\n def add_box_widget(\n self,\n callback,\n bounds=None,\n factor=1.25,\n rotation_enabled=True,\n color=None,\n use_planes=False,\n outline_translation=True,\n pass_widget=False,\n ):\n \"\"\"Add a box widget to the scene.\n\n This is useless without a callback function. You can pass a\n callable function that takes a single argument, the PolyData\n box output from this widget, and performs a task with that\n box.\n\n Parameters\n ----------\n callback : callable\n The method called every time the box is updated. This has\n two options: Take a single argument, the ``PolyData`` box\n (default) or if ``use_planes=True``, then it takes a\n single argument of the plane collection as a ``vtkPlanes``\n object.\n\n bounds : tuple(float)\n Length 6 tuple of the bounding box where the widget is\n placed.\n\n factor : float, optional\n An inflation factor to expand on the bounds when placing.\n\n rotation_enabled : bool, optional\n If ``False``, the box widget cannot be rotated and is\n strictly orthogonal to the cartesian axes.\n\n color : color_like, optional\n Either a string, rgb sequence, or hex color string.\n Defaults to :attr:`pyvista.global_theme.font.color\n <pyvista.themes._Font.color>`.\n\n use_planes : bool, optional\n Changes the arguments passed to the callback to the planes\n that make up the box.\n\n outline_translation : bool, optional\n If ``False``, the box widget cannot be translated and is\n strictly placed at the given bounds.\n\n pass_widget : bool, optional\n If ``True``, the widget will be passed as the last\n argument of the callback.\n\n Returns\n -------\n vtk.vtkBoxWidget\n Box widget.\n\n Examples\n --------\n The following example generates a static image of the widget.\n\n >>> import pyvista as pv\n >>> from pyvista import examples\n >>> mesh = examples.download_nefertiti()\n >>> p = pv.Plotter()\n >>> _ = p.add_mesh_clip_box(mesh, color='white')\n >>> p.show(cpos=[-1, -1, 0.2])\n\n Download the interactive example at :ref:`box_widget_example`.\n\n \"\"\"\n if bounds is None:\n bounds = self.bounds\n\n def _the_callback(box_widget, event_id):\n the_box = pyvista.PolyData()\n box_widget.GetPolyData(the_box)\n planes = _vtk.vtkPlanes()\n box_widget.GetPlanes(planes)\n if callable(callback):\n if use_planes:\n args = [planes]\n else:\n args = [the_box]\n if pass_widget:\n args.append(box_widget)\n try_callback(callback, *args)\n return\n\n box_widget = _vtk.vtkBoxWidget()\n box_widget.GetOutlineProperty().SetColor(\n Color(color, default_color=pyvista.global_theme.font.color).float_rgb\n )\n box_widget.SetInteractor(self.iren.interactor)\n 
box_widget.SetCurrentRenderer(self.renderer)\n box_widget.SetPlaceFactor(factor)\n box_widget.SetRotationEnabled(rotation_enabled)\n box_widget.SetTranslationEnabled(outline_translation)\n box_widget.PlaceWidget(bounds)\n box_widget.On()\n box_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _the_callback)\n _the_callback(box_widget, None)\n\n self.box_widgets.append(box_widget)\n return box_widget\n\n def clear_box_widgets(self):\n \"\"\"Remove all of the box widgets.\"\"\"\n self.box_widgets.clear()\n\n def add_mesh_clip_box(\n self,\n mesh,\n invert=False,\n rotation_enabled=True,\n widget_color=None,\n outline_translation=True,\n merge_points=True,\n crinkle=False,\n **kwargs,\n ):\n \"\"\"Clip a mesh using a box widget.\n\n Add a mesh to the scene with a box widget that is used to clip\n the mesh interactively.\n\n The clipped mesh is saved to the ``.box_clipped_meshes`` attribute on\n the plotter.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and clip.\n\n invert : bool, optional\n Flag on whether to flip/invert the clip.\n\n rotation_enabled : bool, optional\n If ``False``, the box widget cannot be rotated and is strictly\n orthogonal to the cartesian axes.\n\n widget_color : color_like, optional\n Color of the widget. Either a string, RGB sequence, or\n hex color string. For example:\n\n * ``color='white'``\n * ``color='w'``\n * ``color=[1.0, 1.0, 1.0]``\n * ``color='#FFFFFF'``\n\n outline_translation : bool, optional\n If ``False``, the plane widget cannot be translated and is\n strictly placed at the given bounds.\n\n merge_points : bool, optional\n If ``True`` (default), coinciding points of independently\n defined mesh elements will be merged.\n\n crinkle : bool, optional\n Crinkle the clip by extracting the entire cells along the clip.\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to\n :func:`BasePlotter.add_mesh` to control how the mesh is\n displayed.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the mesh.\n\n \"\"\"\n name = kwargs.get('name', mesh.memory_address)\n rng = mesh.get_data_range(kwargs.get('scalars', None))\n kwargs.setdefault('clim', kwargs.pop('rng', rng))\n mesh.set_active_scalars(kwargs.get('scalars', mesh.active_scalars_name))\n\n self.add_mesh(mesh.outline(), name=name + \"outline\", opacity=0.0)\n\n port = 1 if invert else 0\n\n if crinkle:\n mesh.cell_data['cell_ids'] = np.arange(mesh.n_cells)\n\n alg = _vtk.vtkBoxClipDataSet()\n if not merge_points:\n # vtkBoxClipDataSet uses vtkMergePoints by default\n alg.SetLocator(_vtk.vtkNonMergingPointLocator())\n alg.SetInputDataObject(mesh)\n alg.GenerateClippedOutputOn()\n\n box_clipped_mesh = pyvista.wrap(alg.GetOutput(port))\n self.box_clipped_meshes.append(box_clipped_mesh)\n\n def callback(planes):\n bounds = []\n for i in range(planes.GetNumberOfPlanes()):\n plane = planes.GetPlane(i)\n bounds.append(plane.GetNormal())\n bounds.append(plane.GetOrigin())\n\n alg.SetBoxClip(*bounds)\n alg.Update()\n clipped = pyvista.wrap(alg.GetOutput(port))\n if crinkle:\n clipped = mesh.extract_cells(np.unique(clipped.cell_data['cell_ids']))\n box_clipped_mesh.shallow_copy(clipped)\n\n self.add_box_widget(\n callback=callback,\n bounds=mesh.bounds,\n factor=1.25,\n rotation_enabled=rotation_enabled,\n use_planes=True,\n color=widget_color,\n outline_translation=outline_translation,\n )\n\n return self.add_mesh(box_clipped_mesh, reset_camera=False, **kwargs)\n\n def add_plane_widget(\n self,\n callback,\n normal='x',\n origin=None,\n 
bounds=None,\n factor=1.25,\n color=None,\n assign_to_axis=None,\n tubing=False,\n outline_translation=False,\n origin_translation=True,\n implicit=True,\n pass_widget=False,\n test_callback=True,\n normal_rotation=True,\n ):\n \"\"\"Add a plane widget to the scene.\n\n This is useless without a callback function. You can pass a\n callable function that takes two arguments, the normal and\n origin of the plane in that order output from this widget, and\n performs a task with that plane.\n\n Parameters\n ----------\n callback : callable\n The method called every time the plane is updated. Takes\n two arguments, the normal and origin of the plane in that\n order.\n\n normal : str or tuple(float)\n The starting normal vector of the plane.\n\n origin : tuple(float)\n The starting coordinate of the center of the place.\n\n bounds : tuple(float)\n Length 6 tuple of the bounding box where the widget is placed.\n\n factor : float, optional\n An inflation factor to expand on the bounds when placing.\n\n color : color_like, optional\n Either a string, rgb list, or hex color string.\n\n assign_to_axis : str or int, optional\n Assign the normal of the plane to be parallel with a given\n axis: options are ``(0, 'x')``, ``(1, 'y')``, or ``(2,\n 'z')``.\n\n tubing : bool, optional\n When using an implicit plane wiget, this controls whether\n or not tubing is shown around the plane's boundaries.\n\n outline_translation : bool, optional\n If ``False``, the plane widget cannot be translated and is\n strictly placed at the given bounds. Only valid when using\n an implicit plane.\n\n origin_translation : bool, optional\n If ``False``, the plane widget cannot be translated by its\n origin and is strictly placed at the given origin. Only\n valid when using an implicit plane.\n\n implicit : bool, optional\n When ``True``, a ``vtkImplicitPlaneWidget`` is used and\n when ``False``, a ``vtkPlaneWidget`` is used.\n\n pass_widget : bool, optional\n If ``True``, the widget will be passed as the last\n argument of the callback.\n\n test_callback : bool, optional\n If ``True``, run the callback function after the widget is\n created.\n\n normal_rotation : bool, optional\n Set the opacity of the normal vector arrow to 0 such that\n it is effectively disabled. This prevents the user from\n rotating the normal. 
This is forced to ``False`` when\n ``assign_to_axis`` is set.\n\n Returns\n -------\n vtk.vtkImplicitPlaneWidget or vtk.vtkPlaneWidget\n Plane widget.\n\n \"\"\"\n if origin is None:\n origin = self.center\n if bounds is None:\n bounds = self.bounds\n\n if isinstance(normal, str):\n normal = NORMALS[normal.lower()]\n\n color = Color(color, default_color=pyvista.global_theme.font.color)\n\n if assign_to_axis:\n normal_rotation = False\n\n def _the_callback(widget, event_id):\n the_plane = _vtk.vtkPlane()\n widget.GetPlane(the_plane)\n normal = the_plane.GetNormal()\n origin = the_plane.GetOrigin()\n if callable(callback):\n if pass_widget:\n try_callback(callback, normal, origin, widget)\n else:\n try_callback(callback, normal, origin)\n return\n\n if implicit:\n plane_widget = _vtk.vtkImplicitPlaneWidget()\n plane_widget.GetNormalProperty().SetColor(color.float_rgb)\n plane_widget.GetOutlineProperty().SetColor(color.float_rgb)\n plane_widget.GetOutlineProperty().SetColor(color.float_rgb)\n plane_widget.SetTubing(tubing)\n plane_widget.SetOutlineTranslation(outline_translation)\n plane_widget.SetOriginTranslation(origin_translation)\n\n _start_interact = lambda plane_widget, event: plane_widget.SetDrawPlane(True)\n _stop_interact = lambda plane_widget, event: plane_widget.SetDrawPlane(False)\n\n plane_widget.SetDrawPlane(False)\n plane_widget.AddObserver(_vtk.vtkCommand.StartInteractionEvent, _start_interact)\n plane_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _stop_interact)\n plane_widget.SetPlaceFactor(factor)\n plane_widget.PlaceWidget(bounds)\n plane_widget.SetOrigin(origin)\n\n if not normal_rotation:\n plane_widget.GetNormalProperty().SetOpacity(0)\n\n else:\n # Position of the small plane\n source = _vtk.vtkPlaneSource()\n source.SetNormal(normal)\n source.SetCenter(origin)\n source.SetPoint1(\n origin[0] + (bounds[1] - bounds[0]) * 0.01,\n origin[1] - (bounds[3] - bounds[2]) * 0.01,\n origin[2],\n )\n source.SetPoint2(\n origin[0] - (bounds[1] - bounds[0]) * 0.01,\n origin[1] + (bounds[3] - bounds[2]) * 0.01,\n origin[2],\n )\n source.Update()\n plane_widget = _vtk.vtkPlaneWidget()\n plane_widget.SetHandleSize(0.01)\n # Position of the widget\n plane_widget.SetInputData(source.GetOutput())\n plane_widget.SetRepresentationToOutline()\n plane_widget.SetPlaceFactor(factor)\n plane_widget.PlaceWidget(bounds)\n plane_widget.SetCenter(origin) # Necessary\n plane_widget.GetPlaneProperty().SetColor(color.float_rgb) # self.C_LOT[fn])\n plane_widget.GetHandleProperty().SetColor(color.float_rgb)\n\n if not normal_rotation:\n plane_widget.GetHandleProperty().SetOpacity(0)\n\n plane_widget.GetPlaneProperty().SetOpacity(0.5)\n plane_widget.SetInteractor(self.iren.interactor)\n plane_widget.SetCurrentRenderer(self.renderer)\n\n if assign_to_axis:\n # Note that normal_rotation was forced to False\n if assign_to_axis in [0, \"x\", \"X\"]:\n plane_widget.NormalToXAxisOn()\n plane_widget.SetNormal(NORMALS[\"x\"])\n elif assign_to_axis in [1, \"y\", \"Y\"]:\n plane_widget.NormalToYAxisOn()\n plane_widget.SetNormal(NORMALS[\"y\"])\n elif assign_to_axis in [2, \"z\", \"Z\"]:\n plane_widget.NormalToZAxisOn()\n plane_widget.SetNormal(NORMALS[\"z\"])\n else:\n raise RuntimeError(\"assign_to_axis not understood\")\n else:\n plane_widget.SetNormal(normal)\n\n plane_widget.Modified()\n plane_widget.UpdatePlacement()\n plane_widget.On()\n plane_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _the_callback)\n if test_callback:\n _the_callback(plane_widget, None) # Trigger immediate 
update\n\n self.plane_widgets.append(plane_widget)\n return plane_widget\n\n def clear_plane_widgets(self):\n \"\"\"Remove all of the plane widgets.\"\"\"\n self.plane_widgets.clear()\n\n def add_mesh_clip_plane(\n self,\n mesh,\n normal='x',\n invert=False,\n widget_color=None,\n value=0.0,\n assign_to_axis=None,\n tubing=False,\n origin_translation=True,\n outline_translation=False,\n implicit=True,\n normal_rotation=True,\n crinkle=False,\n **kwargs,\n ):\n \"\"\"Clip a mesh using a plane widget.\n\n Add a mesh to the scene with a plane widget that is used to clip\n the mesh interactively.\n\n The clipped mesh is saved to the ``.plane_clipped_meshes``\n attribute on the plotter.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and clip.\n\n normal : str or tuple(float), optional\n The starting normal vector of the plane.\n\n invert : bool, optional\n Flag on whether to flip/invert the clip.\n\n widget_color : color_like, optional\n Either a string, RGB list, or hex color string.\n\n value : float, optional\n Set the clipping value along the normal direction.\n The default value is 0.0.\n\n assign_to_axis : str or int, optional\n Assign the normal of the plane to be parallel with a given\n axis. Options are ``(0, 'x')``, ``(1, 'y')``, or ``(2,\n 'z')``.\n\n tubing : bool, optional\n When using an implicit plane wiget, this controls whether\n or not tubing is shown around the plane's boundaries.\n\n origin_translation : bool, optional\n If ``False``, the plane widget cannot be translated by its\n origin and is strictly placed at the given origin. Only\n valid when using an implicit plane.\n\n outline_translation : bool, optional\n If ``False``, the box widget cannot be translated and is\n strictly placed at the given bounds.\n\n implicit : bool, optional\n When ``True``, a ``vtkImplicitPlaneWidget`` is used and\n when ``False``, a ``vtkPlaneWidget`` is used.\n\n normal_rotation : bool, optional\n Set the opacity of the normal vector arrow to 0 such that\n it is effectively disabled. This prevents the user from\n rotating the normal. 
This is forced to ``False`` when\n ``assign_to_axis`` is set.\n\n crinkle : bool, optional\n Crinkle the clip by extracting the entire cells along the clip.\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to\n :func:`BasePlotter.add_mesh` to control how the mesh is\n displayed.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the mesh.\n\n \"\"\"\n from pyvista.core.filters import _get_output # avoids circular import\n\n name = kwargs.get('name', mesh.memory_address)\n rng = mesh.get_data_range(kwargs.get('scalars', None))\n kwargs.setdefault('clim', kwargs.pop('rng', rng))\n mesh.set_active_scalars(kwargs.get('scalars', mesh.active_scalars_name))\n\n self.add_mesh(mesh.outline(), name=name + \"outline\", opacity=0.0)\n\n if crinkle:\n mesh.cell_data['cell_ids'] = np.arange(0, mesh.n_cells, dtype=int)\n\n if isinstance(mesh, _vtk.vtkPolyData):\n alg = _vtk.vtkClipPolyData()\n # elif isinstance(mesh, vtk.vtkImageData):\n # alg = vtk.vtkClipVolume()\n # alg.SetMixed3DCellGeneration(True)\n else:\n alg = _vtk.vtkTableBasedClipDataSet()\n alg.SetInputDataObject(mesh) # Use the grid as the data we desire to cut\n alg.SetValue(value)\n alg.SetInsideOut(invert) # invert the clip if needed\n\n plane_clipped_mesh = _get_output(alg)\n self.plane_clipped_meshes.append(plane_clipped_mesh)\n\n def callback(normal, origin):\n function = generate_plane(normal, origin)\n alg.SetClipFunction(function) # the implicit function\n alg.Update() # Perform the Cut\n clipped = pyvista.wrap(alg.GetOutput())\n if crinkle:\n clipped = mesh.extract_cells(np.unique(clipped.cell_data['cell_ids']))\n plane_clipped_mesh.shallow_copy(clipped)\n\n self.add_plane_widget(\n callback=callback,\n bounds=mesh.bounds,\n factor=1.25,\n normal=normal,\n color=widget_color,\n tubing=tubing,\n assign_to_axis=assign_to_axis,\n origin_translation=origin_translation,\n outline_translation=outline_translation,\n implicit=implicit,\n origin=mesh.center,\n normal_rotation=normal_rotation,\n )\n\n return self.add_mesh(plane_clipped_mesh, **kwargs)\n\n def add_mesh_slice(\n self,\n mesh,\n normal='x',\n generate_triangles=False,\n widget_color=None,\n assign_to_axis=None,\n tubing=False,\n origin_translation=True,\n outline_translation=False,\n implicit=True,\n normal_rotation=True,\n **kwargs,\n ):\n \"\"\"Slice a mesh using a plane widget.\n\n Add a mesh to the scene with a plane widget that is used to slice\n the mesh interactively.\n\n The sliced mesh is saved to the ``.plane_sliced_meshes`` attribute on\n the plotter.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and slice.\n\n normal : str or tuple(float), optional\n The starting normal vector of the plane.\n\n generate_triangles : bool, optional\n If this is enabled (``False`` by default), the output will be\n triangles otherwise, the output will be the intersection polygons.\n\n widget_color : color_like, optional\n Either a string, RGB sequence, or hex color string. Defaults\n to ``'white'``.\n\n assign_to_axis : str or int, optional\n Assign the normal of the plane to be parallel with a given axis:\n options are (0, 'x'), (1, 'y'), or (2, 'z').\n\n tubing : bool, optional\n When using an implicit plane wiget, this controls whether or not\n tubing is shown around the plane's boundaries.\n\n origin_translation : bool, optional\n If ``False``, the plane widget cannot be translated by its origin\n and is strictly placed at the given origin. 
Only valid when using\n an implicit plane.\n\n outline_translation : bool, optional\n If ``False``, the box widget cannot be translated and is strictly\n placed at the given bounds.\n\n implicit : bool, optional\n When ``True``, a ``vtkImplicitPlaneWidget`` is used and when\n ``False``, a ``vtkPlaneWidget`` is used.\n\n normal_rotation : bool, optional\n Set the opacity of the normal vector arrow to 0 such that it is\n effectively disabled. This prevents the user from rotating the\n normal. This is forced to ``False`` when ``assign_to_axis`` is set.\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to\n :func:`BasePlotter.add_mesh` to control how the mesh is\n displayed.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the mesh.\n\n \"\"\"\n name = kwargs.get('name', mesh.memory_address)\n rng = mesh.get_data_range(kwargs.get('scalars', None))\n kwargs.setdefault('clim', kwargs.pop('rng', rng))\n mesh.set_active_scalars(kwargs.get('scalars', mesh.active_scalars_name))\n\n self.add_mesh(mesh.outline(), name=name + \"outline\", opacity=0.0)\n\n alg = _vtk.vtkCutter() # Construct the cutter object\n alg.SetInputDataObject(mesh) # Use the grid as the data we desire to cut\n if not generate_triangles:\n alg.GenerateTrianglesOff()\n\n plane_sliced_mesh = pyvista.wrap(alg.GetOutput())\n self.plane_sliced_meshes.append(plane_sliced_mesh)\n\n def callback(normal, origin):\n # create the plane for clipping\n plane = generate_plane(normal, origin)\n alg.SetCutFunction(plane) # the cutter to use the plane we made\n alg.Update() # Perform the Cut\n plane_sliced_mesh.shallow_copy(alg.GetOutput())\n\n self.add_plane_widget(\n callback=callback,\n bounds=mesh.bounds,\n factor=1.25,\n normal=normal,\n color=widget_color,\n tubing=tubing,\n assign_to_axis=assign_to_axis,\n origin_translation=origin_translation,\n outline_translation=outline_translation,\n implicit=implicit,\n origin=mesh.center,\n normal_rotation=normal_rotation,\n )\n\n return self.add_mesh(plane_sliced_mesh, **kwargs)\n\n def add_mesh_slice_orthogonal(\n self, mesh, generate_triangles=False, widget_color=None, tubing=False, **kwargs\n ):\n \"\"\"Slice a mesh with three interactive planes.\n\n Adds three interactive plane slicing widgets for orthogonal slicing\n along each cartesian axis.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and threshold.\n\n generate_triangles : bool, optional\n If this is enabled (``False`` by default), the output will be\n triangles otherwise, the output will be the intersection polygons.\n\n widget_color : color_like, optional\n Color of the widget. Either a string, RGB sequence, or\n hex color string. 
For example:\n\n * ``color='white'``\n * ``color='w'``\n * ``color=[1.0, 1.0, 1.0]``\n * ``color='#FFFFFF'``\n\n tubing : bool, optional\n When using an implicit plane wiget, this controls whether or not\n tubing is shown around the plane's boundaries.\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to\n :func:`BasePlotter.add_mesh` to control how the mesh is\n displayed.\n\n Returns\n -------\n list\n List of vtk.vtkActor(s).\n\n \"\"\"\n actors = []\n for ax in [\"x\", \"y\", \"z\"]:\n a = self.add_mesh_slice(\n mesh,\n assign_to_axis=ax,\n origin_translation=False,\n outline_translation=False,\n generate_triangles=generate_triangles,\n widget_color=widget_color,\n tubing=tubing,\n **kwargs,\n )\n actors.append(a)\n\n return actors\n\n def add_line_widget(\n self,\n callback,\n bounds=None,\n factor=1.25,\n resolution=100,\n color=None,\n use_vertices=False,\n pass_widget=False,\n ):\n \"\"\"Add a line widget to the scene.\n\n This is useless without a callback function. You can pass a\n callable function that takes a single argument, the PolyData\n line output from this widget, and performs a task with that\n line.\n\n Parameters\n ----------\n callback : callable\n The method called every time the line is updated. This has\n two options: Take a single argument, the ``PolyData`` line\n (default) or if ``use_vertices=True``, then it can take\n two arguments of the coordinates of the line's end points.\n\n bounds : tuple(float), optional\n Length 6 tuple of the bounding box where the widget is\n placed.\n\n factor : float, optional\n An inflation factor to expand on the bounds when placing.\n\n resolution : int, optional\n The number of points in the line created.\n\n color : color_like, optional, defaults to white\n Either a string, rgb sequence, or hex color string.\n\n use_vertices : bool, optional\n Changes the arguments of the callback method to take the end\n points of the line instead of a PolyData object.\n\n pass_widget : boollist\n If ``True``, the widget will be passed as the last\n argument of the callback.\n\n Returns\n -------\n vtk.vtkLineWidget\n Created line widget.\n\n \"\"\"\n if bounds is None:\n bounds = self.bounds\n\n color = Color(color, default_color=pyvista.global_theme.font.color)\n\n def _the_callback(widget, event_id):\n pointa = widget.GetPoint1()\n pointb = widget.GetPoint2()\n if callable(callback):\n if use_vertices:\n args = [pointa, pointb]\n else:\n the_line = pyvista.Line(pointa, pointb, resolution=resolution)\n args = [the_line]\n if pass_widget:\n args.append(widget)\n try_callback(callback, *args)\n\n line_widget = _vtk.vtkLineWidget()\n line_widget.GetLineProperty().SetColor(color.float_rgb)\n line_widget.SetInteractor(self.iren.interactor)\n line_widget.SetCurrentRenderer(self.renderer)\n line_widget.SetPlaceFactor(factor)\n line_widget.PlaceWidget(bounds)\n line_widget.SetResolution(resolution)\n line_widget.Modified()\n line_widget.On()\n line_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _the_callback)\n _the_callback(line_widget, None)\n\n self.line_widgets.append(line_widget)\n return line_widget\n\n def clear_line_widgets(self):\n \"\"\"Remove all of the line widgets.\"\"\"\n self.line_widgets.clear()\n\n def add_text_slider_widget(\n self,\n callback,\n data,\n value=None,\n pointa=(0.4, 0.9),\n pointb=(0.9, 0.9),\n color=None,\n event_type='end',\n style=None,\n ):\n \"\"\"Add a text slider bar widget.\n\n This is useless without a callback function. 
You can pass a callable\n function that takes a single argument, the value of this slider widget,\n and performs a task with that value.\n\n Parameters\n ----------\n callback : callable\n The method called every time the slider is updated. This should take\n a single parameter: the float value of the slider.\n\n data : list\n The list of possible values displayed on the slider bar.\n\n value : float, optional\n The starting value of the slider.\n\n pointa : tuple(float), optional\n The relative coordinates of the left point of the slider on the\n display port.\n\n pointb : tuple(float), optional\n The relative coordinates of the right point of the slider on the\n display port.\n\n color : color_like, optional\n Either a string, RGB list, or hex color string. Defaults\n to :attr:`pyvista.global_theme.font.color\n <pyvista.themes._Font.color>`.\n\n event_type : str, optional\n Either ``'start'``, ``'end'`` or ``'always'``, this\n defines how often the slider interacts with the callback.\n\n style : str, optional\n The name of the slider style. The list of available styles\n are in ``pyvista.global_theme.slider_styles``. Defaults to\n ``None``.\n\n Returns\n -------\n vtk.vtkSliderWidget\n The VTK slider widget configured to display text.\n\n \"\"\"\n if not isinstance(data, list):\n raise TypeError(\n f\"The `data` parameter must be a list but {type(data).__name__} was passed instead\"\n )\n n_states = len(data)\n if n_states == 0:\n raise ValueError(\"The input list of values is empty\")\n delta = (n_states - 1) / float(n_states)\n # avoid division by zero in case there is only one element\n delta = 1 if delta == 0 else delta\n\n def _the_callback(value):\n if isinstance(value, float):\n idx = int(value / delta)\n # handle limit index\n if idx == n_states:\n idx = n_states - 1\n if callable(callback):\n try_callback(callback, data[idx])\n return\n\n slider_widget = self.add_slider_widget(\n callback=_the_callback,\n rng=[0, n_states - 1],\n value=value,\n pointa=pointa,\n pointb=pointb,\n color=color,\n event_type=event_type,\n style=style,\n )\n slider_rep = slider_widget.GetRepresentation()\n slider_rep.ShowSliderLabelOff()\n\n def title_callback(widget, event):\n value = widget.GetRepresentation().GetValue()\n idx = int(value / delta)\n # handle limit index\n if idx == n_states:\n idx = n_states - 1\n slider_rep.SetTitleText(data[idx])\n\n if event_type == 'start':\n slider_widget.AddObserver(_vtk.vtkCommand.StartInteractionEvent, title_callback)\n elif event_type == 'end':\n slider_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, title_callback)\n elif event_type == 'always':\n slider_widget.AddObserver(_vtk.vtkCommand.InteractionEvent, title_callback)\n else:\n raise ValueError(\n \"Expected value for `event_type` is 'start',\"\n f\" 'end' or 'always': {event_type} was given.\"\n )\n title_callback(slider_widget, None)\n return slider_widget\n\n def add_slider_widget(\n self,\n callback,\n rng,\n value=None,\n title=None,\n pointa=(0.4, 0.9),\n pointb=(0.9, 0.9),\n color=None,\n pass_widget=False,\n event_type='end',\n style=None,\n title_height=0.03,\n title_opacity=1.0,\n title_color=None,\n fmt=None,\n ):\n \"\"\"Add a slider bar widget.\n\n This is useless without a callback function. You can pass a\n callable function that takes a single argument, the value of\n this slider widget, and performs a task with that value.\n\n Parameters\n ----------\n callback : callable\n The method called every time the slider is updated. 
This\n should take a single parameter: the float value of the\n slider.\n\n rng : tuple(float)\n Length two tuple of the minimum and maximum ranges of the\n slider.\n\n value : float, optional\n The starting value of the slider.\n\n title : str, optional\n The string label of the slider widget.\n\n pointa : tuple(float), optional\n The relative coordinates of the left point of the slider\n on the display port.\n\n pointb : tuple(float), optional\n The relative coordinates of the right point of the slider\n on the display port.\n\n color : color_like, optional\n Either a string, RGB list, or hex color string. Defaults\n to :attr:`pyvista.global_theme.font.color\n <pyvista.themes._Font.color>`.\n\n pass_widget : bool, optional\n If ``True``, the widget will be passed as the last\n argument of the callback.\n\n event_type : str, optional\n Either ``'start'``, ``'end'`` or ``'always'``, this\n defines how often the slider interacts with the callback.\n\n style : str, optional\n The name of the slider style. The list of available styles\n are in ``pyvista.global_theme.slider_styles``. Defaults to\n ``None``.\n\n title_height : float, optional\n Relative height of the title as compared to the length of\n the slider.\n\n title_opacity : float, optional\n Opacity of title. Defaults to 1.0.\n\n title_color : color_like, optional\n Either a string, RGB sequence, or hex color string. Defaults\n to the value given in ``color``.\n\n fmt : str, optional\n String formatter used to format numerical data. Defaults\n to ``None``.\n\n Returns\n -------\n vtk.vtkSliderWidget\n Slider widget.\n\n Examples\n --------\n >>> import pyvista as pv\n >>> pl = pv.Plotter()\n >>> def create_mesh(value):\n ... res = int(value)\n ... sphere = pv.Sphere(phi_resolution=res, theta_resolution=res)\n ... pl.add_mesh(sphere, name=\"sphere\", show_edges=True)\n >>> slider = pl.add_slider_widget(\n ... create_mesh,\n ... [5, 100],\n ... title=\"Resolution\",\n ... title_opacity=0.5,\n ... title_color=\"red\",\n ... fmt=\"%0.9f\",\n ... title_height=0.08,\n ... 
)\n >>> pl.show()\n \"\"\"\n if value is None:\n value = ((rng[1] - rng[0]) / 2) + rng[0]\n\n color = Color(color, default_color=pyvista.global_theme.font.color)\n title_color = Color(title_color, default_color=color)\n\n if fmt is None:\n fmt = pyvista.global_theme.font.fmt\n\n def normalize(point, viewport):\n return (point[0] * (viewport[2] - viewport[0]), point[1] * (viewport[3] - viewport[1]))\n\n pointa = normalize(pointa, self.renderer.GetViewport())\n pointb = normalize(pointb, self.renderer.GetViewport())\n\n slider_rep = _vtk.vtkSliderRepresentation2D()\n slider_rep.SetPickable(False)\n slider_rep.SetMinimumValue(rng[0])\n slider_rep.SetMaximumValue(rng[1])\n slider_rep.SetValue(value)\n slider_rep.SetTitleText(title)\n slider_rep.GetTitleProperty().SetColor(color.float_rgb)\n slider_rep.GetSliderProperty().SetColor(color.float_rgb)\n slider_rep.GetCapProperty().SetColor(color.float_rgb)\n slider_rep.GetLabelProperty().SetColor(color.float_rgb)\n slider_rep.GetTubeProperty().SetColor(color.float_rgb)\n slider_rep.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n slider_rep.GetPoint1Coordinate().SetValue(pointa[0], pointa[1])\n slider_rep.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n slider_rep.GetPoint2Coordinate().SetValue(pointb[0], pointb[1])\n slider_rep.SetSliderLength(0.05)\n slider_rep.SetSliderWidth(0.05)\n slider_rep.SetEndCapLength(0.01)\n\n if style is not None:\n if not isinstance(style, str):\n raise TypeError(\n f\"Expected type for ``style`` is str but {type(style).__name__} was given.\"\n )\n slider_style = getattr(pyvista.global_theme.slider_styles, style)\n slider_rep.SetSliderLength(slider_style.slider_length)\n slider_rep.SetSliderWidth(slider_style.slider_width)\n slider_rep.GetSliderProperty().SetColor(slider_style.slider_color.float_rgb)\n slider_rep.SetTubeWidth(slider_style.tube_width)\n slider_rep.GetTubeProperty().SetColor(slider_style.tube_color.float_rgb)\n slider_rep.GetCapProperty().SetOpacity(slider_style.cap_opacity)\n slider_rep.SetEndCapLength(slider_style.cap_length)\n slider_rep.SetEndCapWidth(slider_style.cap_width)\n\n def _the_callback(widget, event):\n value = widget.GetRepresentation().GetValue()\n if callable(callback):\n if pass_widget:\n try_callback(callback, value, widget)\n else:\n try_callback(callback, value)\n return\n\n slider_widget = _vtk.vtkSliderWidget()\n slider_widget.SetInteractor(self.iren.interactor)\n slider_widget.SetCurrentRenderer(self.renderer)\n slider_widget.SetRepresentation(slider_rep)\n slider_widget.GetRepresentation().SetTitleHeight(title_height)\n slider_widget.GetRepresentation().GetTitleProperty().SetOpacity(title_opacity)\n slider_widget.GetRepresentation().GetTitleProperty().SetColor(title_color.float_rgb)\n if fmt is not None:\n slider_widget.GetRepresentation().SetLabelFormat(fmt)\n slider_widget.On()\n if not isinstance(event_type, str):\n raise TypeError(f\"Expected type for `event_type` is str: {type(event_type)} was given.\")\n if event_type == 'start':\n slider_widget.AddObserver(_vtk.vtkCommand.StartInteractionEvent, _the_callback)\n elif event_type == 'end':\n slider_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _the_callback)\n elif event_type == 'always':\n slider_widget.AddObserver(_vtk.vtkCommand.InteractionEvent, _the_callback)\n else:\n raise ValueError(\n \"Expected value for `event_type` is 'start',\"\n f\" 'end' or 'always': {event_type} was given.\"\n )\n _the_callback(slider_widget, None)\n\n self.slider_widgets.append(slider_widget)\n 
return slider_widget\n\n def clear_slider_widgets(self):\n \"\"\"Remove all of the slider widgets.\"\"\"\n self.slider_widgets.clear()\n\n def add_mesh_threshold(\n self,\n mesh,\n scalars=None,\n invert=False,\n widget_color=None,\n preference='cell',\n title=None,\n pointa=(0.4, 0.9),\n pointb=(0.9, 0.9),\n continuous=False,\n **kwargs,\n ):\n \"\"\"Apply a threshold on a mesh with a slider.\n\n Add a mesh to the scene with a slider widget that is used to\n threshold the mesh interactively.\n\n The threshold mesh is saved to the ``.threshold_meshes`` attribute on\n the plotter.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and threshold.\n\n scalars : str, optional\n The string name of the scalars on the mesh to threshold and display.\n\n invert : bool, optional\n Invert (flip) the threshold.\n\n widget_color : color_like, optional\n Color of the widget. Either a string, RGB sequence, or\n hex color string. For example:\n\n * ``color='white'``\n * ``color='w'``\n * ``color=[1.0, 1.0, 1.0]``\n * ``color='#FFFFFF'``\n\n preference : str, optional\n When ``mesh.n_points == mesh.n_cells`` and setting\n scalars, this parameter sets how the scalars will be\n mapped to the mesh. Default ``'points'``, causes the\n scalars will be associated with the mesh points. Can be\n either ``'points'`` or ``'cells'``.\n\n title : str, optional\n The string label of the slider widget.\n\n pointa : sequence, optional\n The relative coordinates of the left point of the slider\n on the display port.\n\n pointb : sequence, optional\n The relative coordinates of the right point of the slider\n on the display port.\n\n continuous : bool, optional\n If this is enabled (default is ``False``), use the continuous\n interval ``[minimum cell scalar, maximum cell scalar]``\n to intersect the threshold bound, rather than the set of\n discrete scalar values from the vertices.\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to ``add_mesh`` to\n control how the mesh is displayed.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the mesh.\n\n \"\"\"\n if isinstance(mesh, pyvista.MultiBlock):\n raise TypeError('MultiBlock datasets are not supported for threshold widget.')\n name = kwargs.get('name', mesh.memory_address)\n if scalars is None:\n field, scalars = mesh.active_scalars_info\n arr = get_array(mesh, scalars, preference=preference)\n if arr is None:\n raise ValueError('No arrays present to threshold.')\n field = get_array_association(mesh, scalars, preference=preference)\n\n rng = mesh.get_data_range(scalars)\n kwargs.setdefault('clim', kwargs.pop('rng', rng))\n if title is None:\n title = scalars\n mesh.set_active_scalars(scalars)\n\n self.add_mesh(mesh.outline(), name=name + \"outline\", opacity=0.0)\n\n alg = _vtk.vtkThreshold()\n alg.SetInputDataObject(mesh)\n alg.SetInputArrayToProcess(\n 0, 0, 0, field.value, scalars\n ) # args: (idx, port, connection, field, name)\n alg.SetUseContinuousCellRange(continuous)\n\n threshold_mesh = pyvista.wrap(alg.GetOutput())\n self.threshold_meshes.append(threshold_mesh)\n\n def callback(value):\n if invert:\n alg.ThresholdByLower(value)\n else:\n alg.ThresholdByUpper(value)\n alg.Update()\n threshold_mesh.shallow_copy(alg.GetOutput())\n\n self.add_slider_widget(\n callback=callback,\n rng=rng,\n title=title,\n color=widget_color,\n pointa=pointa,\n pointb=pointb,\n )\n\n kwargs.setdefault(\"reset_camera\", False)\n return self.add_mesh(threshold_mesh, scalars=scalars, **kwargs)\n\n def 
add_mesh_isovalue(\n self,\n mesh,\n scalars=None,\n compute_normals=False,\n compute_gradients=False,\n compute_scalars=True,\n preference='point',\n title=None,\n pointa=(0.4, 0.9),\n pointb=(0.9, 0.9),\n widget_color=None,\n **kwargs,\n ):\n \"\"\"Create a contour of a mesh with a slider.\n\n Add a mesh to the scene with a slider widget that is used to\n contour at an isovalue of the *point* data on the mesh\n interactively.\n\n The isovalue mesh is saved to the ``.isovalue_meshes``\n attribute on the plotter.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and contour.\n\n scalars : str, optional\n The string name of the scalars on the mesh to contour and display.\n\n compute_normals : bool, optional\n Enable or disable the computation of normals. If the\n output data will be processed by filters that modify\n topology or geometry, it may be wise to disable computing\n normals.\n\n compute_gradients : bool, optional\n Enable or disable the computation of gradients. If the\n output data will be processed by filters that modify\n topology or geometry, it may be wise to disable computing\n gradients.\n\n compute_scalars : bool, optional\n Enable or disable the computation of scalars.\n\n preference : str, optional\n When ``mesh.n_points == mesh.n_cells`` and setting\n scalars, this parameter sets how the scalars will be\n mapped to the mesh. Default ``'points'``, causes the\n scalars will be associated with the mesh points. Can be\n either ``'points'`` or ``'cells'``.\n\n title : str, optional\n The string label of the slider widget.\n\n pointa : sequence, optional\n The relative coordinates of the left point of the slider\n on the display port.\n\n pointb : sequence\n The relative coordinates of the right point of the slider\n on the display port.\n\n widget_color : color_like, optional\n Color of the widget. Either a string, RGB sequence, or\n hex color string. For example:\n\n * ``color='white'``\n * ``color='w'``\n * ``color=[1.0, 1.0, 1.0]``\n * ``color='#FFFFFF'``\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to\n :func:`BasePlotter.add_mesh` to control how the mesh is\n displayed.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the mesh.\n\n \"\"\"\n if isinstance(mesh, pyvista.MultiBlock):\n raise TypeError('MultiBlock datasets are not supported for this widget.')\n name = kwargs.get('name', mesh.memory_address)\n # set the array to contour on\n if mesh.n_arrays < 1:\n raise ValueError('Input dataset for the contour filter must have data arrays.')\n if scalars is None:\n field, scalars = mesh.active_scalars_info\n else:\n field = get_array_association(mesh, scalars, preference=preference)\n # NOTE: only point data is allowed? well cells works but seems buggy?\n if field != pyvista.FieldAssociation.POINT:\n raise TypeError(\n f'Contour filter only works on Point data. 
Array ({scalars}) is in the Cell data.'\n )\n\n rng = mesh.get_data_range(scalars)\n kwargs.setdefault('clim', kwargs.pop('rng', rng))\n if title is None:\n title = scalars\n mesh.set_active_scalars(scalars)\n\n alg = _vtk.vtkContourFilter()\n alg.SetInputDataObject(mesh)\n alg.SetComputeNormals(compute_normals)\n alg.SetComputeGradients(compute_gradients)\n alg.SetComputeScalars(compute_scalars)\n alg.SetInputArrayToProcess(0, 0, 0, field.value, scalars)\n alg.SetNumberOfContours(1) # Only one contour level\n\n self.add_mesh(mesh.outline(), name=name + \"outline\", opacity=0.0)\n\n isovalue_mesh = pyvista.wrap(alg.GetOutput())\n self.isovalue_meshes.append(isovalue_mesh)\n\n def callback(value):\n alg.SetValue(0, value)\n alg.Update()\n isovalue_mesh.shallow_copy(alg.GetOutput())\n\n self.add_slider_widget(\n callback=callback,\n rng=rng,\n title=title,\n color=widget_color,\n pointa=pointa,\n pointb=pointb,\n )\n\n kwargs.setdefault(\"reset_camera\", False)\n return self.add_mesh(isovalue_mesh, scalars=scalars, **kwargs)\n\n def add_spline_widget(\n self,\n callback,\n bounds=None,\n factor=1.25,\n n_handles=5,\n resolution=25,\n color=\"yellow\",\n show_ribbon=False,\n ribbon_color=\"pink\",\n ribbon_opacity=0.5,\n pass_widget=False,\n closed=False,\n initial_points=None,\n ):\n \"\"\"Create and add a spline widget to the scene.\n\n Use the bounds argument to place this widget. Several \"handles\" are\n used to control a parametric function for building this spline. Click\n directly on the line to translate the widget.\n\n Parameters\n ----------\n callback : callable\n The method called every time the spline is updated. This passes a\n :class:`pyvista.PolyData` object to the callback function of the\n generated spline.\n\n bounds : tuple(float), optional\n Length 6 tuple of the bounding box where the widget is placed.\n\n factor : float, optional\n An inflation factor to expand on the bounds when placing.\n\n n_handles : int, optional\n The number of interactive spheres to control the spline's\n parametric function.\n\n resolution : int, optional\n The number of points in the spline created between all the handles.\n\n color : color_like, optional\n Either a string, RGB sequence, or hex color string.\n\n show_ribbon : bool, optional\n If ``True``, the poly plane used for slicing will also be shown.\n\n ribbon_color : color_like, optional\n Color of the ribbon. Either a string, RGB sequence, or\n hex color string.\n\n ribbon_opacity : float, optional\n Opacity of ribbon. Defaults to 1.0 and must be between\n ``[0, 1]``.\n\n pass_widget : bool, optional\n If ``True``, the widget will be passed as the last argument of the\n callback.\n\n closed : bool, optional\n Make the spline a closed loop.\n\n initial_points : sequence, optional\n The points to initialize the widget placement. Must have\n same number of elements as ``n_handles``. If the first and\n last point are the same, this will be a closed loop\n spline.\n\n Returns\n -------\n vtk.vtkSplineWidget\n The newly created spline widget.\n\n Notes\n -----\n This widget has trouble displaying certain colors. 
Use only simple\n colors (white, black, yellow).\n\n \"\"\"\n if initial_points is not None and len(initial_points) != n_handles:\n raise ValueError(\"`initial_points` must be length `n_handles`.\")\n\n color = Color(color, default_color=pyvista.global_theme.color)\n\n if bounds is None:\n bounds = self.bounds\n\n ribbon = pyvista.PolyData()\n\n def _the_callback(widget, event_id):\n para_source = _vtk.vtkParametricFunctionSource()\n para_source.SetParametricFunction(widget.GetParametricSpline())\n para_source.Update()\n polyline = pyvista.wrap(para_source.GetOutput())\n ribbon.shallow_copy(polyline.ribbon(normal=(0, 0, 1), angle=90.0))\n if callable(callback):\n if pass_widget:\n try_callback(callback, polyline, widget)\n else:\n try_callback(callback, polyline)\n return\n\n spline_widget = _vtk.vtkSplineWidget()\n spline_widget.GetLineProperty().SetColor(color.float_rgb)\n spline_widget.SetNumberOfHandles(n_handles)\n spline_widget.SetInteractor(self.iren.interactor)\n spline_widget.SetCurrentRenderer(self.renderer)\n spline_widget.SetPlaceFactor(factor)\n spline_widget.PlaceWidget(bounds)\n spline_widget.SetResolution(resolution)\n if initial_points is not None:\n spline_widget.InitializeHandles(pyvista.vtk_points((initial_points)))\n else:\n spline_widget.SetClosed(closed)\n spline_widget.Modified()\n spline_widget.On()\n spline_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _the_callback)\n _the_callback(spline_widget, None)\n\n if show_ribbon:\n self.add_mesh(ribbon, color=ribbon_color, opacity=ribbon_opacity)\n\n self.spline_widgets.append(spline_widget)\n return spline_widget\n\n def clear_spline_widgets(self):\n \"\"\"Remove all of the spline widgets.\"\"\"\n self.spline_widgets.clear()\n\n def add_mesh_slice_spline(\n self,\n mesh,\n generate_triangles=False,\n n_handles=5,\n resolution=25,\n widget_color=None,\n show_ribbon=False,\n ribbon_color=\"pink\",\n ribbon_opacity=0.5,\n initial_points=None,\n closed=False,\n **kwargs,\n ):\n \"\"\"Slice a mesh with a spline widget.\n\n Add a mesh to the scene with a spline widget that is used to slice\n the mesh interactively.\n\n The sliced mesh is saved to the ``.spline_sliced_meshes`` attribute on\n the plotter.\n\n Parameters\n ----------\n mesh : pyvista.DataSet\n The input dataset to add to the scene and slice along the spline.\n\n generate_triangles : bool, optional\n If this is enabled (``False`` by default), the output will be\n triangles otherwise, the output will be the intersection polygons.\n\n n_handles : int, optional\n The number of interactive spheres to control the spline's\n parametric function.\n\n resolution : int, optional\n The number of points to generate on the spline.\n\n widget_color : color_like, optional\n Color of the widget. Either a string, RGB sequence, or\n hex color string. For example:\n\n * ``color='white'``\n * ``color='w'``\n * ``color=[1.0, 1.0, 1.0]``\n * ``color='#FFFFFF'``\n\n show_ribbon : bool, optional\n If ``True``, the poly plane used for slicing will also be shown.\n\n ribbon_color : color_like, optional\n Color of the ribbon. Either a string, RGB sequence, or\n hex color string.\n\n ribbon_opacity : float, optional\n Opacity of ribbon. Defaults to 1.0 and must be between\n ``[0, 1]``.\n\n initial_points : sequence, optional\n The points to initialize the widget placement. Must have same\n number of elements as ``n_handles``. 
If the first and last point\n are the same, this will be a closed loop spline.\n\n closed : bool, optional\n Make the spline a closed loop.\n\n **kwargs : dict, optional\n All additional keyword arguments are passed to\n :func:`BasePlotter.add_mesh` to control how the mesh is\n displayed.\n\n Returns\n -------\n vtk.vtkActor\n VTK actor of the mesh.\n\n \"\"\"\n name = kwargs.get('name', mesh.memory_address)\n rng = mesh.get_data_range(kwargs.get('scalars', None))\n kwargs.setdefault('clim', kwargs.pop('rng', rng))\n mesh.set_active_scalars(kwargs.get('scalars', mesh.active_scalars_name))\n\n self.add_mesh(mesh.outline(), name=name + \"outline\", opacity=0.0)\n\n alg = _vtk.vtkCutter() # Construct the cutter object\n alg.SetInputDataObject(mesh) # Use the grid as the data we desire to cut\n if not generate_triangles:\n alg.GenerateTrianglesOff()\n\n spline_sliced_mesh = pyvista.wrap(alg.GetOutput())\n self.spline_sliced_meshes.append(spline_sliced_mesh)\n\n def callback(spline):\n polyline = spline.GetCell(0)\n # create the plane for clipping\n polyplane = _vtk.vtkPolyPlane()\n polyplane.SetPolyLine(polyline)\n alg.SetCutFunction(polyplane) # the cutter to use the poly planes\n alg.Update() # Perform the Cut\n spline_sliced_mesh.shallow_copy(alg.GetOutput())\n\n self.add_spline_widget(\n callback=callback,\n bounds=mesh.bounds,\n factor=1.25,\n color=widget_color,\n n_handles=n_handles,\n resolution=resolution,\n show_ribbon=show_ribbon,\n ribbon_color=ribbon_color,\n ribbon_opacity=ribbon_opacity,\n initial_points=initial_points,\n closed=closed,\n )\n\n return self.add_mesh(spline_sliced_mesh, **kwargs)\n\n def add_sphere_widget(\n self,\n callback,\n center=(0, 0, 0),\n radius=0.5,\n theta_resolution=30,\n phi_resolution=30,\n color=None,\n style=\"surface\",\n selected_color=\"pink\",\n indices=None,\n pass_widget=False,\n test_callback=True,\n ):\n \"\"\"Add one or many sphere widgets to a scene.\n\n Use a sphere widget to control a vertex location.\n\n Parameters\n ----------\n callback : callable\n The function to call back when the widget is modified. It\n takes a single argument: the center of the sphere as an\n XYZ coordinate (a 3-length sequence). If multiple centers\n are passed in the ``center`` parameter, the callback must\n also accept an index of that widget.\n\n center : tuple(float), optional\n Length 3 array for the XYZ coordinate of the sphere's\n center when placing it in the scene. If more than one\n location is passed, then that many widgets will be added\n and the callback will also be passed the integer index of\n that widget.\n\n radius : float, optional\n The radius of the sphere.\n\n theta_resolution : int, optional\n Set the number of points in the longitude direction.\n\n phi_resolution : int, optional\n Set the number of points in the latitude direction.\n\n color : color_like, optional\n The color of the sphere's surface. If multiple centers\n are passed, then this must be a list of colors. 
Each\n color is either a string, rgb list, or hex color string.\n For example:\n\n * ``color='white'``\n * ``color='w'``\n * ``color=[1.0, 1.0, 1.0]``\n * ``color='#FFFFFF'``\n\n style : str, optional\n Representation style: ``'surface'`` or ``'wireframe'``.\n\n selected_color : color_like, optional\n Color of the widget when selected during interaction.\n\n indices : sequence, optional\n Indices to assign the sphere widgets.\n\n pass_widget : bool, optional\n If ``True``, the widget will be passed as the last\n argument of the callback.\n\n test_callback : bool, optional\n If ``True``, run the callback function after the widget is\n created.\n\n Returns\n -------\n vtk.vtkSphereWidget\n The sphere widget.\n\n \"\"\"\n if color is None:\n color = pyvista.global_theme.color.float_rgb\n selected_color = Color(selected_color)\n\n center = np.array(center)\n num = 1\n if center.ndim > 1:\n num = len(center)\n\n if isinstance(color, (list, tuple, np.ndarray)):\n if len(color) == num and not isinstance(color[0], float):\n colors = color\n else:\n colors = [color] * num\n else:\n colors = [color] * num\n\n def _the_callback(widget, event_id):\n point = widget.GetCenter()\n index = widget.WIDGET_INDEX\n if callable(callback):\n if num > 1:\n args = [point, index]\n else:\n args = [point]\n if pass_widget:\n args.append(widget)\n try_callback(callback, *args)\n return\n\n if indices is None:\n indices = [x for x in range(num)]\n\n for i in range(num):\n if center.ndim > 1:\n loc = center[i]\n else:\n loc = center\n sphere_widget = _vtk.vtkSphereWidget()\n sphere_widget.WIDGET_INDEX = indices[i] # Monkey patch the index\n if style in \"wireframe\":\n sphere_widget.SetRepresentationToWireframe()\n else:\n sphere_widget.SetRepresentationToSurface()\n sphere_widget.GetSphereProperty().SetColor(Color(colors[i]).float_rgb)\n sphere_widget.GetSelectedSphereProperty().SetColor(selected_color.float_rgb)\n sphere_widget.SetInteractor(self.iren.interactor)\n sphere_widget.SetCurrentRenderer(self.renderer)\n sphere_widget.SetRadius(radius)\n sphere_widget.SetCenter(loc)\n sphere_widget.SetThetaResolution(theta_resolution)\n sphere_widget.SetPhiResolution(phi_resolution)\n sphere_widget.Modified()\n sphere_widget.On()\n sphere_widget.AddObserver(_vtk.vtkCommand.EndInteractionEvent, _the_callback)\n self.sphere_widgets.append(sphere_widget)\n\n if test_callback is True:\n # Test call back in the last\n _the_callback(sphere_widget, None)\n if num > 1:\n return self.sphere_widgets\n\n return sphere_widget\n\n def clear_sphere_widgets(self):\n \"\"\"Remove all of the sphere widgets.\"\"\"\n self.sphere_widgets.clear()\n\n def add_checkbox_button_widget(\n self,\n callback,\n value=False,\n position=(10.0, 10.0),\n size=50,\n border_size=5,\n color_on='blue',\n color_off='grey',\n background_color='white',\n ):\n \"\"\"Add a checkbox button widget to the scene.\n\n This is useless without a callback function. You can pass a callable\n function that takes a single argument, the state of this button widget\n and performs a task with that value.\n\n Parameters\n ----------\n callback : callable\n The method called every time the button is clicked. 
This should take\n a single parameter: the bool value of the button.\n\n value : bool, optional\n The default state of the button.\n\n position : tuple(float), optional\n The absolute coordinates of the bottom left point of the button.\n\n size : int, optional\n The size of the button in number of pixels.\n\n border_size : int, optional\n The size of the borders of the button in pixels.\n\n color_on : color_like, optional\n The color used when the button is checked. Default is ``'blue'``.\n\n color_off : color_like, optional\n The color used when the button is not checked. Default is ``'grey'``.\n\n background_color : color_like, optional\n The background color of the button. Default is ``'white'``.\n\n Returns\n -------\n vtk.vtkButtonWidget\n The VTK button widget configured as a checkbox button.\n\n Examples\n --------\n The following example generates a static image of the widget.\n\n >>> import pyvista as pv\n >>> mesh = pv.Sphere()\n >>> p = pv.Plotter()\n >>> actor = p.add_mesh(mesh)\n >>> def toggle_vis(flag):\n ... actor.SetVisibility(flag)\n >>> _ = p.add_checkbox_button_widget(toggle_vis, value=True)\n >>> p.show()\n\n Download the interactive example at :ref:`checkbox_widget_example`.\n\n \"\"\"\n\n def create_button(color1, color2, color3, dims=(size, size, 1)):\n color1 = np.array(Color(color1).int_rgb)\n color2 = np.array(Color(color2).int_rgb)\n color3 = np.array(Color(color3).int_rgb)\n\n n_points = dims[0] * dims[1]\n button = pyvista.UniformGrid(dims=dims)\n arr = np.array([color1] * n_points).reshape(dims[0], dims[1], 3) # fill with color1\n arr[1 : dims[0] - 1, 1 : dims[1] - 1] = color2 # apply color2\n arr[\n border_size : dims[0] - border_size, border_size : dims[1] - border_size\n ] = color3 # apply color3\n button.point_data['texture'] = arr.reshape(n_points, 3).astype(np.uint8)\n return button\n\n button_on = create_button(color_on, background_color, color_on)\n button_off = create_button(color_on, background_color, color_off)\n\n bounds = [position[0], position[0] + size, position[1], position[1] + size, 0.0, 0.0]\n\n button_rep = _vtk.vtkTexturedButtonRepresentation2D()\n button_rep.SetNumberOfStates(2)\n button_rep.SetState(value)\n button_rep.SetButtonTexture(0, button_off)\n button_rep.SetButtonTexture(1, button_on)\n button_rep.SetPlaceFactor(1)\n button_rep.PlaceWidget(bounds)\n\n button_widget = _vtk.vtkButtonWidget()\n button_widget.SetInteractor(self.iren.interactor)\n button_widget.SetRepresentation(button_rep)\n button_widget.SetCurrentRenderer(self.renderer)\n button_widget.On()\n\n def _the_callback(widget, event):\n state = widget.GetRepresentation().GetState()\n if callable(callback):\n try_callback(callback, bool(state))\n\n button_widget.AddObserver(_vtk.vtkCommand.StateChangedEvent, _the_callback)\n self.button_widgets.append(button_widget)\n return button_widget\n\n def add_camera_orientation_widget(self, animate=True, n_frames=20):\n \"\"\"Add a camera orientation widget to the active renderer.\n\n .. 
note::\n This widget requires ``vtk>=9.1.0``.\n\n Parameters\n ----------\n animate : bool, optional\n Enable or disable jump-to-axis-view animation.\n n_frames : int, optional\n The number of frames to animate the jump-to-axis-viewpoint feature.\n\n Returns\n -------\n vtkCameraOrientationWidget\n Camera orientation widget.\n\n Examples\n --------\n Add a camera orientation widget to the scene.\n\n >>> import pyvista\n >>> mesh = pyvista.Cube()\n >>> plotter = pyvista.Plotter()\n >>> _ = plotter.add_mesh(mesh, scalars=range(6), show_scalar_bar=False)\n >>> _ = plotter.add_camera_orientation_widget()\n >>> plotter.show()\n\n \"\"\"\n widget = _vtk.lazy_vtkCameraOrientationWidget()\n widget.SetParentRenderer(self.renderer)\n widget.SetAnimate(animate)\n widget.SetAnimatorTotalFrames(n_frames)\n widget.On()\n self.camera_widgets.append(widget)\n return widget\n\n def clear_camera_widgets(self):\n \"\"\"Remove all of the camera widgets.\"\"\"\n self.camera_widgets.clear()\n\n def clear_button_widgets(self):\n \"\"\"Remove all of the button widgets.\"\"\"\n self.button_widgets.clear()\n\n def close(self):\n \"\"\"Close the widgets.\"\"\"\n self.clear_box_widgets()\n self.clear_plane_widgets()\n self.clear_line_widgets()\n self.clear_slider_widgets()\n self.clear_sphere_widgets()\n self.clear_spline_widgets()\n self.clear_button_widgets()\n self.clear_camera_widgets()\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
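Illustrative note (not part of the record above): the `create_button` helper in the pyvista snippet builds its checkbox texture by filling an RGB array with one colour and overwriting two nested interior rectangles. A minimal NumPy-only sketch of that step, with placeholder colours and sizes:

```python
import numpy as np

size, border_size = 10, 3          # placeholder dimensions
n_points = size * size
arr = np.array([[50, 50, 50]] * n_points).reshape(size, size, 3)   # fill with color1
arr[1:size - 1, 1:size - 1] = (255, 255, 255)                       # apply color2 (background ring)
arr[border_size:size - border_size, border_size:size - border_size] = (0, 0, 255)  # apply color3
texture = arr.reshape(n_points, 3).astype(np.uint8)
print(texture.shape)  # (100, 3), ready to attach as point data
```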
mlomnitz/deep_avsr | [
"01fe2fe6b25a3968b25d8aca861a592ef62561b2"
] | [
"audio_visual/test.py"
] | [
"\"\"\"\nAuthor: Smeet Shah\nFile part of 'deep_avsr' GitHub repository available at -\nhttps://github.com/LordMartian/deep_avsr\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\nfrom config import args\nfrom models.av_net import AVNet\nfrom models.lrs2_char_lm import LRS2CharLM\nfrom data.lrs2_dataset import LRS2Main\nfrom data.utils import collate_fn\nfrom utils.general import evaluate\n\n\n\nnp.random.seed(args[\"SEED\"])\ntorch.manual_seed(args[\"SEED\"])\ngpuAvailable = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if gpuAvailable else \"cpu\")\nkwargs = {\"num_workers\":args[\"NUM_WORKERS\"], \"pin_memory\":True} if gpuAvailable else {}\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\n#declaring the test dataset and test dataloader\naudioParams = {\"stftWindow\":args[\"STFT_WINDOW\"], \"stftWinLen\":args[\"STFT_WIN_LENGTH\"], \"stftOverlap\":args[\"STFT_OVERLAP\"]}\nvideoParams = {\"videoFPS\":args[\"VIDEO_FPS\"]}\nif args[\"TEST_DEMO_NOISY\"]:\n noiseParams = {\"noiseFile\":args[\"DATA_DIRECTORY\"] + \"/noise.wav\", \"noiseProb\":1, \"noiseSNR\":args[\"NOISE_SNR_DB\"]}\nelse:\n noiseParams = {\"noiseFile\":args[\"DATA_DIRECTORY\"] + \"/noise.wav\", \"noiseProb\":0, \"noiseSNR\":args[\"NOISE_SNR_DB\"]}\ntestData = LRS2Main(\"test\", args[\"DATA_DIRECTORY\"], args[\"MAIN_REQ_INPUT_LENGTH\"], args[\"CHAR_TO_INDEX\"], args[\"STEP_SIZE\"],\n audioParams, videoParams, noiseParams)\ntestLoader = DataLoader(testData, batch_size=args[\"BATCH_SIZE\"], collate_fn=collate_fn, shuffle=True, **kwargs)\n\n\nif args[\"TRAINED_MODEL_FILE\"] is not None:\n\n print(\"\\nTrained Model File: %s\" %(args[\"TRAINED_MODEL_FILE\"]))\n\n #declaring the model,loss function and loading the trained model weights\n model = AVNet(args[\"TX_NUM_FEATURES\"], args[\"TX_ATTENTION_HEADS\"], args[\"TX_NUM_LAYERS\"], args[\"PE_MAX_LENGTH\"],\n args[\"AUDIO_FEATURE_SIZE\"], args[\"TX_FEEDFORWARD_DIM\"], args[\"TX_DROPOUT\"], args[\"NUM_CLASSES\"])\n model.load_state_dict(torch.load(args[\"CODE_DIRECTORY\"] + args[\"TRAINED_MODEL_FILE\"], map_location=device))\n model.to(device)\n loss_function = nn.CTCLoss(blank=0, zero_infinity=False)\n\n\n #declaring the language model\n lm = LRS2CharLM()\n lm.load_state_dict(torch.load(args[\"TRAINED_LM_FILE\"], map_location=device))\n lm.to(device)\n if not args[\"USE_LM\"]:\n lm = None\n\n\n print(\"\\nTesting the trained model .... 
\\n\")\n\n beamSearchParams = {\"beamWidth\":args[\"BEAM_WIDTH\"], \"alpha\":args[\"LM_WEIGHT_ALPHA\"], \"beta\":args[\"LENGTH_PENALTY_BETA\"],\n \"threshProb\":args[\"THRESH_PROBABILITY\"]}\n if args[\"TEST_DEMO_MODE\"] == \"AO\":\n testParams = {\"decodeScheme\":args[\"TEST_DEMO_DECODING\"], \"beamSearchParams\":beamSearchParams, \"spaceIx\":args[\"CHAR_TO_INDEX\"][\" \"],\n \"eosIx\":args[\"CHAR_TO_INDEX\"][\"<EOS>\"], \"lm\":lm, \"aoProb\":1, \"voProb\":0}\n elif args[\"TEST_DEMO_MODE\"] == \"VO\":\n testParams = {\"decodeScheme\":args[\"TEST_DEMO_DECODING\"], \"beamSearchParams\":beamSearchParams, \"spaceIx\":args[\"CHAR_TO_INDEX\"][\" \"],\n \"eosIx\":args[\"CHAR_TO_INDEX\"][\"<EOS>\"], \"lm\":lm, \"aoProb\":0, \"voProb\":1}\n elif args[\"TEST_DEMO_MODE\"] == \"AV\":\n testParams = {\"decodeScheme\":args[\"TEST_DEMO_DECODING\"], \"beamSearchParams\":beamSearchParams, \"spaceIx\":args[\"CHAR_TO_INDEX\"][\" \"],\n \"eosIx\":args[\"CHAR_TO_INDEX\"][\"<EOS>\"], \"lm\":lm, \"aoProb\":0, \"voProb\":0}\n else:\n print(\"Invalid Operation Mode.\")\n exit()\n\n #evaluating the model over the test set\n testLoss, testCER, testWER = evaluate(model, testLoader, loss_function, device, testParams)\n\n #printing the test set loss, CER and WER\n print(\"Test Loss: %.6f || Test CER: %.3f || Test WER: %.3f\" %(testLoss, testCER, testWER))\n print(\"\\nTesting Done.\\n\")\n\n\nelse:\n print(\"Path to the trained model file not specified.\\n\")\n"
] | [
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.CTCLoss",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
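Illustrative note: the `test.py` record above fixes all random seeds and selects a device before building the dataloader. A self-contained sketch of that reproducibility idiom (the seed and worker count are placeholders; the original reads them from `args`):

```python
import numpy as np
import torch

SEED = 1234  # placeholder; the script uses args["SEED"]
np.random.seed(SEED)
torch.manual_seed(SEED)

gpu_available = torch.cuda.is_available()
device = torch.device("cuda" if gpu_available else "cpu")
loader_kwargs = {"num_workers": 4, "pin_memory": True} if gpu_available else {}

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print(device, loader_kwargs)
```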
zhang-rongchen/Logo-Retrieval-in-Commercial-Plaza | [
"863fd98ff926f2b5814fc4cbf3fb5d06c5ec3913"
] | [
"yolo3/utils.py"
] | [
"\"\"\"Miscellaneous utility functions.\"\"\"\n\nfrom functools import reduce\nimport cv2\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\ndef compose(*funcs):\n \"\"\"Compose arbitrarily many functions, evaluated left to right.\n\n Reference: https://mathieularose.com/function-composition-in-python/\n \"\"\"\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')\n\ndef letterbox_image(image, size):\n '''resize image with unchanged aspect ratio using padding'''\n (ih, iw, _) = image.shape\n w, h = size\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)\n new_image = cv2.copyMakeBorder(image, (h-nh)//2, (h-nh)-(h-nh)//2, (w-nw)//2, (w-nw)-(w-nw)//2,\n cv2.BORDER_CONSTANT, value=(128, 128, 128))\n return new_image\n\ndef rand(a=0, b=1):\n return np.random.rand()*(b-a) + a\n\ndef get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):\n '''random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = cv2.imread(line[0])\n (ih, iw, _) = image.shape\n h, w = input_shape\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n\n if not random:\n # resize image\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n dx = (w-nw)//2\n dy = (h-nh)//2\n image_data=0\n if proc_img:\n image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)\n new_image = cv2.copyMakeBorder(image, dy, (h - nh) - dy, dx,\n (w - nw) - dx,\n cv2.BORDER_CONSTANT, value=(128, 128, 128))\n image_data = np.array(new_image)/255.\n\n # correct boxes\n box_data = np.zeros((max_boxes,5))\n if len(box)>0:\n np.random.shuffle(box)\n if len(box)>max_boxes: box = box[:max_boxes]\n box[:, [0,2]] = box[:, [0,2]]*scale + dx\n box[:, [1,3]] = box[:, [1,3]]*scale + dy\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n # resize image\n new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)\n\n # place image\n dx = int(rand(0, w-nw))\n dy = int(rand(0, h-nh))\n\n img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(img, (dx, dy))\n image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)\n\n # flip image or not\n flip = rand()<.5\n if flip: image = cv2.flip(image,1,dst=None)\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\n x = rgb_to_hsv(np.array(image)/255.)\n x[..., 0] += hue\n x[..., 0][x[..., 0]>1] -= 1\n x[..., 0][x[..., 0]<0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x>1] = 1\n x[x<0] = 0\n image_data = hsv_to_rgb(x) # numpy array, 0 to 1\n\n # correct boxes\n box_data = np.zeros((max_boxes,5))\n if len(box)>0:\n np.random.shuffle(box)\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\n if flip: box[:, [0,2]] = w - box[:, [2,0]]\n box[:, 0:2][box[:, 0:2]<0] = 0\n box[:, 2][box[:, 2]>w] = w\n box[:, 3][box[:, 3]>h] = h\n box_w = box[:, 2] - box[:, 
0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\n if len(box)>max_boxes: box = box[:max_boxes]\n box_data[:len(box)] = box\n\n return image_data, box_data\n"
] | [
[
"numpy.logical_and",
"matplotlib.colors.hsv_to_rgb",
"numpy.asarray",
"numpy.random.shuffle",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
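Illustrative note: `letterbox_image` in the record above resizes with an unchanged aspect ratio and pads the remainder with grey. A runnable sketch of the same technique on a synthetic image:

```python
import cv2
import numpy as np

def letterbox(image, size=(416, 416)):
    ih, iw = image.shape[:2]
    w, h = size
    scale = min(w / iw, h / ih)
    nw, nh = int(iw * scale), int(ih * scale)
    resized = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)
    top, left = (h - nh) // 2, (w - nw) // 2
    return cv2.copyMakeBorder(resized, top, (h - nh) - top, left, (w - nw) - left,
                              cv2.BORDER_CONSTANT, value=(128, 128, 128))

print(letterbox(np.zeros((300, 500, 3), dtype=np.uint8)).shape)  # (416, 416, 3)
```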
Artamus/label-propagation | [
"e4dbe1be99d94178415f086b0a3f87bb6ededfe1"
] | [
"src/get_label_frequency.py"
] | [
"import argparse\nimport pypcd\nimport os\nimport numpy as np\nfrom cityscapes_classes import id_to_name\n\n\ndef read_pointcloud_file(pointcloud_file_path):\n return pypcd.PointCloud.from_path(pointcloud_file_path)\n\n\ndef input_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n 'input', type=str,\n help='Path to directory containing labelled pointcloud files'\n )\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = input_arguments()\n\n input_dir = args.input\n\n pointcloud_files = os.listdir(input_dir)\n pointcloud_files = [\n filename for filename in pointcloud_files if filename.endswith('.pcd')]\n\n labels = []\n\n for filename in pointcloud_files:\n pointcloud_file_path = os.path.join(input_dir, filename)\n pointcloud = read_pointcloud_file(pointcloud_file_path)\n\n label = pointcloud.pc_data['label'].astype(np.int32)\n labels.extend(label)\n\n labels = np.array(labels)\n unique, counts = np.unique(labels, return_counts=True)\n unique_names = map(id_to_name, unique)\n print('Unique labels: {:s}'.format(', '.join(unique_names)))\n print('Counts: ')\n print(np.asarray((unique, counts)).T)\n"
] | [
[
"numpy.asarray",
"numpy.array",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
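Illustrative note: the core of `get_label_frequency.py` is a single `np.unique(..., return_counts=True)` call. A toy version with in-memory labels instead of `.pcd` files:

```python
import numpy as np

labels = np.array([0, 2, 2, 7, 7, 7, 11])           # toy labels
unique, counts = np.unique(labels, return_counts=True)
print(np.asarray((unique, counts)).T)                # one (label, count) pair per row
```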
diazshejo/Friday_IA | [
"2d6cd2f54fda7b2935eaacc84df2cf4277eac762"
] | [
"tensorflow-conexion/tensorflow/Raspberry/label_image/label_image.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n#sdiaz\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\n\ndef read_tensor_from_image_file(file_name,\n input_height=299,\n input_width=299,\n input_mean=0,\n input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(\n file_reader, channels=3, name=\"png_reader\")\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(\n tf.image.decode_gif(file_reader, name=\"gif_reader\"))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name=\"bmp_reader\")\n else:\n image_reader = tf.image.decode_jpeg(\n file_reader, channels=3, name=\"jpeg_reader\")\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0)\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\n\n\nif __name__ == \"__main__\":\n file_name = \"tensorflow/examples/label_image/data/grace_hopper.jpg\"\n model_file = \\\n \"tensorflow/examples/label_image/data/inception_v3_2016_08_28_frozen.pb\"\n label_file = \"tensorflow/examples/label_image/data/imagenet_slim_labels.txt\"\n input_height = 299\n input_width = 299\n input_mean = 0\n input_std = 255\n input_layer = \"input\"\n output_layer = \"InceptionV3/Predictions/Reshape_1\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n if args.image:\n 
file_name = args.image\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height = args.input_height\n if args.input_width:\n input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_file(\n file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name)\n output_operation = graph.get_operation_by_name(output_name)\n\n with tf.Session(graph=graph) as sess:\n results = sess.run(output_operation.outputs[0], {\n input_operation.outputs[0]: t\n })\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n for i in top_k:\n print(labels[i], results[i])\n"
] | [
[
"tensorflow.Graph",
"tensorflow.image.resize_bilinear",
"tensorflow.import_graph_def",
"tensorflow.read_file",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.cast",
"tensorflow.image.decode_png",
"tensorflow.expand_dims",
"tensorflow.image.decode_bmp",
"tensorflow.subtract",
"tensorflow.image.decode_gif",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
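Illustrative note: after running the graph, `label_image.py` squeezes the batch dimension and picks the five most probable classes. That post-processing step in isolation, with a made-up probability vector:

```python
import numpy as np

results = np.squeeze(np.array([[0.05, 0.6, 0.1, 0.2, 0.05]]))  # fake softmax output, batch of 1
top_k = results.argsort()[-5:][::-1]                            # indices, most probable first
print(top_k, results[top_k])
```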
RajatRasal/coma_ukbiobank_mesh | [
"74f2aa809c2679be6947a85cd0b8e514e8293605"
] | [
"coma/models/autoencoder.py"
] | [
"import torch.nn as nn\n\nfrom torch import Tensor\n\n\nclass AE(nn.Module):\n \"\"\"\n def __init__(self, in_channels, out_channels, latent_channels, edge_index,\n down_transform, up_transform, K, n_blocks, Encoder, Decoder, **kwargs):\n super(AE, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.edge_index = edge_index\n self.down_transform = down_transform\n self.up_transform = up_transform\n # self.num_vert used in the last and the first layer of encoder and decoder\n self.num_verts = self.down_transform[-1].size(0)\n\n self.encoder = Encoder(in_channels, out_channels, latent_channels,\n edge_index, down_transform, up_transform, K, self.num_verts, n_blocks, **kwargs,\n )\n self.decoder = Decoder(in_channels, out_channels, latent_channels,\n edge_index, down_transform, up_transform, K, self.num_verts, n_blocks, **kwargs,\n )\n \"\"\"\n\n def __init__(self, encoder, decoder, latent_dim):\n super(AE, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.reset_parameters()\n\n \"\"\"\n @classmethod\n def init_coma(cls, template: Data, device: str, pooling_factor: int = 4, **kwargs):\n mesh = psbody.mesh.Mesh(\n v=template.pos.detach().cpu().numpy(),\n f=template.face.T.detach().cpu().numpy(),\n )\n ds_factors = [pooling_factor] * 4 # 4, 4, 4, 4]\n _, A, D, U, F = mesh_sampling.generate_transform_matrices(mesh, ds_factors)\n tmp = {'face': F, 'adj': A, 'down_transform': D, 'up_transform': U}\n\n edge_index_list = [\n utils.to_edge_index(adj).to(device)\n for adj in tmp['adj']\n ]\n down_transform_list = [\n utils.to_sparse(down_transform).to(device)\n for down_transform in tmp['down_transform']\n ]\n up_transform_list = [\n utils.to_sparse(up_transform).to(device)\n for up_transform in tmp['up_transform']\n ]\n\n return cls(\n **kwargs,\n edge_index=edge_index_list,\n down_transform=down_transform_list,\n up_transform=up_transform_list,\n ).to(device)\n \"\"\"\n\n def reset_parameters(self):\n for name, param in self.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0)\n else:\n nn.init.xavier_uniform_(param)\n\n def forward(self, x):\n # x - batched feature matrix\n z = self.encoder(x)\n out = self.decoder(z)\n return out\n\n\nclass VAE(nn.Module):\n\n def __init__(self, encoder, decoder, latent_dim):\n super(VAE, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.loc = nn.Linear(latent_dim, latent_dim)\n self.scale = nn.Linear(latent_dim, latent_dim)\n self.reset_parameters()\n\n def reset_parameters(self):\n for name, param in self.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0)\n else:\n nn.init.xavier_uniform_(param)\n\n def reparametrise(self, mean: Tensor, log_std: Tensor) -> Tensor:\n # TODO: Change this to using multiple MC particles\n eps = torch.randn_like(std)\n z = mean + eps * log_std.exp()\n return z\n\n def _gaussian_parameters(self, enc: Tensor) -> [Tensor, Tensor]:\n mean = self.loc(enc)\n log_std = self.scale(enc)\n return mean, log_std\n \n def forward(self, x: Tensor) -> [Tensor, Tensor, Tensor, Tensor]:\n enc = self.encode(x)\n mean, log_std = self._gaussian_parameters(enc)\n z = self.reparametrise(mean, log_std)\n return self.decode(z), z, mean, log_std\n \n def generate(self, x: Tensor) -> Tensor:\n return self.forward(x)[0]\n \n @staticmethod\n def loss_function(preds: Tensor, targets: Tensor, mean: Tensor, log_std: Tensor) -> Tensor:\n log_prob = F.mse_loss(preds, targets) \n \n kl_loss = torch.sum(1 + 2 * log_std - mean ** 2 - log_std.exp() 
** 2, dim=1)\n kl_loss = -0.5 * torch.mean(kl_loss)\n \n loss = log_prob + kl_loss\n \n return loss, log_prob, kl_loss\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
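Illustrative note: `VAE.reparametrise` in the record references an undefined `std` and uses `torch`/`F` without importing them. A self-contained sketch of the intended reparameterisation trick (mean plus noise scaled by `exp(log_std)`), with toy tensors:

```python
import torch

def reparametrise(mean: torch.Tensor, log_std: torch.Tensor) -> torch.Tensor:
    eps = torch.randn_like(log_std)      # one Monte Carlo particle
    return mean + eps * log_std.exp()

mean, log_std = torch.zeros(2, 8), torch.zeros(2, 8)
print(reparametrise(mean, log_std).shape)  # torch.Size([2, 8])
```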
liaopeiyuan/tenset | [
"2f7a4371989d32126fa3cd68ad8c3d244d78f790"
] | [
"python/tvm/testing.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=invalid-name,unnecessary-comprehension\n\"\"\" TVM testing utilities\n\nTesting Markers\n***************\n\nWe use pytest markers to specify the requirements of test functions. Currently\nthere is a single distinction that matters for our testing environment: does\nthe test require a gpu. For tests that require just a gpu or just a cpu, we\nhave the decorator :py:func:`requires_gpu` that enables the test when a gpu is\navailable. To avoid running tests that don't require a gpu on gpu nodes, this\ndecorator also sets the pytest marker `gpu` so we can use select the gpu subset\nof tests (using `pytest -m gpu`).\n\nUnfortunately, many tests are written like this:\n\n.. python::\n\n def test_something():\n for target in all_targets():\n do_something()\n\nThe test uses both gpu and cpu targets, so the test needs to be run on both cpu\nand gpu nodes. But we still want to only run the cpu targets on the cpu testing\nnode. The solution is to mark these tests with the gpu marker so they will be\nrun on the gpu nodes. But we also modify all_targets (renamed to\nenabled_targets) so that it only returns gpu targets on gpu nodes and cpu\ntargets on cpu nodes (using an environment variable).\n\nInstead of using the all_targets function, future tests that would like to\ntest against a variety of targets should use the\n:py:func:`tvm.testing.parametrize_targets` functionality. This allows us\ngreater control over which targets are run on which testing nodes.\n\nIf in the future we want to add a new type of testing node (for example\nfpgas), we need to add a new marker in `tests/python/pytest.ini` and a new\nfunction in this module. Then targets using this node should be added to the\n`TVM_TEST_TARGETS` environment variable in the CI.\n\"\"\"\nimport logging\nimport os\nimport sys\nimport time\nimport pytest\nimport numpy as np\nimport tvm\nimport tvm.arith\nimport tvm.tir\nimport tvm.te\nimport tvm._ffi\nfrom tvm.contrib import nvcc\n\n\ndef assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):\n \"\"\"Version of np.testing.assert_allclose with `atol` and `rtol` fields set\n in reasonable defaults.\n\n Arguments `actual` and `desired` are not interchangable, since the function\n compares the `abs(actual-desired)` with `atol+rtol*abs(desired)`. 
Since we\n often allow `desired` to be close to zero, we generally want non-zero `atol`.\n \"\"\"\n actual = np.asanyarray(actual)\n desired = np.asanyarray(desired)\n np.testing.assert_allclose(actual.shape, desired.shape)\n np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)\n\n\ndef check_numerical_grads(\n function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1\n):\n \"\"\"A helper function that checks that numerical gradients of a function are\n equal to gradients computed in some different way (analytical gradients).\n\n Numerical gradients are computed using finite difference approximation. To\n reduce the number of function evaluations, the number of points used is\n gradually increased if the error value is too high (up to 5 points).\n\n Parameters\n ----------\n function\n A function that takes inputs either as positional or as keyword\n arguments (either `function(*input_values)` or `function(**input_values)`\n should be correct) and returns a scalar result. Should accept numpy\n ndarrays.\n\n input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]\n A list of values or a dict assigning values to variables. Represents the\n point at which gradients should be computed.\n\n grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]\n Gradients computed using a different method.\n\n function_value : float, optional\n Should be equal to `function(**input_values)`.\n\n delta : float, optional\n A small number used for numerical computation of partial derivatives.\n The default 1e-3 is a good choice for float32.\n\n atol : float, optional\n Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a\n gradient.\n\n rtol : float, optional\n Relative tolerance.\n \"\"\"\n # If input_values is a list then function accepts positional arguments\n # In this case transform it to a function taking kwargs of the form {\"0\": ..., \"1\": ...}\n if not isinstance(input_values, dict):\n input_len = len(input_values)\n input_values = {str(idx): val for idx, val in enumerate(input_values)}\n\n def _function(_input_len=input_len, _orig_function=function, **kwargs):\n return _orig_function(*(kwargs[str(i)] for i in range(input_len)))\n\n function = _function\n\n grad_values = {str(idx): val for idx, val in enumerate(grad_values)}\n\n if function_value is None:\n function_value = function(**input_values)\n\n # a helper to modify j-th element of val by a_delta\n def modify(val, j, a_delta):\n val = val.copy()\n val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta\n return val\n\n # numerically compute a partial derivative with respect to j-th element of the var `name`\n def derivative(x_name, j, a_delta):\n modified_values = {\n n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()\n }\n return (function(**modified_values) - function_value) / a_delta\n\n def compare_derivative(j, n_der, grad):\n der = grad.reshape(-1)[j]\n return np.abs(n_der - der) < atol + rtol * np.abs(n_der)\n\n for x_name, grad in grad_values.items():\n if grad.shape != input_values[x_name].shape:\n raise AssertionError(\n \"Gradient wrt '{}' has unexpected shape {}, expected {} \".format(\n x_name, grad.shape, input_values[x_name].shape\n )\n )\n\n ngrad = np.zeros_like(grad)\n\n wrong_positions = []\n\n # compute partial derivatives for each position in this variable\n for j in range(np.prod(grad.shape)):\n # forward difference approximation\n nder = derivative(x_name, j, delta)\n\n # if the derivative is not equal 
to the analytical one, try to use more\n # precise and expensive methods\n if not compare_derivative(j, nder, grad):\n # central difference approximation\n nder = (derivative(x_name, j, -delta) + nder) / 2\n\n if not compare_derivative(j, nder, grad):\n # central difference approximation using h = delta/2\n cnder2 = (\n derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)\n ) / 2\n # five-point derivative\n nder = (4 * cnder2 - nder) / 3\n\n # if the derivatives still don't match, add this position to the\n # list of wrong positions\n if not compare_derivative(j, nder, grad):\n wrong_positions.append(np.unravel_index(j, grad.shape))\n\n ngrad.reshape(-1)[j] = nder\n\n wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))\n\n dist = np.sqrt(np.sum((ngrad - grad) ** 2))\n grad_norm = np.sqrt(np.sum(ngrad ** 2))\n\n if not (np.isfinite(dist) and np.isfinite(grad_norm)):\n raise ValueError(\n \"NaN or infinity detected during numerical gradient checking wrt '{}'\\n\"\n \"analytical grad = {}\\n numerical grad = {}\\n\".format(x_name, grad, ngrad)\n )\n\n # we multiply atol by this number to make it more universal for different sizes\n sqrt_n = np.sqrt(float(np.prod(grad.shape)))\n\n if dist > atol * sqrt_n + rtol * grad_norm:\n raise AssertionError(\n \"Analytical and numerical grads wrt '{}' differ too much\\n\"\n \"analytical grad = {}\\n numerical grad = {}\\n\"\n \"{}% of elements differ, first 10 of wrong positions: {}\\n\"\n \"distance > atol*sqrt(n) + rtol*grad_norm\\n\"\n \"distance {} > {}*{} + {}*{}\".format(\n x_name,\n grad,\n ngrad,\n wrong_percentage,\n wrong_positions[:10],\n dist,\n atol,\n sqrt_n,\n rtol,\n grad_norm,\n )\n )\n\n max_diff = np.max(np.abs(ngrad - grad))\n avg_diff = np.mean(np.abs(ngrad - grad))\n logging.info(\n \"Numerical grad test wrt '%s' of shape %s passes, \"\n \"dist = %f, max_diff = %f, avg_diff = %f\",\n x_name,\n grad.shape,\n dist,\n max_diff,\n avg_diff,\n )\n\n\ndef assert_prim_expr_equal(lhs, rhs):\n \"\"\"Assert lhs and rhs equals to each iother.\n\n Parameters\n ----------\n lhs : tvm.tir.PrimExpr\n The left operand.\n\n rhs : tvm.tir.PrimExpr\n The left operand.\n \"\"\"\n ana = tvm.arith.Analyzer()\n res = ana.simplify(lhs - rhs)\n equal = isinstance(res, tvm.tir.IntImm) and res.value == 0\n if not equal:\n raise ValueError(\"{} and {} are not equal\".format(lhs, rhs))\n\n\ndef check_bool_expr_is_true(bool_expr, vranges, cond=None):\n \"\"\"Check that bool_expr holds given the condition cond\n for every value of free variables from vranges.\n\n for example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10)\n here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, cond is 2x > 4y\n We creates iterations to check,\n for x in range(10):\n for y in range(10):\n assert !(2x > 4y) || (x > 2y)\n\n Parameters\n ----------\n bool_expr : tvm.ir.PrimExpr\n Boolean expression to check\n vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]\n Free variables and their ranges\n cond: tvm.ir.PrimExpr\n extra conditions needs to be satisfied.\n \"\"\"\n if cond is not None:\n bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)\n\n def _run_expr(expr, vranges):\n \"\"\"Evaluate expr for every value of free variables\n given by vranges and return the tensor of results.\n \"\"\"\n\n def _compute_body(*us):\n vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}\n return tvm.tir.stmt_functor.substitute(expr, vmap)\n\n A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)\n args = 
[tvm.nd.empty(A.shape, A.dtype)]\n sch = tvm.te.create_schedule(A.op)\n mod = tvm.build(sch, [A])\n mod(*args)\n return args[0].asnumpy()\n\n res = _run_expr(bool_expr, vranges)\n if not np.all(res):\n indices = list(np.argwhere(res == 0)[0])\n counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]\n counterex = sorted(counterex, key=lambda x: x[0])\n counterex = \", \".join([v + \" = \" + str(i) for v, i in counterex])\n ana = tvm.arith.Analyzer()\n raise AssertionError(\n \"Expression {}\\nis not true on {}\\n\"\n \"Counterexample: {}\".format(ana.simplify(bool_expr), vranges, counterex)\n )\n\n\ndef check_int_constraints_trans_consistency(constraints_trans, vranges=None):\n \"\"\"Check IntConstraintsTransform is a bijective transformation.\n\n Parameters\n ----------\n constraints_trans : arith.IntConstraintsTransform\n Integer constraints transformation\n vranges: Dict[tvm.tir.Var, tvm.ir.Range]\n Free variables and their ranges\n \"\"\"\n if vranges is None:\n vranges = {}\n\n def _check_forward(constraints1, constraints2, varmap, backvarmap):\n ana = tvm.arith.Analyzer()\n all_vranges = vranges.copy()\n all_vranges.update({v: r for v, r in constraints1.ranges.items()})\n\n # Check that the transformation is injective\n cond_on_vars = tvm.tir.const(1, \"bool\")\n for v in constraints1.variables:\n if v in varmap:\n # variable mapping is consistent\n v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))\n cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)\n # Also we have to check that the new relations are true when old relations are true\n cond_subst = tvm.tir.stmt_functor.substitute(\n tvm.te.all(tvm.tir.const(1, \"bool\"), *constraints2.relations), backvarmap\n )\n # We have to include relations from vranges too\n for v in constraints2.variables:\n if v in constraints2.ranges:\n r = constraints2.ranges[v]\n range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)\n range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)\n cond_subst = tvm.te.all(cond_subst, range_cond)\n cond_subst = ana.simplify(cond_subst)\n check_bool_expr_is_true(\n tvm.te.all(cond_subst, cond_on_vars),\n all_vranges,\n cond=tvm.te.all(tvm.tir.const(1, \"bool\"), *constraints1.relations),\n )\n\n _check_forward(\n constraints_trans.src,\n constraints_trans.dst,\n constraints_trans.src_to_dst,\n constraints_trans.dst_to_src,\n )\n _check_forward(\n constraints_trans.dst,\n constraints_trans.src,\n constraints_trans.dst_to_src,\n constraints_trans.src_to_dst,\n )\n\n\ndef _get_targets():\n target_str = os.environ.get(\"TVM_TEST_TARGETS\", \"\")\n if len(target_str) == 0:\n target_str = DEFAULT_TEST_TARGETS\n targets = set()\n for dev in target_str.split(\";\"):\n if len(dev) == 0:\n continue\n target_kind = dev.split()[0]\n if tvm.runtime.enabled(target_kind) and tvm.context(target_kind, 0).exist:\n targets.add(dev)\n if len(targets) == 0:\n logging.warning(\n \"None of the following targets are supported by this build of TVM: %s.\"\n \" Try setting TVM_TEST_TARGETS to a supported target. 
Defaulting to llvm.\",\n target_str,\n )\n return {\"llvm\"}\n return targets\n\n\nDEFAULT_TEST_TARGETS = (\n \"llvm;cuda;opencl;metal;rocm;vulkan;nvptx;\"\n \"llvm -device=arm_cpu;opencl -device=mali,aocl_sw_emu\"\n)\n\n\ndef device_enabled(target):\n \"\"\"Check if a target should be used when testing.\n\n It is recommended that you use :py:func:`tvm.testing.parametrize_targets`\n instead of manually checking if a target is enabled.\n\n This allows the user to control which devices they are testing against. In\n tests, this should be used to check if a device should be used when said\n device is an optional part of the test.\n\n Parameters\n ----------\n target : str\n Target string to check against\n\n Returns\n -------\n bool\n Whether or not the device associated with this target is enabled.\n\n Example\n -------\n >>> @tvm.testing.uses_gpu\n >>> def test_mytest():\n >>> for target in [\"cuda\", \"llvm\"]:\n >>> if device_enabled(target):\n >>> test_body...\n\n Here, `test_body` will only be reached by with `target=\"cuda\"` on gpu test\n nodes and `target=\"llvm\"` on cpu test nodes.\n \"\"\"\n assert isinstance(target, str), \"device_enabled requires a target as a string\"\n target_kind = target.split(\" \")[\n 0\n ] # only check if device name is found, sometime there are extra flags\n return any([target_kind in test_target for test_target in _get_targets()])\n\n\ndef enabled_targets():\n \"\"\"Get all enabled targets with associated contexts.\n\n In most cases, you should use :py:func:`tvm.testing.parametrize_targets` instead of\n this function.\n\n In this context, enabled means that TVM was built with support for this\n target and the target name appears in the TVM_TEST_TARGETS environment\n variable. If TVM_TEST_TARGETS is not set, it defaults to variable\n DEFAULT_TEST_TARGETS in this module.\n\n If you use this function in a test, you **must** decorate the test with\n :py:func:`tvm.testing.uses_gpu` (otherwise it will never be run on the gpu).\n\n Returns\n -------\n targets: list\n A list of pairs of all enabled devices and the associated context\n \"\"\"\n return [(tgt, tvm.context(tgt)) for tgt in _get_targets()]\n\n\ndef _compose(args, decs):\n \"\"\"Helper to apply multiple markers\"\"\"\n if len(args) > 0:\n f = args[0]\n for d in reversed(decs):\n f = d(f)\n return f\n return decs\n\n\ndef uses_gpu(*args):\n \"\"\"Mark to differentiate tests that use the GPU is some capacity.\n\n These tests will be run on CPU-only test nodes and on test nodes with GPUS.\n To mark a test that must have a GPU present to run, use\n :py:func:`tvm.testing.requires_gpu`.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _uses_gpu = [pytest.mark.gpu]\n return _compose(args, _uses_gpu)\n\n\ndef requires_gpu(*args):\n \"\"\"Mark a test as requiring a GPU to run.\n\n Tests with this mark will not be run unless a gpu is present.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_gpu = [\n pytest.mark.skipif(not tvm.gpu().exist, reason=\"No GPU present\"),\n *uses_gpu(),\n ]\n return _compose(args, _requires_gpu)\n\n\ndef requires_cuda(*args):\n \"\"\"Mark a test as requiring the CUDA runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_cuda = [\n pytest.mark.cuda,\n pytest.mark.skipif(not device_enabled(\"cuda\"), reason=\"CUDA support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_cuda)\n\n\ndef requires_cudagraph(*args):\n 
\"\"\"Mark a test as requiring the CUDA Graph Feature\n\n This also marks the test as requiring cuda\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_cudagraph = [\n pytest.mark.skipif(\n not nvcc.have_cudagraph(), reason=\"CUDA Graph is not supported in this environment\"\n ),\n *requires_cuda(),\n ]\n return _compose(args, _requires_cudagraph)\n\n\ndef requires_opencl(*args):\n \"\"\"Mark a test as requiring the OpenCL runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_opencl = [\n pytest.mark.opencl,\n pytest.mark.skipif(not device_enabled(\"opencl\"), reason=\"OpenCL support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_opencl)\n\n\ndef requires_rocm(*args):\n \"\"\"Mark a test as requiring the rocm runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_rocm = [\n pytest.mark.rocm,\n pytest.mark.skipif(not device_enabled(\"rocm\"), reason=\"rocm support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_rocm)\n\n\ndef requires_metal(*args):\n \"\"\"Mark a test as requiring the metal runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_metal = [\n pytest.mark.metal,\n pytest.mark.skipif(not device_enabled(\"metal\"), reason=\"metal support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_metal)\n\n\ndef requires_vulkan(*args):\n \"\"\"Mark a test as requiring the vulkan runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_vulkan = [\n pytest.mark.vulkan,\n pytest.mark.skipif(not device_enabled(\"vulkan\"), reason=\"vulkan support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_vulkan)\n\n\ndef requires_tensorcore(*args):\n \"\"\"Mark a test as requiring a tensorcore to run.\n\n Tests with this mark will not be run unless a tensorcore is present.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_tensorcore = [\n pytest.mark.tensorcore,\n pytest.mark.skipif(\n not tvm.gpu().exist or not nvcc.have_tensorcore(tvm.gpu(0).compute_version),\n reason=\"No tensorcore present\",\n ),\n *requires_gpu(),\n ]\n return _compose(args, _requires_tensorcore)\n\n\ndef requires_llvm(*args):\n \"\"\"Mark a test as requiring llvm to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_llvm = [\n pytest.mark.llvm,\n pytest.mark.skipif(not device_enabled(\"llvm\"), reason=\"LLVM support not enabled\"),\n ]\n return _compose(args, _requires_llvm)\n\n\ndef requires_micro(*args):\n \"\"\"Mark a test as requiring microTVM to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_micro = [\n pytest.mark.skipif(\n tvm.support.libinfo().get(\"USE_MICRO\", \"OFF\") != \"ON\",\n reason=\"MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.\",\n )\n ]\n return _compose(args, _requires_micro)\n\n\ndef requires_rpc(*args):\n \"\"\"Mark a test as requiring rpc to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_rpc = [\n pytest.mark.skipif(\n tvm.support.libinfo().get(\"USE_RPC\", \"OFF\") != \"ON\",\n reason=\"RPC support not enabled. 
Set USE_RPC=ON in config.cmake to enable.\",\n )\n ]\n return _compose(args, _requires_rpc)\n\n\ndef _target_to_requirement(target):\n # mapping from target to decorator\n if target.startswith(\"cuda\"):\n return requires_cuda()\n if target.startswith(\"rocm\"):\n return requires_rocm()\n if target.startswith(\"vulkan\"):\n return requires_vulkan()\n if target.startswith(\"nvptx\"):\n return [*requires_llvm(), *requires_gpu()]\n if target.startswith(\"metal\"):\n return requires_metal()\n if target.startswith(\"opencl\"):\n return requires_opencl()\n if target.startswith(\"llvm\"):\n return requires_llvm()\n return []\n\n\ndef parametrize_targets(*args):\n \"\"\"Parametrize a test over all enabled targets.\n\n Use this decorator when you want your test to be run over a variety of\n targets and devices (including cpu and gpu devices).\n\n Parameters\n ----------\n f : function\n Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, ctx)`:,\n where `xxxxxxxxx` is any name.\n targets : list[str], optional\n Set of targets to run against. If not supplied,\n :py:func:`tvm.testing.enabled_targets` will be used.\n\n Example\n -------\n >>> @tvm.testing.parametrize\n >>> def test_mytest(target, ctx):\n >>> ... # do something\n\n Or\n\n >>> @tvm.testing.parametrize(\"llvm\", \"cuda\")\n >>> def test_mytest(target, ctx):\n >>> ... # do something\n \"\"\"\n\n def wrap(targets):\n def func(f):\n params = [\n pytest.param(target, tvm.context(target, 0), marks=_target_to_requirement(target))\n for target in targets\n ]\n return pytest.mark.parametrize(\"target,ctx\", params)(f)\n\n return func\n\n if len(args) == 1 and callable(args[0]):\n targets = [t for t, _ in enabled_targets()]\n return wrap(targets)(args[0])\n return wrap(args)\n\n\ndef identity_after(x, sleep):\n \"\"\"Testing function to return identity after sleep\n\n Parameters\n ----------\n x : int\n The input value.\n\n sleep : float\n The amount of time to sleep\n\n Returns\n -------\n x : object\n The original value\n \"\"\"\n if sleep:\n time.sleep(sleep)\n return x\n\n\ndef terminate_self():\n \"\"\"Testing function to terminate the process.\"\"\"\n sys.exit(-1)\n\n\ntvm._ffi._init_api(\"testing\", __name__)\n"
] | [
[
"numpy.abs",
"numpy.isfinite",
"numpy.argwhere",
"numpy.all",
"numpy.asanyarray",
"numpy.zeros_like",
"numpy.prod",
"numpy.testing.assert_allclose",
"numpy.unravel_index",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
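Illustrative note: `check_numerical_grads` above refines a forward difference into a central difference and then a five-point (Richardson-style) estimate. The same combination restated for a scalar function:

```python
import numpy as np

def central(f, x, h):
    return (f(x + h) - f(x - h)) / (2 * h)

def refined(f, x, h=1e-3):
    # matches the record's combination: (4 * D(h/2) - D(h)) / 3
    return (4 * central(f, x, h / 2) - central(f, x, h)) / 3

print(refined(np.sin, 0.3), np.cos(0.3))  # numerical vs. analytical derivative
```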
BotondA/PTAB_network_instability | [
"11bb01898aff51220cd91ad60f26c0c33f9a575a"
] | [
"code/compute_time_series.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThis script takes partially preprocessed voxel-space data from fmriprep,\nthen performs the following procedures:\n -detrends\n -standardizes\n -bandpass filters\n -regresses out confounds\n -parcels into ROIs\n\nOutputs preprocessed ROI-space time-series data.\n\n\"\"\"\n\nimport os\nimport sys\nimport itertools\nimport pandas as pd\nfrom nilearn import image\nfrom nilearn.input_data import NiftiLabelsMasker\n\n# =============================================================================\n# Setup\n# =============================================================================\n\n# Define filepaths\nHOMEDIR = os.path.abspath(os.path.join(__file__, \"../..\")) + \"/\"\nWORKDIR = HOMEDIR + \"data/fmriprep/\"\nOUTDIR = HOMEDIR + \"data/time_series/\"\n\n# Settings\nCUTOFF = 20 # Trimming time-series\n\n# Get run identifiers\nsubjects = [int(subid) for subid in sys.argv[1:]] # Subject IDs from bash script\nsessions = [\"BHB\", \"GLC\"]\ntasks = [\"rest\"]\nruns = [\"1\", \"2\"]\n\n# Items to be analyzed\nitems = list(itertools.product(subjects, sessions, tasks, runs))\n\n# Load the parcellation mask\nwillard_img = image.load_img(HOMEDIR + \"utils/499_roi.nii\")\n\n# =============================================================================\n# Perform computation\n# =============================================================================\n\n# Open files and perform analysis\ndef comp_timeseries(item):\n print(\"Computing time-series for:\", item)\n\n # Get filepaths\n bold_fp = WORKDIR + (\"sub-{0:0>3}/ses-{1}/func/\" \\\n \"sub-{0:0>3}_ses-{1}_task-{2}_run-{3}_space-MNI152NL\"\n \"in2009cAsym_desc-preproc_bold.nii.gz\") \\\n .format(item[0], item[1].lower(), item[2], item[3])\n\n conf_fp = WORKDIR + (\"sub-{0:0>3}/ses-{1}/func/\" \\\n \"sub-{0:0>3}_ses-{1}_task-{2}_run-{3}_desc-confounds_\"\n \"regressors.tsv\") \\\n .format(item[0], item[1].lower(), item[2], item[3])\n \n # checking if files are there\n for fp in [bold_fp, conf_fp]:\n if not(os.path.isfile(fp)):\n print(fp, ' missing. Can not proceed.')\n return 1\n \n # Load the image and drop first n frames\n func_img = image.index_img(image.load_img(bold_fp), slice(CUTOFF, None))\n print(item, ' func_img loaded')\n\n # Load confounds\n confounds = pd.read_csv(conf_fp, sep='\\t') \\\n .loc[CUTOFF:, [\n \"a_comp_cor_00\",\n \"a_comp_cor_01\",\n \"a_comp_cor_02\",\n \"a_comp_cor_03\",\n \"a_comp_cor_04\",\n \"a_comp_cor_05\",\n \"global_signal\",\n \"white_matter\",\n \"csf\",\n \"trans_x\",\n \"trans_y\",\n \"trans_z\",\n 'rot_x',\n 'rot_y',\n 'rot_z']]\n print(item, ' confounds loaded')\n\n # Create parcellation object with additional pre-processing parameters\n willard_mask = NiftiLabelsMasker(willard_img, detrend=True,\n t_r=0.802, low_pass=0.1, high_pass=0.01,\n standardize=True, memory=HOMEDIR+'cache',\n memory_level=1)\n\n # Process and perform parcellation\n roi_time_series = willard_mask.fit_transform(func_img,\n confounds=confounds.values)\n print(item, ' roi_time_series obtained')\n \n # Write into csv\n csv_data = pd.DataFrame(roi_time_series)\n csv_data.to_csv(OUTDIR + \"sub-{0:0>3}_ses-{1}_task-{2}_run-\" \\\n \"{3}.csv\".format(item[0], item[1].lower(), item[2], item[3]),\n header=False, index=False)\n print(item, ' time-series stores')\n\n return 0\n\n\n# Run computation\nif __name__ == '__main__':\n for item in items:\n comp_timeseries(item)\n \n print('done computing time-series')\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
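Illustrative note: `compute_time_series.py` trims the first `CUTOFF` frames and selects a fixed set of confound columns with `DataFrame.loc`. The same slicing on an in-memory frame (column names mirror fmriprep's regressors.tsv):

```python
import pandas as pd

confounds = pd.DataFrame({
    "global_signal": range(25),
    "csf": range(25),
    "trans_x": range(25),
})
CUTOFF = 20                                             # drop the first 20 frames
trimmed = confounds.loc[CUTOFF:, ["global_signal", "csf", "trans_x"]]
print(trimmed.shape)  # (5, 3)
```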
dabuc/CoralQuant | [
"26ba2e0b39a897d8947166796c6a4e9f5ab202fa"
] | [
"coralquant/spider/bs_stock_industry.py"
] | [
"import baostock as bs\nimport pandas as pd\nfrom coralquant.database import engine\nfrom sqlalchemy import String\nfrom coralquant.settings import CQ_Config\n\ndef create_stock_industry():\n \"\"\"\n BS-创建行业分类\n \"\"\"\n # 登陆系统\n lg = bs.login()\n # 显示登陆返回信息\n print('login respond error_code:' + lg.error_code)\n print('login respond error_msg:' + lg.error_msg)\n\n # 获取行业分类数据\n rs = bs.query_stock_industry()\n print('query_stock_industry error_code:' + rs.error_code)\n print('query_stock_industry respond error_msg:' + rs.error_msg)\n\n # 打印结果集\n industry_list = []\n while (rs.error_code == '0') & rs.next():\n # 获取一条记录,将记录合并在一起\n industry_list.append(rs.get_row_data())\n result = pd.DataFrame(industry_list, columns=rs.fields)\n\n dtype = {\n 'updateDate': String(10),\n 'code': String(9),\n 'code_name': String(10),\n 'industry': String(4),\n 'industryClassification': String(6)\n }\n\n result.to_sql('odl_bs_stock_industry', engine, schema=CQ_Config.DB_SCHEMA, if_exists='replace', index=False, dtype=dtype)\n\n # 登出系统\n bs.logout()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
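Illustrative note: the baostock record ends by writing the result frame with `DataFrame.to_sql` and explicit SQLAlchemy column types. A sketch of that step against an in-memory SQLite engine instead of the project's configured database:

```python
import pandas as pd
from sqlalchemy import String, create_engine

engine = create_engine("sqlite://")                      # stand-in for the project engine
df = pd.DataFrame({"code": ["sh.600000"], "industry": ["银行"]})
df.to_sql("odl_bs_stock_industry", engine, if_exists="replace", index=False,
          dtype={"code": String(9), "industry": String(4)})
print(pd.read_sql("select * from odl_bs_stock_industry", engine))
```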
jcreinhold/synthtorch | [
"bb6eb20641b2cae3cbb96421b12e03865b5c5095"
] | [
"synthtorch/util/helper.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nsynthtorch.util.helper\n\ndefine helper function for defining neural networks in pytorch\n\nAuthor: Jacob Reinhold ([email protected])\n\nCreated on: Nov 2, 2018\n\"\"\"\n\n__all__ = ['get_act',\n 'get_loss',\n 'get_norm1d',\n 'get_norm2d',\n 'get_norm3d',\n 'get_optim',\n 'init_weights']\n\nfrom typing import Optional\n\nimport logging\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom synthtorch.errors import SynthtorchError\nfrom synthtorch.learn.loss import CosineProximityLoss\nfrom synthtorch.learn.layers import Swish\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_act(name: str, inplace: bool = True, params: Optional[dict] = None) -> nn.Module:\n \"\"\"\n get activation module from pytorch\n must be one of: relu, lrelu, linear, tanh, sigmoid\n\n Args:\n name (str): name of activation function desired\n inplace (bool): flag activation to do operations in-place (if option available)\n params (dict): dictionary of parameters (as per pytorch documentation)\n\n Returns:\n act (activation): instance of activation class\n \"\"\"\n if name.lower() == 'relu':\n act = nn.ReLU(inplace=inplace)\n elif name.lower() == 'lrelu':\n act = nn.LeakyReLU(inplace=inplace) if params is None else nn.LeakyReLU(inplace=inplace, **params)\n elif name.lower() == 'prelu':\n act = nn.PReLU() if params is None else nn.PReLU(**params)\n elif name.lower() == 'elu':\n act = nn.ELU(inplace=inplace) if params is None else nn.ELU(inplace=inplace, **params)\n elif name.lower() == 'celu':\n act = nn.CELU(inplace=inplace) if params is None else nn.CELU(inplace=inplace, **params)\n elif name.lower() == 'selu':\n act = nn.SELU(inplace=inplace)\n elif name.lower() == 'linear':\n act = nn.LeakyReLU(1, inplace=inplace) # hack to get linear output\n elif name.lower() == 'tanh':\n act = nn.Tanh()\n elif name.lower() == 'sigmoid':\n act = nn.Sigmoid()\n elif name.lower() == 'softmax':\n act = nn.Softmax(dim=1)\n elif name.lower() == 'swish':\n act = Swish(inplace=inplace)\n else:\n raise SynthtorchError(f'Activation: \"{name}\" not a valid activation function or not supported.')\n return act\n\n\ndef get_norm1d(name: str, num_features: int, affine: bool = True, params: Optional[dict] = None) -> nn.Module:\n \"\"\"\n get a 1d normalization module from pytorch\n must be one of: instance, batch, none\n\n Args:\n name (str): name of normalization function desired\n num_features (int): number of channels in the normalization layer\n affine (bool): learn affine transform after normalization\n params (dict): dictionary of optional other parameters for the normalization layer\n as specified by the pytorch documentation\n\n Returns:\n norm: instance of normalization layer\n \"\"\"\n if name.lower() == 'instance':\n norm = nn.InstanceNorm1d(num_features, affine=affine) if params is None else \\\n nn.InstanceNorm1d(num_features, affine=affine, **params)\n elif name.lower() == 'batch':\n norm = nn.BatchNorm1d(num_features, affine=affine) if params is None else \\\n nn.BatchNorm1d(num_features, affine=affine, **params)\n elif name.lower() == 'layer':\n norm = nn.GroupNorm(1, num_features, affine=affine)\n elif name.lower() == 'none':\n norm = None\n else:\n raise SynthtorchError(f'Normalization: \"{name}\" not a valid normalization routine or not supported.')\n return norm\n\n\ndef get_norm2d(name: str, num_features: int, affine: bool = True, params: Optional[dict] = None) -> nn.Module:\n \"\"\"\n get a 2d normalization module from pytorch\n must be one of: instance, 
batch, none\n\n Args:\n name (str): name of normalization function desired\n num_features (int): number of channels in the normalization layer\n affine (bool): learn affine transform after normalization\n params (dict): dictionary of optional other parameters for the normalization layer\n as specified by the pytorch documentation\n\n Returns:\n norm: instance of normalization layer\n \"\"\"\n if name.lower() == 'instance':\n norm = nn.InstanceNorm2d(num_features, affine=affine) if params is None else \\\n nn.InstanceNorm2d(num_features, affine=affine, **params)\n elif name.lower() == 'batch':\n norm = nn.BatchNorm2d(num_features, affine=affine) if params is None else \\\n nn.BatchNorm2d(num_features, affine=affine, **params)\n elif name.lower() == 'layer':\n norm = nn.GroupNorm(1, num_features, affine=affine)\n elif name.lower() == 'none':\n norm = None\n else:\n raise SynthtorchError(f'Normalization: \"{name}\" not a valid normalization routine or not supported.')\n return norm\n\n\ndef get_norm3d(name: str, num_features: int, affine: bool = True, params: Optional[dict] = None) -> nn.Module:\n \"\"\"\n get a 3d normalization module from pytorch\n must be one of: instance, batch, none\n\n Args:\n name (str): name of normalization function desired\n num_features (int): number of channels in the normalization layer\n affine (bool): learn affine transform after normalization\n params (dict): dictionary of optional other parameters for the normalization layer\n as specified by the pytorch documentation\n\n Returns:\n norm: instance of normalization layer\n \"\"\"\n if name.lower() == 'instance':\n norm = nn.InstanceNorm3d(num_features, affine=affine) if params is None else \\\n nn.InstanceNorm3d(num_features, affine=affine, **params)\n elif name.lower() == 'batch':\n norm = nn.BatchNorm3d(num_features, affine=affine) if params is None else \\\n nn.BatchNorm3d(num_features, affine=affine, **params)\n elif name.lower() == 'layer':\n norm = nn.GroupNorm(1, num_features, affine=affine)\n elif name.lower() == 'none':\n norm = None\n else:\n raise SynthtorchError(f'Normalization: \"{name}\" not a valid normalization routine or not supported.')\n return norm\n\n\ndef get_optim(name: str):\n \"\"\" get an optimizer by name \"\"\"\n if name.lower() == 'adam':\n optimizer = torch.optim.Adam\n elif name.lower() == 'adamw':\n optimizer = torch.optim.AdamW\n elif name.lower() == 'sgd':\n optimizer = torch.optim.SGD\n elif name.lower() == 'sgdw':\n from ..learn.optim import SGDW\n optimizer = SGDW\n elif name.lower() == 'nsgd':\n from ..learn.optim import NesterovSGD\n optimizer = NesterovSGD\n elif name.lower() == 'nsgdw':\n from ..learn.optim import NesterovSGDW\n optimizer = NesterovSGDW\n elif name.lower() == 'rmsprop':\n optimizer = torch.optim.rmsprop\n elif name.lower() == 'adagrad':\n optimizer = torch.optim.adagrad\n elif name.lower() == 'amsgrad':\n from ..learn.optim import AMSGrad\n optimizer = AMSGrad\n else:\n raise SynthtorchError(f'Optimizer: \"{name}\" not a valid optimizer routine or not supported.')\n return optimizer\n\n\ndef get_loss(name: str):\n \"\"\" get a loss function by name \"\"\"\n if name == 'mse' or name is None:\n loss = nn.MSELoss()\n elif name == 'cp':\n loss = CosineProximityLoss()\n elif name == 'mae':\n loss = nn.L1Loss()\n elif name == 'bce':\n loss = nn.BCEWithLogitsLoss()\n else:\n raise ValueError(f'Loss function {name} not supported.')\n return loss\n\n\ndef init_weights(net, init_type='kaiming', init_gain=0.02):\n \"\"\"\n Initialize network weights\n (inspired by 
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/)\n\n Args:\n net (nn.Module): network to be initialized\n init_type (str): the name of an initialization method: normal, xavier, kaiming, or orthogonal\n init_gain (float): scaling factor for normal, xavier and orthogonal.\n\n Returns:\n None\n \"\"\"\n\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError(f'initialization method [{init_type}] is not implemented')\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n elif (classname.find('BatchNorm') != -1 or\n classname.find('InstanceNorm') != -1 or\n classname.find(\n 'GroupNorm') != -1): # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n if m.weight is not None:\n nn.init.normal_(m.weight.data, 1.0, init_gain)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n\n net.apply(init_func)\n if hasattr(net, 'n_seg'): # handle segae last layer initialization\n if net.last_init is not None:\n initial_values = torch.tensor(net.last_init)\n else:\n initial_values = torch.from_numpy(np.sort(np.random.rand(net.n_seg) * 2))\n net.finish[2].weight.data = (initial_values.type_as(net.finish[2].weight.data)\n .view(net.finish[2].weight.data.size()))\n\n if hasattr(net, 'all_conv'): # handle ICNR initalization of upsample layers\n if net.all_conv and net.dim == 2:\n for m in net.upsampconvs: icnr(m[0].weight)\n\n\ndef icnr(m, scale=2, init=nn.init.kaiming_normal_):\n \"\"\" ICNR init of `x`, with `scale` and `init` function \"\"\"\n ni, nf, h, w = m.shape\n ni2 = int(ni / (scale ** 2))\n k = init(torch.zeros([ni2, nf, h, w])).transpose(0, 1)\n k = k.contiguous().view(ni2, nf, -1)\n k = k.repeat(1, 1, scale ** 2)\n k = k.contiguous().view([nf, ni, h, w]).transpose(0, 1)\n m.data.copy_(k)\n"
] | [
[
"torch.nn.Softmax",
"torch.zeros",
"torch.nn.ELU",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.L1Loss",
"torch.nn.InstanceNorm1d",
"torch.tensor",
"torch.nn.Sigmoid",
"torch.nn.CELU",
"torch.nn.SELU",
"torch.nn.GroupNorm",
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.nn.PReLU",
"torch.nn.init.xavier_normal_",
"torch.nn.InstanceNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"numpy.random.rand",
"torch.nn.InstanceNorm3d",
"torch.nn.Tanh",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"torch.nn.MSELoss",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mdhaffar/speechbrain-1 | [
"34bcf9d0783cf72a952674032834383194018b7b"
] | [
"recipes/WSJ0Mix/separation/dynamic_mixing.py"
] | [
"import speechbrain as sb\nimport numpy as np\nimport torch\nimport torchaudio\nimport glob\nimport os\nfrom pathlib import Path\nimport random\nfrom speechbrain.processing.signal_processing import rescale\nfrom speechbrain.dataio.batch import PaddedBatch\n\n\ndef build_spk_hashtable(hparams):\n\n wsj0_utterances = glob.glob(\n os.path.join(hparams[\"wsj0_tr\"], \"**/*.wav\"), recursive=True\n )\n\n spk_hashtable = {}\n for utt in wsj0_utterances:\n\n spk_id = Path(utt).stem[:3]\n assert torchaudio.info(utt).sample_rate == 8000\n\n # e.g. 2speakers/wav8k/min/tr/mix/019o031a_0.27588_01vo030q_-0.27588.wav\n # id of speaker 1 is 019 utterance id is o031a\n # id of speaker 2 is 01v utterance id is 01vo030q\n\n if spk_id not in spk_hashtable.keys():\n spk_hashtable[spk_id] = [utt]\n else:\n spk_hashtable[spk_id].append(utt)\n\n # calculate weights for each speaker ( len of list of utterances)\n spk_weights = [len(spk_hashtable[x]) for x in spk_hashtable.keys()]\n\n return spk_hashtable, spk_weights\n\n\ndef dynamic_mix_data_prep(hparams):\n\n # 1. Define datasets\n train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": hparams[\"data_folder\"]},\n )\n\n # we build an dictionary where keys are speakers id and entries are list\n # of utterances files of that speaker\n\n spk_hashtable, spk_weights = build_spk_hashtable(hparams)\n\n spk_list = [x for x in spk_hashtable.keys()]\n spk_weights = [x / sum(spk_weights) for x in spk_weights]\n\n @sb.utils.data_pipeline.takes(\"mix_wav\")\n @sb.utils.data_pipeline.provides(\"mix_sig\", \"s1_sig\", \"s2_sig\")\n def audio_pipeline(\n mix_wav,\n ): # this is dummy --> it means one epoch will be same as without dynamic mixing\n\n speakers = np.random.choice(\n spk_list, hparams[\"num_spks\"], replace=False, p=spk_weights\n )\n # select two speakers randomly\n sources = []\n first_lvl = None\n\n spk_files = [\n np.random.choice(spk_hashtable[spk], 1, False)[0]\n for spk in speakers\n ]\n\n minlen = min(\n *[torchaudio.info(x).num_frames for x in spk_files],\n hparams[\"training_signal_len\"],\n )\n\n for i, spk_file in enumerate(spk_files):\n\n # select random offset\n length = torchaudio.info(spk_file).num_frames\n start = 0\n stop = length\n if length > minlen: # take a random window\n start = np.random.randint(0, length - minlen)\n stop = start + minlen\n\n tmp, fs_read = torchaudio.load(\n spk_file, frame_offset=start, num_frames=stop - start,\n )\n\n # peak = float(Path(spk_file).stem.split(\"_peak_\")[-1])\n tmp = tmp[0] # * peak # remove channel dim and normalize\n\n if i == 0:\n gain = np.clip(random.normalvariate(-27.43, 2.57), -45, 0)\n tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale=\"dB\")\n # assert not torch.all(torch.isnan(tmp))\n first_lvl = gain\n else:\n gain = np.clip(\n first_lvl + random.normalvariate(-2.51, 2.66), -45, 0\n )\n tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale=\"dB\")\n # assert not torch.all(torch.isnan(tmp))\n sources.append(tmp)\n\n # we mix the sources together\n # here we can also use augmentations ! 
-> runs on cpu and for each\n # mixture parameters will be different rather than for whole batch.\n # no difference however for bsz=1 :)\n\n # padding left\n # sources, _ = batch_pad_right(sources)\n\n sources = torch.stack(sources)\n mixture = torch.sum(sources, 0)\n max_amp = max(\n torch.abs(mixture).max().item(),\n *[x.item() for x in torch.abs(sources).max(dim=-1)[0]],\n )\n mix_scaling = 1 / max_amp * 0.9\n sources = sources * mix_scaling\n mixture = mix_scaling * mixture\n\n yield mixture\n for i in range(hparams[\"num_spks\"]):\n yield sources[i]\n\n sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)\n sb.dataio.dataset.set_output_keys(\n [train_data], [\"id\", \"mix_sig\", \"s1_sig\", \"s2_sig\"]\n )\n\n train_data = torch.utils.data.DataLoader(\n train_data,\n batch_size=hparams[\"dataloader_opts\"][\"batch_size\"],\n num_workers=hparams[\"dataloader_opts\"][\"num_workers\"],\n collate_fn=PaddedBatch,\n worker_init_fn=lambda x: np.random.seed(\n int.from_bytes(os.urandom(4), \"little\") + x\n ),\n )\n return train_data\n\n\ndef dynamic_mix_data_prep_3mix(hparams):\n\n # 1. Define datasets\n train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": hparams[\"data_folder\"]},\n )\n\n # we build an dictionary where keys are speakers id and entries are list\n # of utterances files of that speaker\n\n spk_hashtable, spk_weights = build_spk_hashtable(hparams)\n\n spk_list = [x for x in spk_hashtable.keys()]\n spk_weights = [x / sum(spk_weights) for x in spk_weights]\n\n @sb.utils.data_pipeline.takes(\"mix_wav\")\n @sb.utils.data_pipeline.provides(\"mix_sig\", \"s1_sig\", \"s2_sig\", \"s3_sig\")\n def audio_pipeline(\n mix_wav,\n ): # this is dummy --> it means one epoch will be same as without dynamic mixing\n\n speakers = np.random.choice(\n spk_list, hparams[\"num_spks\"], replace=False, p=spk_weights\n )\n # select two speakers randomly\n sources = []\n first_lvl = None\n\n spk_files = [\n np.random.choice(spk_hashtable[spk], 1, False)[0]\n for spk in speakers\n ]\n\n minlen = min(\n *[torchaudio.info(x).num_frames for x in spk_files],\n hparams[\"training_signal_len\"],\n )\n\n for i, spk_file in enumerate(spk_files):\n\n # select random offset\n length = torchaudio.info(spk_file).num_frames\n start = 0\n stop = length\n if length > minlen: # take a random window\n start = np.random.randint(0, length - minlen)\n stop = start + minlen\n\n tmp, fs_read = torchaudio.load(\n spk_file, frame_offset=start, num_frames=stop - start,\n )\n\n # peak = float(Path(spk_file).stem.split(\"_peak_\")[-1])\n tmp = tmp[0] # * peak # remove channel dim and normalize\n\n if i == 0:\n gain = np.clip(random.normalvariate(-27.43, 2.57), -45, 0)\n tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale=\"dB\")\n # assert not torch.all(torch.isnan(tmp))\n first_lvl = gain\n elif i == 1:\n gain = np.clip(\n first_lvl + random.normalvariate(-2.51, 2.66), -45, 0\n )\n tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale=\"dB\")\n else:\n pass\n # note that we effectively using 0dB gain for the last source\n\n sources.append(tmp)\n\n # we mix the sources together\n # here we can also use augmentations ! 
-> runs on cpu and for each\n # mixture parameters will be different rather than for whole batch.\n # no difference however for bsz=1 :)\n\n # padding left\n # sources, _ = batch_pad_right(sources)\n\n sources = torch.stack(sources)\n mixture = torch.sum(sources, 0)\n max_amp = max(\n torch.abs(mixture).max().item(),\n *[x.item() for x in torch.abs(sources).max(dim=-1)[0]],\n )\n mix_scaling = 1 / max_amp * 0.9\n sources = sources * mix_scaling\n mixture = mix_scaling * mixture\n\n yield mixture\n for i in range(hparams[\"num_spks\"]):\n yield sources[i]\n\n sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)\n sb.dataio.dataset.set_output_keys(\n [train_data], [\"id\", \"mix_sig\", \"s1_sig\", \"s2_sig\", \"s3_sig\"]\n )\n\n train_data = torch.utils.data.DataLoader(\n train_data,\n batch_size=hparams[\"dataloader_opts\"][\"batch_size\"],\n num_workers=hparams[\"dataloader_opts\"][\"num_workers\"],\n collate_fn=PaddedBatch,\n worker_init_fn=lambda x: np.random.seed(\n int.from_bytes(os.urandom(4), \"little\") + x\n ),\n )\n return train_data\n\n\ndef dynamic_mix_shuffleonly_data_prep(hparams):\n\n # 1. Define datasets\n train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": hparams[\"data_folder\"]},\n )\n\n # we draw Nspk indices\n source_wavkeys = [\n \"s\" + str(i) + \"_wav\" for i in range(1, hparams[\"num_spks\"] + 1)\n ]\n\n @sb.utils.data_pipeline.takes(\"s1_wav\", \"s2_wav\")\n @sb.utils.data_pipeline.provides(\"mix_sig\", \"s1_sig\", \"s2_sig\")\n def audio_pipeline(\n s1_wav, s2_wav\n ): # this is dummy --> it means one epoch will be same as without dynamic mixing\n\n # find the indices of two items to mix\n inds = list(\n np.random.random_integers(\n 0, len(train_data) - 1, size=(hparams[\"num_spks\"],)\n )\n )\n\n # get the lengths of these items\n lengths = []\n sourcefls = []\n for i, (ind, wavkey) in enumerate(zip(inds, source_wavkeys)):\n fl = train_data.data[str(ind)]\n sourcefl = fl[wavkey]\n sourcefls.append(sourcefl)\n lengths.append(torchaudio.info(sourcefl).num_frames)\n minlen = min(lengths)\n\n sources = []\n for i, (sourcefl, wavkey, length) in enumerate(\n zip(sourcefls, source_wavkeys, lengths)\n ):\n\n start = 0\n stop = length\n if length > minlen: # take a random window\n start = np.random.randint(0, length - minlen)\n stop = start + minlen\n\n tmp, fs_read = torchaudio.load(\n sourcefl,\n frame_offset=start,\n num_frames=stop - start,\n # normalize=False,\n )\n\n tmp = tmp[0] # remove channel dim\n sources.append(tmp)\n\n sources = torch.stack(sources)\n mixture = torch.sum(sources, 0)\n max_amp = max(\n torch.abs(mixture).max().item(),\n *[x.item() for x in torch.abs(sources).max(dim=-1)[0]],\n )\n mix_scaling = 1 / max_amp * 0.9\n sources = sources * mix_scaling\n mixture = mix_scaling * mixture\n\n yield mixture\n for i in range(hparams[\"num_spks\"]):\n yield sources[i]\n\n sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)\n sb.dataio.dataset.set_output_keys(\n [train_data], [\"id\", \"mix_sig\", \"s1_sig\", \"s2_sig\"]\n )\n\n return train_data\n"
] | [
[
"torch.abs",
"numpy.random.choice",
"torch.sum",
"torch.stack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chrispbradley/sundials | [
"8242ba0b361e285b0f826fc3e077e0d8e3e81ee2"
] | [
"examples/arkode/CXX_serial/plot_heat2D.py"
] | [
"#!/usr/bin/env python\n# ------------------------------------------------------------------------------\n# Programmer(s): Daniel R. Reynolds @ SMU\n# David J. Gardner @ LLNL\n# ------------------------------------------------------------------------------\n# SUNDIALS Copyright Start\n# Copyright (c) 2002-2022, Lawrence Livermore National Security\n# and Southern Methodist University.\n# All rights reserved.\n#\n# See the top-level LICENSE and NOTICE files for details.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n# SUNDIALS Copyright End\n# ------------------------------------------------------------------------------\n# matplotlib-based plotting script for the serial ark_heat2D example\n# ------------------------------------------------------------------------------\n\n# imports\nimport sys, os\nimport shlex\nimport numpy as np\nfrom pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\n\n# ------------------------------------------------------------------------------\n\n# read problem info file\ninfofile = 'heat2d_info.txt'\n\nwith open(infofile) as fn:\n\n # read the file line by line\n for line in fn:\n\n # split line into list\n text = shlex.split(line)\n\n # x-direction upper domian bound\n if \"xu\" in line:\n xu = float(text[1])\n continue\n\n # y-direction upper domain bound\n if \"yu\" in line:\n yu = float(text[1])\n continue\n\n # nodes in the x-direction\n if \"nx\" in line:\n nx = int(text[1])\n continue\n\n # nodes in the y-direction\n if \"ny\" in line:\n ny = int(text[1])\n continue\n\n # number of output times\n if \"nt\" in line:\n nt = int(text[1])\n continue\n\n# ------------------------------------------------------------------------------\n\n# check if the error was output\nfname = 'heat2d_error.txt'\n\nif os.path.isfile(fname):\n plottype = ['solution', 'error']\nelse:\n plottype = ['solution']\n\nfor pt in plottype:\n\n # fill array with data\n time = np.zeros(nt)\n result = np.zeros((nt, ny, nx))\n\n # load data\n data = np.loadtxt('heat2d_' + pt + '.txt', dtype=np.double)\n\n # extract data\n for i in range(nt):\n time[i] = data[i,0]\n result[i,0:ny+1,0:nx+1] = np.reshape(data[i,1:], (ny,nx))\n\n # determine extents of plots\n maxtemp = 1.1 * result.max()\n mintemp = 0.9 * result.min()\n\n # set x and y meshgrid objects\n xspan = np.linspace(0.0, xu, nx)\n yspan = np.linspace(0.0, yu, ny)\n X,Y = np.meshgrid(xspan, yspan)\n\n nxstr = repr(nx)\n nystr = repr(ny)\n\n # generate plots\n for tstep in range(nt):\n\n # set string constants for output plots, current time, mesh size\n pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png'\n tstr = str(time[tstep])\n\n # plot surface and save to disk\n fig = plt.figure(1)\n ax = fig.add_subplot(111, projection='3d')\n\n ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1,\n cmap=cm.jet, linewidth=0, antialiased=True, shade=True)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlim((mintemp, maxtemp))\n ax.view_init(20,45)\n if (pt == 'solution'):\n title('u(x,y) at t = ' + tstr)\n else:\n title('error(x,y) at t = ' + tstr)\n savefig(pname)\n plt.close()\n\n##### end of script #####\n"
] | [
[
"numpy.linspace",
"numpy.reshape",
"matplotlib.pyplot.close",
"numpy.meshgrid",
"numpy.zeros",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Megvii-BaseDetection/GFSD | [
"78c0a938d794584f44d60afab66debd43773d4f7"
] | [
"playground/fsdet/coco/retentive_rcnn/10shot/seed4/modeling/rpn.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom cvpods.modeling.anchor_generator import DefaultAnchorGenerator\nfrom cvpods.modeling.box_regression import Box2BoxTransform\nfrom cvpods.modeling.matcher import Matcher\nfrom cvpods.modeling.proposal_generator.rpn import RPN, StandardRPNHead\n\n\nclass RPNHead(StandardRPNHead):\n def __init__(self, cfg, input_shape):\n super(StandardRPNHead, self).__init__()\n\n # Standard RPN is shared across levels:\n in_channels = [s.channels for s in input_shape]\n assert len(set(in_channels)) == 1, \\\n \"Each level must have the same channel!\"\n in_channels = in_channels[0]\n\n # RPNHead should take the same input as anchor generator\n anchor_generator = DefaultAnchorGenerator(cfg, input_shape)\n num_cell_anchors = anchor_generator.num_cell_anchors\n box_dim = anchor_generator.box_dim\n assert (\n len(set(num_cell_anchors)) == 1\n ), \"Each level must have the same number of cell anchors\"\n num_cell_anchors = num_cell_anchors[0]\n\n # 3x3 conv for the hidden representation\n self.conv = nn.Conv2d(in_channels, in_channels,\n kernel_size=3, stride=1, padding=1)\n # 1x1 conv for predicting objectness logits\n self.objectness_logits = nn.Conv2d(in_channels, num_cell_anchors,\n kernel_size=1, stride=1, bias=True)\n self.finetuned_objectness_logits = nn.Conv2d(\n in_channels, num_cell_anchors, kernel_size=1, stride=1, bias=True\n )\n # 1x1 conv for predicting box2box transform deltas\n self.anchor_deltas = nn.Conv2d(\n in_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1\n )\n\n for layer in [self.conv, self.objectness_logits, self.anchor_deltas]:\n nn.init.normal_(layer.weight, std=0.01)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n\n def forward(self, features):\n pred_objectness_logits = []\n pred_anchor_deltas = []\n for x in features:\n t = F.relu(self.conv(x))\n pred_anchor_deltas.append(self.anchor_deltas(t))\n logit_map = torch.max(self.objectness_logits(t),\n self.finetuned_objectness_logits(t))\n pred_objectness_logits.append(logit_map)\n return pred_objectness_logits, pred_anchor_deltas\n\n\nclass DoubleHeadRPN(RPN):\n def __init__(self, cfg, input_shape):\n super(RPN, self).__init__()\n\n # fmt: off\n self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE\n self.in_features = cfg.MODEL.RPN.IN_FEATURES\n self.nms_thresh = cfg.MODEL.RPN.NMS_THRESH\n self.nms_type = cfg.MODEL.RPN.NMS_TYPE\n self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE\n self.positive_fraction = cfg.MODEL.RPN.POSITIVE_FRACTION\n self.smooth_l1_beta = cfg.MODEL.RPN.SMOOTH_L1_BETA\n self.loss_weight = cfg.MODEL.RPN.LOSS_WEIGHT\n # fmt: on\n\n # Map from self.training state to train/test settings\n self.pre_nms_topk = {\n True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN,\n False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST,\n }\n self.post_nms_topk = {\n True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN,\n False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST,\n }\n self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH\n\n self.anchor_generator = DefaultAnchorGenerator(\n cfg, [input_shape[f] for f in self.in_features]\n )\n self.box2box_transform = Box2BoxTransform(\n weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS\n )\n self.anchor_matcher = Matcher(\n cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS,\n allow_low_quality_matches=True\n )\n self.rpn_head = RPNHead(cfg,\n [input_shape[f] for f in self.in_features])\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.init.normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Kaslanarian/PythonSVM | [
"715eeef2a245736167addf45a6aee8b40b54d0c7"
] | [
"tests/dataset_classify.py"
] | [
"import numpy as np\nfrom pysvm import LinearSVC, KernelSVC, NuSVC\nfrom sklearn.datasets import load_iris, load_breast_cancer, load_digits, load_wine\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nnp.random.seed(2022)\n\nscore = np.zeros((3, 4))\nfor i, load_dataset in enumerate(\n [load_iris, load_wine, load_breast_cancer, load_digits]):\n X, y = load_dataset(return_X_y=True)\n train_X, test_X, train_y, test_y = train_test_split(X, y)\n stder = StandardScaler().fit(train_X)\n train_X = stder.transform(train_X)\n test_X = stder.transform(test_X)\n\n for j, model in enumerate([LinearSVC, KernelSVC, NuSVC]):\n score[j, i] = model(\n n_jobs=6,\n max_iter=1000,\n ).fit(train_X, train_y).score(test_X, test_y)\n\nprint(score)"
] | [
[
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"sklearn.model_selection.train_test_split",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
archonic/frankmocap | [
"eebb4591307fb46bfbc53afcf5663e758f686ab6"
] | [
"mocap_utils/geometry_utils.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport os, sys, shutil\nimport os.path as osp\n# sys.path.append(\"/\")\nimport numpy as np\nimport torch\nfrom torch.nn import functional as F\nimport cv2\nimport numpy.matlib as npm\nimport mocap_utils.geometry_utils_torch as gut\n\n\ndef flip_hand_pose(pose):\n pose = pose.copy()\n if len(pose.shape) == 1:\n pose = pose.reshape(-1, 3)\n pose[:, 1] *= -1\n pose[:, 2] *= -1\n return pose.reshape(-1,)\n else:\n assert len(pose.shape) == 2\n pose[:, 1] *= -1\n pose[:, 2] *= -1\n return pose\n\n\ndef flip_hand_joints_3d(joints_3d):\n assert joints_3d.shape[1] == 3\n assert len(joints_3d.shape) == 2\n rot_mat = np.diag([-1, 1, 1])\n return np.matmul(rot_mat, joints_3d.T).T\n\n\ndef __quaternion_to_angle_axis_torch(quat):\n quat = quat.clone()\n if quat.dim() == 1:\n assert quat.size(0) == 4\n quat = quat.view(1, 4)\n angle_axis = gut.quaternion_to_angle_axis(quat)[0]\n elif quat.dim() == 2:\n assert quat.size(1) == 4\n angle_axis = gut.quaternion_to_angle_axis(quat)\n else:\n assert quat.dim() == 3\n dim0 = quat.size(0)\n dim1 = quat.size(1)\n assert quat.size(2) == 4\n quat = quat.view(dim0*dim1, 4)\n angle_axis = gut.quaternion_to_angle_axis(quat)\n angle_axis = angle_axis.view(dim0, dim1, 3)\n return angle_axis\n\n\ndef quaternion_to_angle_axis(quaternion):\n quat = quaternion\n if isinstance(quat, torch.Tensor):\n return __quaternion_to_angle_axis_torch(quaternion)\n else:\n assert isinstance(quat, np.ndarray)\n quat_torch = torch.from_numpy(quat)\n angle_axis_torch = __quaternion_to_angle_axis_torch(quat_torch)\n return angle_axis_torch.numpy()\n\n\ndef __angle_axis_to_quaternion_torch(aa):\n aa = aa.clone()\n if aa.dim() == 1:\n assert aa.size(0) == 3 \n aa = aa.view(1, 3)\n quat = gut.angle_axis_to_quaternion(aa)[0]\n elif aa.dim() == 2:\n assert aa.size(1) == 3\n quat = gut.angle_axis_to_quaternion(aa)\n else:\n assert aa.dim() == 3\n dim0 = aa.size(0)\n dim1 = aa.size(1)\n assert aa.size(2) == 3\n aa = aa.view(dim0*dim1, 3)\n quat = gut.angle_axis_to_quaternion(aa)\n quat = quat.view(dim0, dim1, 4)\n return quat\n\n\ndef angle_axis_to_quaternion(angle_axis):\n aa = angle_axis\n if isinstance(aa, torch.Tensor):\n return __angle_axis_to_quaternion_torch(aa)\n else:\n assert isinstance(aa, np.ndarray)\n aa_torch = torch.from_numpy(aa)\n quat_torch = __angle_axis_to_quaternion_torch(aa_torch)\n return quat_torch.numpy()\n\n\ndef __angle_axis_to_rotation_matrix_torch(aa):\n aa = aa.clone()\n if aa.dim() == 1:\n assert aa.size(0) ==3 \n aa = aa.view(1, 3)\n rotmat = gut.angle_axis_to_rotation_matrix(aa)[0][:3, :3]\n elif aa.dim() == 2:\n assert aa.size(1) == 3\n rotmat = gut.angle_axis_to_rotation_matrix(aa)[:, :3, :3]\n else:\n assert aa.dim() == 3\n dim0 = aa.size(0)\n dim1 = aa.size(1)\n assert aa.size(2) == 3\n aa = aa.view(dim0*dim1, 3)\n rotmat = gut.angle_axis_to_rotation_matrix(aa)\n rotmat = rotmat.view(dim0, dim1, 4, 4)\n rotmat = rotmat[:, :, :3, :3]\n return rotmat\n\n\ndef angle_axis_to_rotation_matrix(angle_axis):\n aa = angle_axis\n if isinstance(aa, torch.Tensor):\n return __angle_axis_to_rotation_matrix_torch(aa)\n else:\n assert isinstance(aa, np.ndarray)\n aa_torch = torch.from_numpy(aa)\n rotmat_torch = __angle_axis_to_rotation_matrix_torch(aa_torch)\n return rotmat_torch.numpy()\n\n\ndef __rotation_matrix_to_angle_axis_torch(rotmat):\n rotmat = rotmat.clone()\n if rotmat.dim() == 2:\n assert rotmat.size(0) == 3\n assert rotmat.size(1) == 3\n rotmat0 = torch.zeros((1, 3, 4))\n rotmat0[0, :, :3] = rotmat\n rotmat0[:, 
2, 3] = 1.0\n aa = gut.rotation_matrix_to_angle_axis(rotmat0)[0]\n elif rotmat.dim() == 3:\n dim0 = rotmat.size(0)\n assert rotmat.size(1) == 3\n assert rotmat.size(2) == 3\n rotmat0 = torch.zeros((dim0, 3, 4))\n rotmat0[:, :, :3] = rotmat\n rotmat0[:, 2, 3] = 1.0\n aa = gut.rotation_matrix_to_angle_axis(rotmat0)\n else:\n assert rotmat.dim() == 4\n dim0 = rotmat.size(0)\n dim1 = rotmat.size(1)\n assert rotmat.size(2) == 3\n assert rotmat.size(3) == 3\n rotmat0 = torch.zeros((dim0*dim1, 3, 4))\n rotmat0[:, :, :3] = rotmat.view(dim0*dim1, 3, 3)\n rotmat0[:, 2, 3] = 1.0\n aa = gut.rotation_matrix_to_angle_axis(rotmat0)\n aa = aa.view(dim0, dim1, 3)\n return aa\n\n\ndef rotation_matrix_to_angle_axis(rotmat):\n if isinstance(rotmat, torch.Tensor):\n return __rotation_matrix_to_angle_axis_torch(rotmat)\n else:\n assert isinstance(rotmat, np.ndarray)\n rotmat_torch = torch.from_numpy(rotmat)\n aa_torch = __rotation_matrix_to_angle_axis_torch(rotmat_torch)\n return aa_torch.numpy()\n \n\ndef rot6d_to_rotmat(x):\n \"\"\"Convert 6D rotation representation to 3x3 rotation matrix.\n Based on Zhou et al., \"On the Continuity of Rotation Representations in Neural Networks\", CVPR 2019\n Input:\n (B,6) Batch of 6-D rotation representations\n Output:\n (B,3,3) Batch of corresponding rotation matrices\n \"\"\"\n assert isinstance(x, torch.Tensor), \"Current version only supports torch.tensor\"\n\n x = x.view(-1,3,2)\n a1 = x[:, :, 0]\n a2 = x[:, :, 1]\n b1 = F.normalize(a1)\n b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)\n b3 = torch.cross(b1, b2)\n return torch.stack((b1, b2, b3), dim=-1)"
] | [
[
"numpy.diag",
"torch.nn.functional.normalize",
"torch.zeros",
"torch.einsum",
"numpy.matmul",
"torch.from_numpy",
"torch.stack",
"torch.cross"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GaoZiqiang/Multiview-ObjectDetection | [
"41d28bc15622b4d3a863ba4c8b53b06f6b3b1568"
] | [
"cpp_test/line_chart.py"
] | [
"import matplotlib.pyplot as plt\n\n# 两个数组\nx_data1 = ['1','2','3','4','5']\ny_data1 = [0.2,0.38,0.6,0.801,1]\n\n# k-NN\nx_data2 = ['1','2','3','4','5']\ny_data2 = [0.11,0.25,0.52,0.63,0.87]\n\n# NN\nx_data3 = ['1','2','3','4','5']\ny_data3 = [0.13,0.31,0.57,0.69,0.95]\n\n# Cascade R-CNN\nx_data4 = ['1','2','3','4','5']\ny_data4 = [0.102,0.15,0.51,0.64,0.88]\n\n# plt.plot(ranks,cmc1,label='ranking',color='red',marker='o',markersize=5)\n\n\n\nplt.plot(x_data1,y_data1,color='red',label='our method',marker='o',markersize=3)\nplt.plot(x_data2,y_data2,color='blue',label='K-NN',marker='o',markersize=3)\nplt.plot(x_data3,y_data3,color='green',label='NN',marker='o',markersize=3)\nplt.plot(x_data4,y_data4,color='black',label='Cascade R-CNN',marker='o',markersize=3)\nplt.legend(['our method','K-NN','NN','Cascade R-CNN'])#\nplt.ylabel('Precision',fontsize=15)\nplt.xlabel('Rank_num',fontsize=15)\nplt.title('CMC Curve')\n\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
upura/commonlitreadabilityprize | [
"5af91d122d7038c5f107fd5d17e024c160a4698d",
"5af91d122d7038c5f107fd5d17e024c160a4698d"
] | [
"pl/dataset.py",
"working/roberta_large_finetune.py"
] | [
"import pandas as pd\nimport pytorch_lightning as pl\nimport torch\nfrom sklearn.model_selection import KFold\nfrom torch.utils.data import DataLoader, Dataset\nfrom transformers import AutoTokenizer\n\n\nclass TextDataset(Dataset):\n def __init__(\n self,\n df,\n text_col: str,\n target_col: str,\n tokenizer_name: str,\n max_len: int,\n is_train: bool = True,\n ):\n super().__init__()\n\n self.df = df\n self.is_train = is_train\n self.text = df[text_col].tolist()\n\n if self.is_train:\n self.target = torch.tensor(df[target_col].values, dtype=torch.float32)\n\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n self.encoded = tokenizer.batch_encode_plus(\n self.text,\n padding=\"max_length\",\n max_length=max_len,\n truncation=True,\n return_attention_mask=True,\n )\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, index):\n input_ids = torch.tensor(self.encoded[\"input_ids\"][index])\n attention_mask = torch.tensor(self.encoded[\"attention_mask\"][index])\n\n if self.is_train:\n target = self.target[index]\n return {\n \"ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"targets\": target,\n }\n else:\n return {\"ids\": input_ids, \"attention_mask\": attention_mask}\n\n\nclass MyDataModule(pl.LightningDataModule):\n def __init__(self, cfg):\n super().__init__()\n\n self.test_df = None\n self.train_df = None\n self.valid_df = None\n self.cfg = cfg\n\n def get_test_df(self):\n df = pd.read_csv(self.cfg.TEST_DF_PATH)\n return df\n\n def split_train_valid_df(self):\n if int(self.cfg.debug):\n df = pd.read_csv(self.cfg.TRAIN_DF_PATH, nrows=100)\n else:\n df = pd.read_csv(self.cfg.TRAIN_DF_PATH)\n\n # Remove incomplete entries if any.\n df.drop(\n df[(df.target == 0) & (df.standard_error == 0)].index,\n inplace=True,\n )\n df.reset_index(drop=True, inplace=True)\n\n cv = KFold(n_splits=self.cfg.NUM_FOLDS, shuffle=True, random_state=42)\n for n, (train_index, val_index) in enumerate(cv.split(df)):\n df.loc[val_index, \"fold\"] = int(n)\n df[\"fold\"] = df[\"fold\"].astype(int)\n\n train_df = df[df[\"fold\"] != self.cfg.fold].reset_index(drop=True)\n valid_df = df[df[\"fold\"] == self.cfg.fold].reset_index(drop=True)\n return train_df, valid_df\n\n def setup(self, stage):\n self.test_df = self.get_test_df()\n train_df, valid_df = self.split_train_valid_df()\n self.train_df = train_df\n self.valid_df = valid_df\n\n def get_dataframe(self, phase):\n assert phase in {\"train\", \"valid\", \"test\"}\n if phase == \"train\":\n return self.train_df\n elif phase == \"valid\":\n return self.valid_df\n elif phase == \"test\":\n return self.test_df\n\n def get_ds(self, phase):\n assert phase in {\"train\", \"valid\", \"test\"}\n ds = TextDataset(\n df=self.get_dataframe(phase=phase),\n text_col=self.cfg.TEXT_COL,\n target_col=self.cfg.TARGET_COL,\n tokenizer_name=self.cfg.TOKENIZER_PATH,\n max_len=self.cfg.MAX_LEN,\n is_train=(phase != \"test\"),\n )\n return ds\n\n def get_loader(self, phase):\n dataset = self.get_ds(phase=phase)\n return DataLoader(\n dataset,\n batch_size=self.cfg.BATCH_SIZE,\n shuffle=True if phase == \"train\" else False,\n num_workers=4,\n drop_last=True if phase == \"train\" else False,\n )\n\n # Trainer.fit() 時に呼び出される\n def train_dataloader(self):\n return self.get_loader(phase=\"train\")\n\n # Trainer.fit() 時に呼び出される\n def val_dataloader(self):\n return self.get_loader(phase=\"valid\")\n\n def test_dataloader(self):\n return self.get_loader(phase=\"test\")\n",
"import os\nimport gc\nimport math\nimport random\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom transformers import AdamW\nfrom transformers import AutoTokenizer\nfrom transformers import AutoModel\nfrom transformers import AutoConfig\nfrom transformers import get_cosine_schedule_with_warmup\nfrom sklearn.model_selection import KFold\n\n\ngc.enable()\nNUM_FOLDS = 5\nNUM_EPOCHS = 3\nBATCH_SIZE = 8\nMAX_LEN = 248\nEVAL_SCHEDULE = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1., 1)]\nROBERTA_PATH = 'roberta-large'\nTOKENIZER_PATH = 'roberta-large'\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef set_random_seed(random_seed):\n random.seed(random_seed)\n np.random.seed(random_seed)\n os.environ[\"PYTHONHASHSEED\"] = str(random_seed)\n torch.manual_seed(random_seed)\n torch.cuda.manual_seed(random_seed)\n torch.cuda.manual_seed_all(random_seed)\n torch.backends.cudnn.deterministic = True\n\n\nclass LitDataset(Dataset):\n def __init__(self, df, inference_only=False):\n super().__init__()\n\n self.df = df\n self.inference_only = inference_only\n self.text = df.excerpt.tolist()\n #self.text = [text.replace(\"\\n\", \" \") for text in self.text]\n\n if not self.inference_only:\n self.target = torch.tensor(df.target.values, dtype=torch.float32)\n\n self.encoded = tokenizer.batch_encode_plus(\n self.text,\n padding = 'max_length',\n max_length = MAX_LEN,\n truncation = True,\n return_attention_mask=True\n )\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, index):\n input_ids = torch.tensor(self.encoded['input_ids'][index])\n attention_mask = torch.tensor(self.encoded['attention_mask'][index])\n\n if self.inference_only:\n return (input_ids, attention_mask)\n else:\n target = self.target[index]\n return (input_ids, attention_mask, target)\n\n\nclass LitModel(nn.Module):\n def __init__(self):\n super().__init__()\n\n config = AutoConfig.from_pretrained(ROBERTA_PATH)\n config.update({\"output_hidden_states\":True,\n \"hidden_dropout_prob\": 0.0,\n \"layer_norm_eps\": 1e-7})\n\n self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config) \n self.attention = nn.Sequential(\n nn.Linear(1024, 512),\n nn.Tanh(),\n nn.Linear(512, 1),\n nn.Softmax(dim=1)\n )\n\n self.regressor = nn.Sequential(\n nn.Linear(1024, 1) \n )\n\n def forward(self, input_ids, attention_mask):\n roberta_output = self.roberta(input_ids=input_ids,\n attention_mask=attention_mask) \n\n # There are a total of 13 layers of hidden states.\n # 1 for the embedding layer, and 12 for the 12 Roberta layers.\n # We take the hidden states from the last Roberta layer.\n last_layer_hidden_states = roberta_output.hidden_states[-1]\n\n # The number of cells is MAX_LEN.\n # The size of the hidden state of each cell is 768 (for roberta-base).\n # In order to condense hidden states of all cells to a context vector,\n # we compute a weighted average of the hidden states of all cells.\n # We compute the weight of each cell, using the attention neural network.\n weights = self.attention(last_layer_hidden_states)\n\n # weights.shape is BATCH_SIZE x MAX_LEN x 1\n # last_layer_hidden_states.shape is BATCH_SIZE x MAX_LEN x 768 \n # Now we compute context_vector as the weighted average.\n # context_vector.shape is BATCH_SIZE x 768\n context_vector = torch.sum(weights * last_layer_hidden_states, dim=1) \n\n # Now we reduce the context vector to the prediction score.\n return 
self.regressor(context_vector)\n\n\ndef eval_mse(model, data_loader):\n \"\"\"Evaluates the mean squared error of the |model| on |data_loader|\"\"\"\n model.eval()\n mse_sum = 0\n\n with torch.no_grad():\n for batch_num, (input_ids, attention_mask, target) in enumerate(data_loader):\n input_ids = input_ids.to(DEVICE)\n attention_mask = attention_mask.to(DEVICE)\n target = target.to(DEVICE)\n\n pred = model(input_ids, attention_mask)\n mse_sum += nn.MSELoss(reduction=\"sum\")(pred.flatten(), target).item()\n\n return mse_sum / len(data_loader.dataset)\n\n\ndef predict(model, data_loader):\n \"\"\"Returns an np.array with predictions of the |model| on |data_loader|\"\"\"\n model.eval()\n\n result = np.zeros(len(data_loader.dataset))\n index = 0\n\n with torch.no_grad():\n for batch_num, (input_ids, attention_mask) in enumerate(data_loader):\n input_ids = input_ids.to(DEVICE)\n attention_mask = attention_mask.to(DEVICE)\n\n pred = model(input_ids, attention_mask)\n\n result[index : index + pred.shape[0]] = pred.flatten().to(\"cpu\")\n index += pred.shape[0]\n\n return result\n\n\ndef train(model, model_path, train_loader, val_loader,\n optimizer, scheduler=None, num_epochs=NUM_EPOCHS):\n best_val_rmse = None\n best_epoch = 0\n step = 0\n last_eval_step = 0\n eval_period = EVAL_SCHEDULE[0][1]\n\n start = time.time()\n\n for epoch in range(num_epochs): \n val_rmse = None \n\n for batch_num, (input_ids, attention_mask, target) in enumerate(train_loader):\n input_ids = input_ids.to(DEVICE)\n attention_mask = attention_mask.to(DEVICE)\n target = target.to(DEVICE)\n\n optimizer.zero_grad()\n\n model.train()\n\n pred = model(input_ids, attention_mask)\n mse = nn.MSELoss(reduction=\"mean\")(pred.flatten(), target)\n mse.backward()\n\n optimizer.step()\n if scheduler:\n scheduler.step()\n\n if step >= last_eval_step + eval_period:\n # Evaluate the model on val_loader.\n elapsed_seconds = time.time() - start\n num_steps = step - last_eval_step\n print(f\"\\n{num_steps} steps took {elapsed_seconds:0.3} seconds\")\n last_eval_step = step\n\n val_rmse = math.sqrt(eval_mse(model, val_loader)) \n\n print(f\"Epoch: {epoch} batch_num: {batch_num}\",\n f\"val_rmse: {val_rmse:0.4}\")\n\n for rmse, period in EVAL_SCHEDULE:\n if val_rmse >= rmse:\n eval_period = period\n break\n\n if not best_val_rmse or val_rmse < best_val_rmse:\n best_val_rmse = val_rmse\n best_epoch = epoch\n torch.save(model.state_dict(), model_path)\n print(f\"New best_val_rmse: {best_val_rmse:0.4}\")\n else:\n print(f\"Still best_val_rmse: {best_val_rmse:0.4}\",\n f\"(from epoch {best_epoch})\")\n\n start = time.time()\n step += 1\n return best_val_rmse\n\n\ndef create_optimizer(model):\n named_parameters = list(model.named_parameters())\n\n roberta_parameters = named_parameters[:197]\n attention_parameters = named_parameters[199:203]\n regressor_parameters = named_parameters[203:]\n\n attention_group = [params for (name, params) in attention_parameters]\n regressor_group = [params for (name, params) in regressor_parameters]\n\n parameters = []\n parameters.append({\"params\": attention_group})\n parameters.append({\"params\": regressor_group})\n\n for layer_num, (name, params) in enumerate(roberta_parameters):\n weight_decay = 0.0 if \"bias\" in name else 0.01\n\n lr = 2e-5\n\n if layer_num >= 69:\n lr = 5e-5\n\n if layer_num >= 133:\n lr = 1e-4\n\n parameters.append({\"params\": params,\n \"weight_decay\": weight_decay,\n \"lr\": lr})\n\n return AdamW(parameters)\n\n\nif __name__ == '__main__':\n\n train_df = 
pd.read_csv(\"../input/commonlitreadabilityprize/train.csv\")\n # Remove incomplete entries if any.\n train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index,\n inplace=True)\n train_df.reset_index(drop=True, inplace=True)\n test_df = pd.read_csv(\"../input/commonlitreadabilityprize/test.csv\")\n submission_df = pd.read_csv(\"../input/commonlitreadabilityprize/sample_submission.csv\")\n\n tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)\n\n gc.collect()\n\n SEED = 1000\n list_val_rmse = []\n\n kfold = KFold(n_splits=NUM_FOLDS, random_state=SEED, shuffle=True)\n\n for fold, (train_indices, val_indices) in enumerate(kfold.split(train_df)): \n print(f\"\\nFold {fold + 1}/{NUM_FOLDS}\")\n model_path = f\"large_model_{fold + 1}.pth\"\n\n set_random_seed(SEED + fold)\n\n train_dataset = LitDataset(train_df.loc[train_indices]) \n val_dataset = LitDataset(train_df.loc[val_indices]) \n\n train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,\n drop_last=True, shuffle=True, num_workers=2) \n val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE,\n drop_last=False, shuffle=False, num_workers=2) \n\n set_random_seed(SEED + fold)\n model = LitModel().to(DEVICE)\n\n optimizer = create_optimizer(model) \n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_training_steps=NUM_EPOCHS * len(train_loader),\n num_warmup_steps=50)\n\n list_val_rmse.append(train(model, model_path, train_loader,\n val_loader, optimizer, scheduler=scheduler))\n\n del model\n gc.collect()\n\n print(\"\\nPerformance estimates:\")\n print(list_val_rmse)\n print(\"Mean:\", np.array(list_val_rmse).mean())\n\n test_dataset = LitDataset(test_df, inference_only=True)\n all_predictions = np.zeros((len(list_val_rmse), len(test_df)))\n\n test_dataset = LitDataset(test_df, inference_only=True)\n test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,\n drop_last=False, shuffle=False, num_workers=2)\n\n for index in range(len(list_val_rmse)): \n model_path = f\"large_model_{index + 1}.pth\"\n print(f\"\\nUsing {model_path}\")\n \n model = LitModel()\n model.load_state_dict(torch.load(model_path)) \n model.to(DEVICE)\n \n all_predictions[index] = predict(model, test_loader)\n \n del model\n gc.collect()\n\n predictions = all_predictions.mean(axis=0)\n submission_df.target = predictions\n print(submission_df)\n submission_df.to_csv(\"submission.csv\", index=False)\n"
] | [
[
"pandas.read_csv",
"torch.utils.data.DataLoader",
"sklearn.model_selection.KFold",
"torch.tensor"
],
[
"torch.nn.Softmax",
"pandas.read_csv",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.sum",
"sklearn.model_selection.KFold",
"torch.tensor",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"numpy.array",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
StephenHogg/jax | [
"5c9438864e64c8b02b0e13fce9759d8a8ed3d488"
] | [
"tests/api_test.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\nfrom contextlib import contextmanager\nimport copy\nfrom functools import partial\nimport unittest\nimport warnings\nimport weakref\n\nfrom absl import logging\nfrom absl.testing import absltest, parameterized\nimport numpy as onp\n\nimport concurrent.futures\n\nimport jax\nimport jax.numpy as np\nfrom jax import jit, grad, device_put, jacfwd, jacrev, hessian\nfrom jax import api, core, lax, lax_reference\nfrom jax.core import Primitive\nfrom jax.interpreters import ad\nfrom jax.interpreters import xla\nfrom jax.abstract_arrays import concretization_err_msg\nfrom jax.lib import xla_bridge as xb\nfrom jax import test_util as jtu\nfrom jax import tree_util\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n\nclass APITest(jtu.JaxTestCase):\n\n def test_grad_argnums(self):\n def f(x, y, z, flag=False):\n assert flag\n return 1.0 * x + 2.0 * y + 3.0 * z\n\n assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0\n assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0\n assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)\n\n def test_value_and_grad_argnums(self):\n def f(x, y, z, flag=False):\n assert flag\n return 1.0 * x + 2.0 * y + 3.0 * z\n\n y = f(1.0, 1.0, 1.0, flag=True)\n assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)\n assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)\n assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))\n\n def test_jit_static_args(self):\n side = []\n\n def f(x, y, z, flag=False, flag2=False):\n assert flag\n side.append(None)\n return 100*x + 10*y + z\n\n f1 = jit(f, static_argnums=(3, 4))\n assert f1(1, 2, 3, True, False) == 123\n assert len(side) == 1\n assert f1(2, 1, 3, True, False) == 213\n assert len(side) == 1\n assert f1(2, 1, 3, True, True) == 213\n assert len(side) == 2\n\n side[:] = []\n f2 = jit(f, static_argnums=(0, 2, 3, 4))\n assert f2(1, 2, 3, True, False) == 123\n assert len(side) == 1\n assert f2(1, 3, 3, True, False) == 133\n assert len(side) == 1\n assert f2(2, 2, 3, True, False) == 223\n assert len(side) == 2\n assert f2(2, 4, 3, True, False) == 243\n assert len(side) == 2\n assert f2(2, 4, 3, True, True) == 243\n assert len(side) == 3\n assert f2(2, 5, 3, True, True) == 253\n assert len(side) == 3\n\n def test_jit_kwargs(self):\n side = []\n\n def f(x, y, z):\n side.append(None)\n return 100*x + 10*y + z\n\n f = jit(f)\n assert f(1, 2, 3) == 123\n assert len(side) == 1\n assert f(1, 2, 3) == 123\n assert len(side) == 1\n\n assert f(1, 2, z=3) == 123\n assert len(side) == 2 # actually recompiles from kwarg\n assert f(1, 2, z=3) == 123\n assert len(side) == 2 # but should still cache\n\n f(1, 2, z=onp.zeros(3)) # doesn't crash\n\n def test_jit_many_args_tuples(self):\n @jit\n def f(args_list):\n return sum(args_list)\n\n make_tuple = xla.make_tuple\n\n counts = [0]\n def make_tuple_and_count(*args, 
**kwargs):\n counts[0] += 1\n return make_tuple(*args, **kwargs)\n\n try:\n xla.make_tuple = make_tuple_and_count\n ans = f(list(range(500)))\n finally:\n xla.make_tuple = make_tuple\n\n expected = sum(range(500))\n self.assertEqual(counts[0], 1) # formed a tuple on dispatch\n self.assertEqual(ans, expected) # computed the correct result\n\n def test_grad_of_jit(self):\n side = []\n\n @jit\n def f(x):\n side.append(None)\n return x * x\n\n assert grad(f)(1.0) == 2.0\n assert len(side) == 1\n assert grad(f)(2.0) == 4.0\n assert len(side) == 1\n\n def test_jit_of_grad(self):\n side = []\n\n @jit\n def f(x):\n side.append(None)\n return x * x\n\n g = jit(grad(f))\n assert g(1.0) == 2.0\n assert len(side) == 1\n assert g(2.0) == 4.0\n assert len(side) == 1\n\n def test_bad_input(self):\n def f(x):\n return x\n\n self.assertRaisesRegex(\n TypeError, \".* 'foo' of type <.*'str'> is not a valid JAX type\",\n lambda: grad(f)(\"foo\"))\n\n self.assertRaisesRegex(\n TypeError, \".* 'foo' of type <.*'str'> is not a valid JAX type\",\n lambda: jit(f)(\"foo\"))\n\n def test_grad_tuple_output(self):\n jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,\n \"Gradient only defined for scalar-output functions. \")\n\n def test_grad_unit_output(self):\n jtu.check_raises(lambda: grad(lambda x: ())(onp.zeros(3)), TypeError,\n \"Gradient only defined for scalar-output functions. \")\n\n def test_grad_nonscalar_output(self):\n jtu.check_raises(lambda: grad(lambda x: x)(onp.zeros(3)), TypeError,\n \"Gradient only defined for scalar-output functions. \")\n\n def test_unwrapped_numpy(self):\n def f(x):\n return onp.exp(x)\n\n jtu.check_raises(lambda: grad(f)(onp.zeros(3)), Exception,\n \"Tracer can't be used with raw numpy functions. \"\n \"You might have\\n import numpy as np\\ninstead of\\n\"\n \" import jax.numpy as np\")\n\n def test_binop_mismatch(self):\n def f(x, y):\n return x + y\n\n jtu.check_raises(\n lambda: f(np.zeros(3), np.zeros(4)),\n TypeError,\n \"add got incompatible shapes for broadcasting: (3,), (4,).\")\n\n jtu.check_raises(\n lambda: grad(f)(onp.zeros(3), onp.zeros(4)),\n TypeError,\n \"add got incompatible shapes for broadcasting: (3,), (4,).\")\n\n def test_dot_mismatch(self):\n def f(x, y):\n return np.dot(x, y)\n\n self.assertRaisesRegex(\n TypeError, \"Incompatible shapes for dot: got \\\\(3L?,\\\\) and \\\\(4L?,\\\\).\",\n lambda: grad(f)(onp.zeros(3), onp.zeros(4)))\n \n def test_abstract_error_message(self):\n for castfun in [float, complex, int]:\n def f(x):\n return castfun(x)\n\n self.assertRaisesRegex(\n TypeError,\n \"Try using `value.astype\\({}\\)` instead\".format(castfun.__name__),\n lambda: jit(f)(1.0))\n\n def test_switch_value_jit(self):\n def f(x):\n y = x > 0\n if y:\n return x\n else:\n return -x\n\n assert grad(f)(1.0) == 1.0\n assert grad(f)(-1.0) == -1.0\n jtu.check_raises(lambda: jit(f)(1), TypeError, concretization_err_msg(bool))\n\n def test_range_err(self):\n def f(x, n):\n for i in range(n):\n x = x + i\n return x\n\n assert jit(f, static_argnums=(1,))(0, 5) == 10\n self.assertRaisesRegex(\n TypeError,\n \"('JaxprTracer' object cannot be interpreted as an integer\"\n \"|Abstract value passed to .*)\",\n lambda: jit(f)(0, 5))\n\n def test_casts(self):\n for castfun in [float, complex, hex, oct, int]:\n f = lambda x: castfun(x)\n self.assertRaisesRegex(\n TypeError,\n \"('JaxprTracer' object cannot be interpreted as an integer\"\n \"|Abstract value passed to .*)\", lambda: jit(f)(0))\n\n def test_unimplemented_interpreter_rules(self):\n foo_p = 
Primitive('foo')\n def foo(x):\n return foo_p.bind(x)\n\n jtu.check_raises(lambda: foo(1.0), NotImplementedError,\n \"Evaluation rule for 'foo' not implemented\")\n\n jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,\n \"Abstract evaluation for 'foo' not implemented\")\n\n jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,\n \"Forward-mode differentiation rule for 'foo' not implemented\")\n\n foo_p.def_abstract_eval(lambda x: x)\n\n jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,\n \"XLA translation rule for primitive 'foo' not found\")\n\n foo_p.def_impl(lambda x: x)\n ad.defjvp(foo_p, lambda g, x: foo(g))\n\n jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,\n \"Reverse-mode differentiation rule for 'foo' not implemented\")\n\n def test_device_put_and_get(self):\n x = onp.arange(12.).reshape((3, 4)).astype(\"float32\")\n dx = api.device_put(x)\n self.assertIsInstance(dx, xla.DeviceArray)\n x2 = api.device_get(dx)\n self.assertIsInstance(x2, onp.ndarray)\n assert onp.all(x == x2)\n\n y = [x, (2 * x, 3 * x)]\n dy = api.device_put(y)\n y2 = api.device_get(dy)\n self.assertIsInstance(y2, list)\n self.assertIsInstance(y2[0], onp.ndarray)\n assert onp.all(y2[0] == x)\n self.assertIsInstance(y2[1], tuple)\n self.assertIsInstance(y2[1][0], onp.ndarray)\n assert onp.all(y2[1][0] == 2 * x)\n self.assertIsInstance(y2[1][1], onp.ndarray)\n assert onp.all(y2[1][1] == 3 * x)\n\n def test_device_put_across_devices(self):\n if xb.device_count() == 1:\n raise unittest.SkipTest(\"this test requires multiple devices\")\n d1, d2 = xb.local_devices()[:2]\n x = api.device_put(onp.array([1,2,3]), device=d1)\n self.assertEqual(x.device_buffer.device(), d1)\n y = api.device_put(x, device=d2)\n self.assertEqual(y.device_buffer.device(), d2)\n # Make sure these don't crash\n api.device_put(x)\n api.device_put(y)\n\n @jtu.skip_on_devices(\"cpu\")\n def test_device_put_across_platforms(self):\n default_device = jax.devices()[0]\n cpu_device = jax.devices(\"cpu\")[0]\n\n onp_arr = onp.array([1,2,3])\n scalar = 1\n device_arr = np.array([1,2,3])\n assert device_arr.device_buffer.device() is default_device\n\n for val in [onp_arr, device_arr, scalar]:\n x = api.device_put(val, device=cpu_device)\n self.assertEqual(x.device_buffer.device(), cpu_device)\n\n y = api.device_put(x)\n self.assertEqual(y.device_buffer.device(), default_device)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_jacobian(self):\n R = onp.random.RandomState(0).randn\n A = R(4, 3)\n x = R(3)\n\n f = lambda x: np.dot(A, x)\n assert onp.allclose(jacfwd(f)(x), A)\n assert onp.allclose(jacrev(f)(x), A)\n\n f = lambda x: np.tanh(np.dot(A, x))\n assert onp.allclose(jacfwd(f)(x), jacrev(f)(x))\n\n @jtu.skip_on_devices(\"tpu\")\n def test_hessian(self):\n R = onp.random.RandomState(0).randn\n A = R(4, 4)\n x = R(4)\n\n f = lambda x: np.dot(x, np.dot(A, x))\n assert onp.allclose(hessian(f)(x), A + A.T)\n\n def test_std_basis(self):\n basis = api._std_basis(np.zeros(3))\n assert getattr(basis, \"shape\", None) == (3, 3)\n assert onp.allclose(basis, onp.eye(3))\n\n basis = api._std_basis(np.zeros((3, 3)))\n assert getattr(basis, \"shape\", None) == (9, 3, 3)\n assert onp.allclose(basis, onp.eye(9).reshape(9, 3, 3))\n\n basis = api._std_basis([0., (np.zeros(3), np.zeros((3, 4)))])\n assert isinstance(basis, list) and len(basis) == 2\n assert getattr(basis[0], \"shape\", None) == (16,)\n assert isinstance(basis[1], tuple) and len(basis[1]) == 2\n assert getattr(basis[1][0], \"shape\", None) == (16, 3)\n assert 
getattr(basis[1][1], \"shape\", None) == (16, 3, 4)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_jacobian_on_pytrees(self):\n for jacfun in [jacfwd, jacrev]:\n ans = jacfun(lambda x, y: (x, y))(0., 1.)\n expected = (1., 0.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)\n expected = (0., 1.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)\n expected = ((1., 0.),\n (0., 1.),)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = jacfun(lambda x: x[:2])((1., 2., 3.))\n expected = ((1., 0., 0.),\n (0., 1., 0.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n R = onp.random.RandomState(0).randn\n x = R(2)\n y = R(3)\n ans = jacfun(lambda x, y: {'x': x, 'xy': np.outer(x, y)})(x, y)\n expected = {'x': onp.eye(2),\n 'xy': onp.kron(onp.eye(2), y[:, None]).reshape(2, 3, 2)}\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_hessian_on_pytrees(self):\n ans = hessian(lambda x: np.array(x)**2)((1., 2.))\n expected = ((onp.array([2., 0.]), onp.array([0., 0.])),\n (onp.array([0., 0.]), onp.array([0., 2.])))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_issue1372(self):\n def quad(x):\n return np.dot(x, x)\n\n def f(x, u):\n return quad(x) + quad(u)\n\n x, u = np.ones(5), np.ones(2)\n\n rev = jacrev\n fwd = jacfwd\n\n # Diagonal entries\n self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))\n self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))\n self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))\n self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))\n self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))\n\n # Off-diagonal entries by reverse-mode on the outside\n self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))\n self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))\n\n # Off-diagonal entries by forward-mode on the outside\n self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))\n self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))\n self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))\n\n def test_disable_jit(self):\n effects = []\n\n @api.jit\n def f(x):\n effects.append(1)\n return x\n\n with api.disable_jit():\n f(2)\n f(2)\n assert len(effects) == 2\n\n f(2)\n f(2)\n assert len(effects) == 3\n\n def test_large_device_constant(self):\n ans = jit(lambda x: 2 * x)(np.ones(int(2e6))) # doesn't crash\n self.assertAllClose(ans, onp.ones(int(2e6)) * 2., check_dtypes=False)\n\n def test_grad_and_aux_basic(self):\n g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)\n self.assertAllClose(g, grad(lambda x: x**3)(3.), check_dtypes=True)\n self.assertAllClose(aux, [9.], check_dtypes=False)\n\n def test_grad_and_aux_nested(self):\n def f(x):\n g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)\n return aux[0]\n\n f2 = lambda x: x**3\n\n self.assertEqual(grad(f)(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))\n\n def f(x):\n g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)\n return aux[0] * 
np.sin(x)\n\n f2 = lambda x: x**3 * np.sin(x)\n\n self.assertEqual(grad(f)(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))\n self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))\n\n def test_grad_and_aux_constant(self):\n g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)\n self.assertEqual(g, grad(lambda x: x**3)(4.))\n self.assertEqual(aux, [4.])\n\n g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)\n self.assertEqual(g, grad(lambda x: x**3)(4.))\n self.assertEqual(aux, [4.**2, 4.])\n\n def test_grad_and_aux_no_tracers(self):\n # see https://github.com/google/jax/issues/1950\n def f(x):\n aux = dict(identity=x, p1=x+1)\n return x ** 2, aux\n\n _, aux = jax.grad(f, has_aux=True)(3.)\n self.assertIsInstance(aux, dict)\n for val in aux.values():\n self.assertNotIsInstance(val, core.Tracer)\n\n def test_jvp_mismatched_arguments(self):\n self.assertRaisesRegex(\n TypeError,\n (\"primal and tangent arguments to jax.jvp must have the same tree \"\n \"structure\"),\n lambda: api.jvp(lambda x, y: x * y, (onp.float32(2),), ()))\n # If primals and tangents must both be tuples or both lists\n self.assertRaisesRegex(\n TypeError,\n (\"primal and tangent arguments to jax.jvp must have the same tree \"\n \"structure\"),\n lambda: api.jvp(lambda x, y: x * y, (onp.float32(2),), [onp.float32(2)]))\n self.assertRaisesRegex(\n TypeError,\n \"primal and tangent arguments to jax.jvp must have equal types\",\n lambda: api.jvp(lambda x: -x, (onp.float16(2),), (onp.float32(4),)))\n\n def test_jvp_non_tuple_arguments(self):\n def f(x, y): return x + y\n self.assertRaisesRegex(\n TypeError,\n \"primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.\",\n lambda: api.jvp(f, 0., (1.,)))\n self.assertRaisesRegex(\n TypeError,\n \"primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.\",\n lambda: api.jvp(f, (0.,), onp.array([1., 2.])))\n\n def test_vjp_mismatched_arguments(self):\n _, pullback = api.vjp(lambda x, y: x * y, onp.float32(3), onp.float32(4))\n self.assertRaisesRegex(\n TypeError,\n \"Tree structure of cotangent input.*does not match\",\n lambda: pullback((onp.float32(7), onp.float32(100))))\n self.assertRaisesRegex(\n TypeError,\n \"Type of cotangent input to vjp pullback.*does not match type\",\n lambda: pullback((onp.float16(42))))\n\n def test_jarrett_jvps(self):\n def f1(x):\n return np.sin(np.sin(np.sin(x)))\n f2 = api.jarrett(f1)\n\n for x in [3., onp.array([2., 3., 4.])]:\n self.assertAllClose(f1(x), f2(x), check_dtypes=True)\n\n _, f1_vjp = api.vjp(f1, x)\n _, f2_vjp = api.vjp(f2, x)\n self.assertAllClose(f1_vjp(x), f2_vjp(x), check_dtypes=True)\n\n # TODO(mattjj): test that constants/literals are set up properly\n # jaxpr2 = api.make_jaxpr(f2_vjp)(x)\n # assert len(jaxpr2.constvars) == 1\n\n def test_jarrett_jvps2(self):\n def f1(x, y):\n return np.sin(x) * np.cos(y) * np.sin(x) * np.cos(y)\n f2 = api.jarrett(f1)\n\n # TODO(mattjj): doesn't work for (3., onp.array([4., 5.]))\n for x, y in [(3., 4.), (onp.array([5., 6.]), onp.array([7., 8.]))]:\n self.assertAllClose(f1(x, y), f2(x, y), check_dtypes=True)\n\n _, f1_vjp = api.vjp(f1, x, y)\n _, f2_vjp = api.vjp(f2, x, y)\n self.assertAllClose(f1_vjp(y), f2_vjp(y), check_dtypes=True)\n\n # TODO(mattjj): test that constants/literals are set up properly\n # jaxpr2 = api.make_jaxpr(f2_vjp)(y)\n # assert len(jaxpr2.constvars) == 2\n\n def test_jvp_jit_cached(self):\n \"\"\"Bug in caching in presence of JVP and JIT.\"\"\"\n\n def func(x):\n def 
inner(y):\n return y * x\n\n # Must have two calls to the inner jit (the second one hits the cache)\n res1 = api.jit(inner)(4.)\n res2 = api.jit(inner)(5.)\n return res1 + res2\n\n self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)), check_dtypes=True)\n\n\n def test_complex_grad_raises_error(self):\n self.assertRaises(TypeError, lambda: grad(lambda x: np.sin(x))(1 + 2j))\n\n def test_holomorphic_grad(self):\n out = grad(lambda x: np.sin(x), holomorphic=True)(1 + 2j)\n expected = 2.0327230070196656 - 3.0518977991518j\n self.assertAllClose(out, expected, check_dtypes=False)\n\n def test_nonholomorphic_grad(self):\n zs = 0.5j * onp.arange(5) + onp.arange(5)\n\n def f(z):\n return np.sum(np.cos(np.abs(z)))\n\n ans = grad(f)(zs)\n expected = onp.array([ 0. +0.j,\n -0.80430663+0.40215331j,\n -0.70368982+0.35184491j,\n 0.1886467 -0.09432335j,\n 0.86873727-0.43436864j])\n self.assertAllClose(ans, expected, check_dtypes=False,\n atol=jtu.default_gradient_tolerance,\n rtol=jtu.default_gradient_tolerance)\n\n def test_complex_output_jacrev_raises_error(self):\n self.assertRaises(TypeError, lambda: jacrev(lambda x: np.sin(x))(1 + 2j))\n\n def test_nonholomorphic_jacrev(self):\n # code based on https://github.com/google/jax/issues/603\n zs = 0.5j * onp.arange(5) + onp.arange(5)\n\n def f(z):\n return np.cos(np.linalg.norm(2 * z))\n\n ans = jacrev(f)(zs)\n expected = grad(f)(zs)\n self.assertAllClose(ans, expected, check_dtypes=True)\n\n def test_complex_input_jacfwd_raises_error(self):\n self.assertRaises(TypeError, lambda: jacfwd(lambda x: np.sin(x))(1 + 2j))\n\n def test_defvjp_all(self):\n foo_p = Primitive('foo')\n def foo(x): return 2. * foo_p.bind(x)\n\n ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (4 * g * np.sin(x),)))\n val_ans, grad_ans = api.value_and_grad(foo)(3.)\n self.assertAllClose(val_ans, 2 * 3.**2, check_dtypes=False)\n self.assertAllClose(grad_ans, 4 * 2 * onp.sin(3.), check_dtypes=False)\n\n def test_defvjp_all_const(self):\n foo_p = Primitive('foo')\n def foo(x): return foo_p.bind(x)\n\n ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (12.,)))\n val_ans, grad_ans = api.value_and_grad(foo)(3.)\n self.assertAllClose(val_ans, 9., check_dtypes=False)\n self.assertAllClose(grad_ans, 12., check_dtypes=True)\n\n def test_defvjp_all_higher_order_revmode(self):\n foo_p = Primitive('foo')\n def foo(x): return 2. * foo_p.bind(x)\n\n ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (g * x ** 2,)))\n ans = api.grad(api.grad(foo))(3.)\n self.assertAllClose(ans, 2 * 2 * 3., check_dtypes=False)\n\n def test_defvjp_all_multiple_arguments(self):\n # also tests passing in symbolic zero tangents b/c we differentiate wrt only\n # the first argument in one case\n\n foo_p = Primitive('foo')\n def foo(x, y): return foo_p.bind(x, y)\n\n def vjpfun(x, y):\n out = x**2 + y**3\n vjp = lambda g: (g + x + y, g * x * 9.)\n return out, vjp\n\n ad.defvjp_all(foo_p, vjpfun)\n val_ans, grad_ans = api.value_and_grad(foo)(3., 4.)\n self.assertAllClose(val_ans, 3.**2 + 4.**3, check_dtypes=False)\n self.assertAllClose(grad_ans, 1. + 3. + 4., check_dtypes=False)\n\n ans = api.grad(foo, (0, 1))(3., 4.)\n self.assertAllClose(ans, (1. + 3. + 4., 1. * 3. 
* 9.), check_dtypes=False)\n\n def test_defvjp_all(self):\n @api.custom_transforms\n def foo(x):\n return np.sin(x)\n\n api.defvjp_all(foo, lambda x: (np.sin(x), lambda g: (g * x,)))\n val_ans, grad_ans = api.value_and_grad(foo)(3.)\n self.assertAllClose(val_ans, onp.sin(3.), check_dtypes=False)\n self.assertAllClose(grad_ans, 3., check_dtypes=False)\n\n # TODO(mattjj): add defvjp_all test with pytree arguments\n\n def test_defvjp(self):\n @api.custom_transforms\n def foo(x, y):\n return np.sin(x * y)\n\n api.defvjp(foo, None, lambda g, _, x, y: g * x * y)\n val_ans, grad_ans = api.value_and_grad(foo)(3., 4.)\n self.assertAllClose(val_ans, onp.sin(3. * 4.), check_dtypes=False)\n self.assertAllClose(grad_ans, 0., check_dtypes=False)\n\n ans_0, ans_1 = api.grad(foo, (0, 1))(3., 4.)\n self.assertAllClose(ans_0, 0., check_dtypes=False)\n self.assertAllClose(ans_1, 3. * 4., check_dtypes=False)\n\n def test_defvjp_higher_order(self):\n @api.custom_transforms\n def foo(x):\n return np.sin(2. * x)\n\n api.defvjp(foo, lambda g, _, x: g * np.cos(x))\n ans = api.grad(api.grad(foo))(2.)\n expected = api.grad(api.grad(np.sin))(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_defvjp_use_ans(self):\n @api.custom_transforms\n def foo(x, y):\n return np.sin(x * y)\n\n api.defvjp(foo, None, lambda g, ans, x, y: g * x * y + np.cos(ans))\n val_ans, grad_ans = api.value_and_grad(foo, 1)(3., 4.)\n self.assertAllClose(val_ans, onp.sin(3. * 4.), check_dtypes=False)\n self.assertAllClose(grad_ans, 3. * 4. + onp.cos(onp.sin(3. * 4)),\n check_dtypes=False)\n\n # TODO\n # def test_defjvp_closure_error(self):\n # def foo(x):\n # @api.custom_transforms\n # def bar(y):\n # return x * y\n\n # api.defjvp(bar, lambda y_dot, ans, y: x * y)\n # return bar(x)\n # jtu.check_raises(\n # lambda: api.jvp(foo, (1.,), (1.,)), ValueError,\n # \"Detected differentiation with respect to closed-over values with \"\n # \"custom JVP rule, which isn't supported.\")\n\n # TODO\n # def test_defvjp_closure_error(self):\n # def foo(x):\n # @api.custom_transforms\n # def bar(y):\n # return x * y\n\n # api.defvjp(bar, lambda g, ans, y: x * y)\n # return bar(x)\n # jtu.check_raises(\n # lambda: grad(foo)(1.,), ValueError,\n # \"Detected differentiation w.r.t. variables from outside \"\n # \"the scope of <jax.custom_transforms function bar>, but defvjp and \"\n # \"defvjp_all only support differentiation w.r.t. 
positional arguments.\")\n\n def test_custom_transforms_eval_with_pytrees(self):\n @api.custom_transforms\n def f(x):\n a, b = x[0], x[1]\n return {'hi': 2 * a, 'bye': 2 * b}\n\n ans = f((1, 2))\n self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})\n\n def test_custom_transforms_jit_with_pytrees(self):\n @api.custom_transforms\n def f(x):\n a, b = x[0], x[1]\n return {'hi': 2 * a, 'bye': 2 * b}\n\n ans = jit(f)((1, 2))\n self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})\n\n def test_custom_transforms_jit_with_pytrees_consts(self):\n # The purpose of this test is to exercise the custom_transforms default\n # translation rule in how it deals with constants that are too large to be\n # treated as literals (at the time of writing).\n z = onp.arange(10.)\n\n @api.custom_transforms\n def f(x):\n a, b = x[0], x[1]\n return {'hi': 2 * a, 'bye': z * b}\n\n ans = jit(f)((1, 2))\n self.assertAllClose(ans, {'hi': 2 * 1, 'bye': z * 2}, check_dtypes=False)\n\n def test_custom_transforms_jvp_with_pytrees(self):\n @api.custom_transforms\n def f(x):\n a, b = x[0], x[1]\n return {'hi': 2 * a, 'bye': 2 * b}\n\n ans, out_tangent = api.jvp(f, ((1, 2),), ((3, 4),))\n self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})\n self.assertEqual(out_tangent, {'hi': 2 * 3, 'bye': 2 * 4})\n\n def test_custom_transforms_vmap_with_pytrees(self):\n @api.custom_transforms\n def f(x):\n a, b = x[0], x[1]\n return {'hi': 2 * a, 'bye': 2 * b}\n\n ans = api.vmap(f)((onp.arange(3), onp.ones((3, 2))))\n expected = {'hi': 2 * onp.arange(3), 'bye': 2 * onp.ones((3, 2))}\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_custom_transforms_jvp_with_closure(self):\n def f(x):\n @api.custom_transforms\n def g(y):\n return x * y\n return g(x)\n\n ans = api.grad(f)(1.)\n expected = 2.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_custom_gradient(self):\n @api.custom_gradient\n def f(x):\n return x ** 2, lambda g: (g * x,)\n\n self.assertAllClose(f(3.), 9., check_dtypes=False)\n self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)\n\n def test_legacy_devicearray_repr(self):\n dx = device_put(3.)\n str(dx.item()) # doesn't crash\n\n def test_devicearray_repr(self):\n x = device_put(np.zeros(3))\n self.assertIsInstance(x, xla.DeviceArray)\n repr(x) # doesn't crash\n\n x = device_put(np.ones(3) + 1j * np.ones(3))\n self.assertIsInstance(x, xla.DeviceArray)\n repr(x) # doesn't crash\n\n def test_devicearray_delete(self):\n x = device_put(1.)\n x.delete()\n self.assertRaisesRegex(ValueError, \"DeviceValue has been deleted.\",\n lambda: repr(x))\n\n def test_devicearray_block_until_ready(self):\n x = device_put(1.)\n y = x.block_until_ready()\n # Tests mostly that block_until_ready() does not produce an error.\n self.assertTrue(y is x)\n\n def test_namedtuple_transparency(self):\n # See https://github.com/google/jax/issues/446\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n\n def f(pt):\n return np.sqrt(pt.x ** 2 + pt.y ** 2)\n\n pt = Point(1., 2.)\n\n f(pt) # doesn't crash\n g = api.grad(f)(pt)\n self.assertIsInstance(g, Point)\n\n f_jit = api.jit(f)\n self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)\n\n def test_namedtuple_subclass_transparency(self):\n # See https://github.com/google/jax/issues/806\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n\n class ZeroPoint(Point):\n def is_zero(self):\n return (self.x == 0) and (self.y == 0)\n\n pt = ZeroPoint(0., 0.)\n\n def f(pt):\n return 0. 
if pt.is_zero() else np.sqrt(pt.x ** 2 + pt.y ** 2)\n\n f(pt) # doesn't crash\n g = api.grad(f)(pt)\n self.assertIsInstance(pt, ZeroPoint)\n\n def test_eval_shape(self):\n def fun(x, y):\n return np.tanh(np.dot(x, y) + 3.)\n\n x = np.ones((2, 3))\n y = np.ones((3, 4))\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.shape, (2, 4))\n\n def test_eval_shape_constants(self):\n def fun():\n x = np.ones((2, 3))\n y = np.ones((3, 4))\n return np.tanh(np.dot(x, y) + 3.)\n\n out_shape = api.eval_shape(fun)\n\n self.assertEqual(out_shape.shape, (2, 4))\n\n def test_eval_shape_tuple_unpacking(self):\n def fun(x, y):\n a, b = x\n return a + b + y\n\n x = (np.ones(2), np.ones(2))\n y = 3.\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.shape, (2,))\n\n def test_eval_shape_tuple_itemgetting(self):\n def fun(x, y):\n return x[0] + x[1] + y\n\n x = (np.ones(2), np.ones(2))\n y = 3.\n out_shape = api.eval_shape(fun, x, y)\n\n self.assertEqual(out_shape.shape, (2,))\n\n def test_eval_shape_output_dict(self):\n def fun(x, y):\n return {'hi': x[0] + x[1] + y}\n\n x = (np.ones(2), np.ones(2))\n y = 3.\n out_shape = api.eval_shape(fun, x, y)\n out_shape = tree_util.tree_map(onp.shape, out_shape)\n\n self.assertEqual(out_shape, {'hi': (2,)})\n\n def test_eval_shape_shape_error(self):\n def fun(x, y):\n return np.tanh(np.dot(x, y) + 3.)\n\n x = np.ones((3, 3))\n y = np.ones((4, 4))\n\n self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))\n\n def test_eval_shape_duck_typing(self):\n def fun(A, b, x):\n return np.dot(A, x) + b\n\n class MyArgArray(object):\n def __init__(self, shape, dtype):\n self.shape = shape\n self.dtype = dtype\n\n A = MyArgArray((3, 4), np.float32)\n b = MyArgArray((5,), np.float32)\n x = MyArgArray((4, 5), np.float32)\n out_shape = api.eval_shape(fun, A, b, x)\n\n self.assertEqual(out_shape.shape, (3, 5))\n\n def test_issue_871(self):\n T = np.array([[1., 2.], [3., 4.], [5., 6.]])\n x = np.array([1, 2, 3])\n\n y, f_jvp = api.linearize(np.sum, x)\n jtu.check_raises(lambda: f_jvp(T), ValueError,\n (\"linearized function called on tangent values \"\n \"inconsistent with the original primal values.\"))\n\n y, f_jvp = api.linearize(api.jit(np.sum), x)\n jtu.check_raises(lambda: f_jvp(T), ValueError,\n (\"linearized function called on tangent values \"\n \"inconsistent with the original primal values.\"))\n\n def test_partial_eval_lower(self):\n # this is a simplified model of a bug that arose when we first used @jit in\n # a jvp rule. it's in this file because we want to use make_jaxpr.\n\n # NOTE(mattjj): I no longer understand what this was meant to test. My guess\n # is it was related to staging out the broadcast into a jaxpr to be\n # transposed, but after #1749 that's no longer a problem. 
After changing\n # make_jaxpr (and jit) to stage out sub-calls fully, this test started to\n # fail; I left it in as skipped because deleting tests feels wrong.\n raise unittest.SkipTest(\"obsolete test\")\n\n @api.jit\n def f(a, b, c):\n a = lax.broadcast(a, (2,))\n return lax.select(a, b, c)\n\n a = onp.ones((3, 3), dtype=onp.bool_)\n b = onp.ones((2, 3, 3))\n c = onp.ones((2, 3, 3))\n\n jaxpr = api.make_jaxpr(lambda b, c: f(a, b, c))(b, c)\n subjaxpr = next(eqn.bound_subjaxpr for eqn in jaxpr.jaxpr.eqns\n if eqn.bound_subjaxpr)\n self.assertEqual(len(subjaxpr.eqns), 1)\n\n def test_grad_of_int_errors(self):\n dfn = grad(lambda x: x ** 2)\n self.assertRaisesRegex(\n TypeError,\n \"Primal inputs to reverse-mode differentiation must be of float or \"\n \"complex type, got type int..\", lambda: dfn(3))\n\n def test_xla_computation(self):\n # these tests basically check the examples in the xla_computation docstring\n\n def h(x):\n return np.sin(np.cos(x))\n c = api.xla_computation(h)(2.)\n self.assertIn('cosine', c.GetHloText())\n self.assertIn('sine', c.GetHloText())\n\n def f(x):\n return x - lax.psum(x, 'i')\n axis_env = [('i', 4)]\n c = api.xla_computation(f, axis_env=axis_env)(2)\n self.assertIn('all-reduce', c.GetHloText())\n self.assertIn('replica_groups={{0,1,2,3}}', c.GetHloText())\n\n def g(x):\n rowsum = lax.psum(x, 'i')\n colsum = lax.psum(x, 'j')\n allsum = lax.psum(x, ('i', 'j'))\n return rowsum, colsum, allsum\n axis_env = [('i', 4), ('j', 2)]\n c = api.xla_computation(g, axis_env=axis_env)(5.)\n self.assertIn('all-reduce', c.GetHloText())\n self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.GetHloText())\n self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.GetHloText())\n self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.GetHloText())\n\n def test_xla_computation_args(self):\n def foo(x, y, z):\n return x + y + z\n\n c = api.xla_computation(foo)(1., 2., 3.)\n self.assertEqual(len(c.GetProgramShape().parameter_shapes()), 3)\n\n c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)\n param_shapes = c.GetProgramShape().parameter_shapes()\n self.assertEqual(len(param_shapes), 1)\n self.assertEqual(param_shapes[0].xla_element_type(),\n xb.xla_client.PrimitiveType.TUPLE)\n\n def test_staging_out_multi_replica(self):\n def f(x):\n return api.pmap(np.mean)(x)\n xla_comp = api.xla_computation(f)\n xla_comp(np.arange(8)).GetHloText() # doesn't crash\n\n def test_xla_computation_instantiate_constant_outputs(self):\n def f():\n return np.zeros((3, 4))\n\n xla_comp = api.xla_computation(f, instantiate_const_outputs=True)()\n out_shape, = xla_comp.GetReturnValueShape().tuple_shapes()\n self.assertEqual(out_shape.dimensions(), (3, 4))\n\n def test_jit_device(self):\n device = xb.devices()[-1]\n x = api.jit(lambda x: x, device=device)(3.)\n self.assertIsInstance(x, xla.DeviceArray)\n self.assertEqual(x.device_buffer.device(), device)\n\n def test_jit_of_noncallable(self):\n self.assertRaisesRegex(TypeError, \"Expected a callable value.*\",\n lambda: api.jit(3))\n\n def test_issue_1062(self):\n # code from https://github.com/google/jax/issues/1062 @shoyer\n # this tests, among other things, whether ShardedDeviceTuple constants work\n device_count = xb.device_count()\n\n @jit\n def multi_step(state, count):\n return lax.fori_loop(0, count, lambda i, s: s, state)\n\n @jit\n def multi_step_pmap(state, count=2):\n @partial(api.pmap, axis_name='x')\n def pmapped_multi_step(state):\n return multi_step(state, count)\n\n return pmapped_multi_step(state)\n\n u = 
np.ones((device_count, 100))\n u_final = multi_step_pmap(u) # doesn't crash\n\n def test_concurrent_device_get_and_put(self):\n def f(x):\n for _ in range(100):\n y = jax.device_put(x)\n x = jax.device_get(y)\n return x\n\n xs = [onp.random.randn(i) for i in range(10)]\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = [executor.submit(partial(f, x)) for x in xs]\n ys = [f.result() for f in futures]\n for x, y in zip(xs, ys):\n self.assertAllClose(x, y, check_dtypes=True)\n\n def test_concurrent_jit(self):\n @jit\n def f(x):\n return x + x - 3.\n\n xs = [onp.random.randn(i) for i in range(10)]\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = [executor.submit(partial(f, x)) for x in xs]\n ys = [f.result() for f in futures]\n for x, y in zip(xs, ys):\n self.assertAllClose(x * 2 - 3., y, check_dtypes=True)\n\n def test_dtype_warning(self):\n # cf. issue #1230\n if FLAGS.jax_enable_x64:\n return # test only applies when x64 is disabled\n\n def check_warning(warn, nowarn):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n nowarn() # get rid of extra startup warning\n\n prev_len = len(w)\n nowarn()\n assert len(w) == prev_len\n\n warn()\n assert len(w) > 0\n msg = str(w[-1].message)\n expected_prefix = \"Explicitly requested dtype \"\n self.assertEqual(expected_prefix, msg[:len(expected_prefix)])\n\n prev_len = len(w)\n nowarn()\n assert len(w) == prev_len\n\n check_warning(lambda: np.array([1, 2, 3], dtype=\"float64\"),\n lambda: np.array([1, 2, 3], dtype=\"float32\"),)\n check_warning(lambda: np.ones(3, dtype=onp.float64),\n lambda: np.ones(3))\n check_warning(lambda: np.ones_like(3, dtype=onp.int64),\n lambda: np.ones_like(3, dtype=onp.int32))\n check_warning(lambda: np.zeros(3, dtype=\"int64\"),\n lambda: np.zeros(3, dtype=\"int32\"))\n check_warning(lambda: np.zeros_like(3, dtype=\"float64\"),\n lambda: np.zeros_like(3, dtype=\"float32\"))\n check_warning(lambda: np.full((2, 3), 1, dtype=\"int64\"),\n lambda: np.full((2, 3), 1))\n check_warning(lambda: np.ones(3).astype(\"float64\"),\n lambda: np.ones(3).astype(\"float32\"))\n check_warning(lambda: np.eye(3, dtype=onp.float64),\n lambda: np.eye(3))\n check_warning(lambda: np.arange(3, dtype=onp.float64),\n lambda: np.arange(3, dtype=onp.float32))\n check_warning(lambda: np.linspace(0, 3, dtype=onp.float64),\n lambda: np.linspace(0, 3, dtype=onp.float32))\n check_warning(lambda: np.tri(2, dtype=\"float64\"),\n lambda: np.tri(2, dtype=\"float32\"))\n\n def test_custom_vjp_zeros(self):\n @api.custom_transforms\n def f(x, y):\n return 2 * x, 3 * y\n\n def f_vjp(x, y):\n return (2 * x, 3 * y), lambda ts: (4 * ts[0], 5 * ts[1])\n\n api.defvjp_all(f, f_vjp, )\n api.grad(lambda x, y: f(x, y)[0])(1., 2.) 
# doesn't crash\n\n def test_custom_transforms_vjp_nones(self):\n # issue rasied by jsnoek@ and jumper@\n @jax.custom_transforms\n def solve(a, b):\n return np.dot(np.linalg.inv(a), b)\n # print(solve(a, b))\n\n def solve_vjp(a, b):\n x = solve(a, b)\n def vjp(x_tangent):\n dx = np.dot(solve(a, x_tangent), x.T)\n out = (dx, b * 0.)\n return out\n return x, vjp\n jax.defvjp_all(solve, solve_vjp)\n gf = grad(lambda a,b: np.sum(solve(a, b)))\n\n n = 3\n a_in = np.linspace(0, 1, n)[:, None]\n a = np.dot(a_in, a_in.T) + np.eye(n) * 0.1\n real_x = onp.random.RandomState(0).randn(n)\n b = np.dot(a + np.eye(a.shape[0]), real_x)\n print(gf(a, b)) # doesn't crash\n\n def test_vmap_in_axes_tree_prefix_error(self):\n # https://github.com/google/jax/issues/795\n self.assertRaisesRegex(\n ValueError,\n \"axes specification must be a tree prefix of the corresponding \"\n r\"value, got specification \\(0, 0\\) for value \"\n r\"PyTreeDef\\(tuple, \\[\\*\\]\\).\",\n lambda: api.vmap(lambda x: x, in_axes=(0, 0))(np.ones(3))\n )\n\n def test_vmap_unbatched_object_passthrough_issue_183(self):\n # https://github.com/google/jax/issues/183\n fun = lambda f, x: f(x)\n vfun = api.vmap(fun, (None, 0))\n ans = vfun(lambda x: x + 1, np.arange(3))\n self.assertAllClose(ans, onp.arange(1, 4), check_dtypes=False)\n\n def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):\n # https://github.com/google/jax/issues/705\n def h(a, b):\n return np.sum(a) + np.sum(b)\n\n X = onp.random.randn(10, 4)\n U = onp.random.randn(10, 2)\n\n self.assertRaisesRegex(\n ValueError,\n \"vmap got inconsistent sizes for array axes to be mapped:\\n\"\n r\"arg 0 has shape \\(10, 4\\) and axis 0 is to be mapped\" \"\\n\"\n r\"arg 1 has shape \\(10, 2\\) and axis 1 is to be mapped\" \"\\n\"\n \"so\\n\"\n \"arg 0 has an axis to be mapped of size 10\\n\"\n \"arg 1 has an axis to be mapped of size 2\",\n lambda: api.vmap(h, in_axes=(0, 1))(X, U))\n\n self.assertRaisesRegex(\n ValueError,\n \"vmap got inconsistent sizes for array axes to be mapped:\\n\"\n r\"arg 0 has shape \\(10, 4\\) and axis 0 is to be mapped\" \"\\n\"\n r\"arg 1 has shape \\(10, 2\\) and axis 1 is to be mapped\" \"\\n\"\n r\"arg 2 has shape \\(10, 4\\) and axis 0 is to be mapped\" \"\\n\"\n \"so\\n\"\n \"args 0, 2 have axes to be mapped of size 10\\n\"\n \"arg 1 has an axis to be mapped of size 2\",\n lambda: api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X))\n\n self.assertRaisesRegex(\n ValueError,\n \"vmap got inconsistent sizes for array axes to be mapped:\\n\"\n \"the tree of axis sizes is:\\n\"\n r\"\\(10, \\[2, 2\\]\\)\",\n lambda: api.vmap(h, in_axes=(0, 1))(X, [U, U]))\n\n def test_vmap_structured_in_axes(self):\n\n A, B, C, D = 2, 3, 4, 5\n K = 6 # batch size\n x = onp.ones((K, A, B)) # batch axis in different locations\n y = onp.ones((B, K, C))\n z = onp.ones((C, D, K))\n\n def foo(tree_arg):\n x, (y, z) = tree_arg\n return np.dot(x, np.dot(y, z))\n\n tree = (x, (y, z))\n vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n tree = (x, Point(y, z))\n vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n def foo(tree_arg):\n x, dct = tree_arg\n y, z = dct['a'], dct['b']\n return np.dot(x, np.dot(y, z))\n\n tree = (x, {'a':y, 'b':z})\n vfoo = api.vmap(foo, in_axes=((0, {'a':1, 'b':2}),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n tree = (x, collections.OrderedDict([('a', y), ('b', z)]))\n vfoo 
= api.vmap(\n foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))\n self.assertEqual(vfoo(tree).shape, (6, 2, 5))\n\n def test_jit_reference_dropping(self):\n x = onp.ones(10)\n f = (lambda x: lambda: x)(x) # reference to x in f's closure\n g = jit(f)\n x = weakref.ref(x) # no more strong ref to x in this scope\n assert x() is not None # x is still around\n f() # f runs\n g() # g runs\n g() # g runs a second time\n del f # delete the raw callable\n assert x() is not None # x is still around\n g() # g still runs\n del g # no more references to x\n assert x() is None # x is gone\n\n def test_jit_global_cache(self):\n def f(x):\n assert python_should_be_executing\n return x\n\n python_should_be_executing = True\n api.jit(f)(2)\n python_should_be_executing = False\n api.jit(f)(3)\n\n def test_jit_shallow_copy(self):\n def f(x):\n return copy.copy(x)\n api.jit(f)(1)\n\n def test_jit_deep_copy(self):\n def f(x):\n return copy.deepcopy(x)\n api.jit(f)(1)\n\n def test_pmap_global_cache(self):\n def f(x):\n assert python_should_be_executing\n return x\n\n x = onp.ones(1)\n\n python_should_be_executing = True\n api.pmap(f)(x)\n python_should_be_executing = False\n api.pmap(f)(x)\n\n python_should_be_executing = True\n api.pmap(f, 'i')(x)\n python_should_be_executing = False\n api.pmap(f, 'i')(x)\n\n def test_device_array_repr(self):\n rep = repr(np.ones(()) + 1.)\n self.assertStartsWith(rep, 'DeviceArray')\n\n def test_grad_without_enough_args_error_message(self):\n # https://github.com/google/jax/issues/1696\n def f(x, y): return x + y\n df = api.grad(f, argnums=0)\n self.assertRaisesRegex(\n TypeError,\n \"differentiating with respect to argnums=0 requires at least 1 \"\n \"positional arguments to be passed by the caller, but got only 0 \"\n \"positional arguments.\",\n lambda: partial(df, x=0.)(y=1.))\n\n def test_grad_of_jit_compilation_caching(self):\n if not hasattr(self, \"assertLogs\"):\n raise unittest.SkipTest(\"test requires assertLogs (python 3)\")\n\n lax.add(1, 2) # make sure some initial warnings are already printed\n\n sin = api.jit(np.sin)\n\n prev_level = logging.get_verbosity()\n try:\n logging.set_verbosity('DEBUG')\n with self.assertLogs(level=logging.DEBUG) as l:\n ans1 = api.grad(sin)(2.)\n ans2 = api.grad(sin)(3.)\n finally:\n logging.set_verbosity(prev_level)\n self.assertLen(l.output, 2)\n\n self.assertAllClose(ans1, onp.cos(2.), check_dtypes=False)\n self.assertAllClose(ans2, onp.cos(3.), check_dtypes=False)\n\n def test_remat_basic(self):\n @api.remat\n def g(x):\n return lax.sin(lax.sin(x)), 3.\n\n def f(x):\n x, _ = g(x)\n return x\n\n ans = f(2.)\n expected = onp.sin(onp.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans, f_lin = api.linearize(f, 2.)\n expected = onp.sin(onp.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = f_lin(3.)\n expected = onp.cos(onp.sin(2.)) * onp.cos(2.) 
* 3.\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n sin_calls = []\n cos_calls = []\n sin_impl = lax.sin_p.impl\n cos_impl = lax.cos_p.impl\n try:\n lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))\n lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))\n f_lin(3.)\n finally:\n lax.sin_p.def_impl(sin_impl)\n lax.cos_p.def_impl(cos_impl)\n self.assertEqual(len(sin_calls), 1)\n self.assertEqual(len(cos_calls), 2)\n\n def test_remat_freevars(self):\n def f1(x):\n y = 2 * np.sin(x)\n z = np.cos(x) * np.sin(y)\n return z\n\n def f2(x):\n y = 2 * np.sin(x)\n z = api.remat(lambda x: np.cos(x) * np.sin(y))(x)\n return z\n\n ans, f_lin = api.linearize(f2, 2.)\n expected, f_lin_expected = api.linearize(f1, 2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = f_lin(3.)\n expected = f_lin_expected(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_grad_python_control_flow(self):\n @partial(api.remat, concrete=True)\n def g(x):\n if x > 0:\n return lax.sin(x), 3.\n else:\n return lax.cos(x), 4.\n\n def f(x):\n x, _ = g(x)\n return x\n\n ans = f(2.)\n expected = onp.sin(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(f)(2.)\n expected = onp.cos(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_jit(self):\n @api.remat\n def g(x):\n return lax.sin(lax.sin(x))\n\n def f_(x):\n return g(x)\n f = api.jit(f_)\n\n ans = f(2.)\n expected = onp.sin(onp.sin(2.))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(f)(2.)\n expected = onp.cos(onp.sin(2.)) * onp.cos(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jit(api.grad(f_))(2.)\n expected = onp.cos(onp.sin(2.)) * onp.cos(2.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_vmap(self):\n @api.remat\n def g(x):\n return lax.sin(lax.sin(x))\n\n x = onp.arange(3.)\n\n ans = api.vmap(g)(x)\n expected = onp.sin(onp.sin(x))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jacfwd(g)(x)\n expected = onp.diag(onp.cos(onp.sin(x)) * onp.cos(x))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.jacrev(g)(x)\n expected = onp.diag(onp.cos(onp.sin(x)) * onp.cos(x))\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_higher_order_autodiff(self):\n def f(x):\n return lax.cos(lax.sin(x))\n g = api.remat(f)\n\n ans = api.grad(api.grad(g))(3.)\n expected = api.grad(api.grad(f))(3.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def test_remat_scan(self):\n to_scan = lambda c, x: (np.sin(c), None)\n\n def f_noremat(x):\n y, _ = lax.scan(to_scan, x, onp.arange(3.))\n return y\n\n def f_yesremat(x):\n y, _ = lax.scan(api.remat(to_scan), x, onp.arange(3.))\n return y\n\n ans = f_yesremat(4.)\n expected = f_noremat(4.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n ans = api.grad(f_yesremat)(4.)\n expected = api.grad(f_noremat)(4.)\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)\n scan_eqn, = jaxpr.jaxpr.eqns\n self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))\n\n jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)\n scan_eqn, = jaxpr.jaxpr.eqns\n self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))\n\n def test_remat_no_redundant_flops(self):\n # see https://github.com/google/jax/pull/1749#issuecomment-558267584\n\n @api.jit\n def g(x):\n return f(2., x)\n\n @api.remat\n def f(x, y):\n 
return np.sin(x) * y\n\n # We swap out sin_p's impl rule to count how many times it's invoked\n called = []\n sin_impl = lax.sin_p.impl\n try:\n lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))\n api.grad(g)(3.)\n finally:\n lax.sin_p.def_impl(sin_impl)\n num_calls = len(called)\n self.assertEqual(num_calls, 1)\n\n def test_remat_binomial_checkpointing(self):\n def binom_checkpoint(funs):\n if len(funs) == 1:\n return funs[0]\n else:\n f1 = binom_checkpoint(funs[:len(funs)//2])\n f2 = binom_checkpoint(funs[len(funs)//2:])\n return api.remat(lambda x: f1(f2(x)))\n\n f1 = binom_checkpoint([np.sin, np.sin, np.sin, np.sin])\n f2 = lambda x: np.sin(np.sin(np.sin(np.sin(x))))\n x = 4.\n self.assertAllClose(f1(x), f2(x), check_dtypes=False)\n self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)\n\n def test_remat_symbolic_zeros(self):\n # code from https://github.com/google/jax/issues/1907\n test_remat = True\n test_scan = True\n\n key = jax.random.PRNGKey(0)\n key, split = jax.random.split(key)\n n = 5\n\n def func(D0):\n def shift(R, dR, **unused_kwargs):\n return R + dR\n\n def apply_fn(R):\n return D0 * R\n\n Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,\n dtype=np.float32)\n\n def move(R,i):\n F = apply_fn(R)\n return shift(R, 0.001 * F), np.array([0.])\n\n move = api.remat(move)\n R, temp = lax.scan(move, Rinit, np.arange(2))\n return R[0, 0]\n\n api.grad(func)(5.0) # doesn't crash\n\n def test_remat_jit2(self):\n @api.jit\n def f(x):\n y = 2 * x\n\n @api.remat\n def g():\n return y\n\n return g()\n\n self.assertAllClose(f(3), 6, check_dtypes=False)\n\n def test_remat_nontrivial_env(self):\n # simplified from https://github.com/google/jax/issues/2030\n\n @api.remat\n def foo(state, dt=0.5, c=1):\n u, u_t = state\n u_tt = c**2 * u\n u_t = u_t + u_tt * dt\n return (u, u_t)\n\n @partial(api.jit, static_argnums=(1,))\n def _multi_step(state, count, dt, c):\n f = lambda s, _: (foo(s, dt, c), _)\n return lax.scan(f, state, None, count)\n\n def multi_step(state, count, dt=1/np.sqrt(2), c=1):\n return _multi_step(state, count, dt, c)\n\n def loss(u0, target, steps, dt=1/np.sqrt(2), c=1):\n init = (u0, np.zeros_like(u0))\n (uf, _), _ = multi_step(init, steps, dt, c)\n return ((uf - target) ** 2).mean()\n\n target = np.zeros((128, 128))\n u0 = np.ones_like(target)\n loss(u0, target, 10) # doesn't crash\n\n def test_trivial_computations(self):\n x = np.array([1, 2, 3])\n y = api.jit(lambda x: x)(x)\n self.assertIs(x, y)\n\n z1, z2 = api.jit(lambda x: (x, x))(x)\n self.assertIs(z1, z2)\n\n x1, x2 = np.array([1, 2]), np.array([2, 3])\n z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)\n self.assertIs(z1, x2)\n self.assertIs(z3, x1)\n self.assertEqual(z2, 1)\n\n def test_nested_jit_hoisting(self):\n @api.jit\n def f(x, y):\n z = 2 * x\n return y + z, 3\n\n @api.jit\n def g(x):\n return f(2, x)\n\n jaxpr_subcomp = xla.jaxpr_subcomp\n\n jaxprs = []\n def jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):\n jaxprs.append(jaxpr)\n return jaxpr_subcomp(c, jaxpr, *args, **kwargs)\n\n try:\n xla.jaxpr_subcomp = jaxpr_subcomp_and_collect\n ans = g(3)\n finally:\n xla.jaxpr_subcomp = jaxpr_subcomp\n\n self.assertEqual(ans, (7, 3))\n self.assertLen(jaxprs, 2)\n outer_jaxpr, inner_jaxpr = jaxprs\n\n self.assertLen(outer_jaxpr.eqns, 1)\n self.assertEqual(outer_jaxpr.eqns[0].primitive.name, 'xla_call')\n subjaxpr_1 = outer_jaxpr.eqns[0].bound_subjaxpr\n self.assertEqual(str(subjaxpr_1), str(inner_jaxpr))\n self.assertLen(inner_jaxpr.eqns, 2)\n 
self.assertEqual(inner_jaxpr.eqns[0].primitive.name, 'mul')\n self.assertEqual(inner_jaxpr.eqns[1].primitive.name, 'add')\n\n def test_primitive_compilation_cache(self):\n with jtu.count_primitive_compiles() as count:\n lax.add(1, 2)\n lax.add(2, 3)\n self.assertEqual(count[0], 1)\n\n def test_arange_jit(self):\n # see https://github.com/google/jax/issues/553\n def fun(x):\n r = np.arange(x.shape[0])[x]\n return r\n\n jit(fun)(np.array([0, 1, 2], dtype=np.int32)) # doesn't crash\n\n\nclass JaxprTest(jtu.JaxTestCase):\n\n def test_scalar_literals(self):\n jaxpr = api.make_jaxpr(lambda x: x + 2)(42)\n self.assertLen(jaxpr.jaxpr.constvars, 0)\n\n def test_const(self):\n def fun(x):\n return (x, 1., np.zeros(1))\n\n jaxpr = api.make_jaxpr(fun)(0.)\n self.assertMultiLineStrippedEqual(str(jaxpr), \"\"\"\n { lambda b ; a.\n let\n in [a, 1.0, b] }\n \"\"\")\n\n def test_cond(self):\n def f(x):\n return lax.cond(x >= 0.,\n x + 1.,\n lambda xt: xt + x,\n x + 2.,\n lambda xf: xf - x)\n jaxpr = api.make_jaxpr(f)(3.)\n self.assertMultiLineStrippedEqual(str(jaxpr), \"\"\"\n { lambda ; a.\n let b = ge a 0.0\n c = add a 1.0\n d = add a 2.0\n e = cond[ false_jaxpr={ lambda ; b a.\n let c = sub a b\n in [c] }\n linear=(False, False, False, False)\n true_jaxpr={ lambda ; b a.\n let c = add a b\n in [c] } ] b a c a d\n in [e] }\n \"\"\")\n\n\nclass LazyTest(jtu.JaxTestCase):\n\n @contextmanager\n def count_compiles(self):\n\n make_computation_builder = xb.make_computation_builder\n count = [0]\n\n def make_computation_builder_and_count(*args, **kwargs):\n count[0] += 1\n return make_computation_builder(*args, **kwargs)\n\n xb.make_computation_builder = make_computation_builder_and_count\n try:\n yield count\n finally:\n xb.make_computation_builder = make_computation_builder\n\n @jtu.skip_on_devices(\"tpu\")\n def test_lazy_jit_closed_over_values(self):\n if not core.skip_checks:\n raise unittest.SkipTest(\"oom test skipped when core.skip_checks is False\")\n\n y = np.arange(int(1e12)) # will likely oom if materialized\n ans = jit(lambda x: (x + y)[1])(1)\n self.assertEqual(ans, 2)\n\n def test_jit_forces_arguments(self):\n\n @api.jit\n def f(x):\n assert python_should_be_executing\n return np.sum(x)\n\n x = np.arange(10, dtype=np.int32)\n assert xla.is_device_constant(x) # lazy iota\n\n python_should_be_executing = True\n _ = f(x)\n\n python_should_be_executing = False # should not recompile\n x = onp.arange(10, dtype=onp.int32)\n _ = f(x)\n\n @parameterized.parameters(jtu.cases_from_list(range(10000)))\n def test_random_lazy_program(self, seed):\n\n def random_array(rng):\n kind = rng.choice(['arr', 'iota', 'eye', 'tri'])\n if kind == 'arr':\n dtype = [onp.float32, onp.int32][rng.choice(2)]\n dim = rng.randint(4)\n shape = rng.randint(4, size=dim)\n onp_x = onp.asarray(rng.randn(*shape), dtype=dtype)\n jax_x = np.array(onp_x, dtype=dtype)\n elif kind == 'iota':\n dtype = [onp.float32, onp.int32][rng.choice(2)]\n size = rng.randint(5)\n onp_x = onp.arange(size, dtype=dtype)\n jax_x = lax.iota(dtype, size)\n elif kind == 'eye':\n dtype = [onp.float32, onp.int32][rng.choice(2)]\n N = rng.randint(2, 5)\n M = None if rng.rand() < 0.5 else rng.randint(2, 5)\n k = rng.choice([-1, 0, 1])\n onp_x = onp.eye(N, M, k, dtype=dtype)\n jax_x = np.eye(N, M, k, dtype=dtype)\n elif kind == 'tri':\n dtype = [onp.float32, onp.int32][rng.choice(2)]\n N = rng.randint(2, 5)\n M = None if rng.rand() < 0.5 else rng.randint(2, 5)\n k = rng.choice([-1, 0, 1])\n onp_x = onp.tri(N, M, k, dtype=dtype)\n jax_x = np.tri(N, M, k, 
dtype=dtype)\n else:\n assert False\n assert type(onp_x) is onp.ndarray and type(jax_x) is xla.DeviceArray\n return onp_x, jax_x\n\n def random_op(rng, shape):\n kind = rng.choice(['transpose', 'broadcast', 'reshape'])\n if kind == 'transpose':\n perm = tuple(rng.permutation(len(shape)))\n return Op(partial(onp.transpose, axes=perm),\n partial(lax.transpose, permutation=perm))\n elif kind == 'broadcast':\n n = rng.randint(1, 3)\n new_sizes = rng.randint(1, 4, size=n)\n new_ndim = n + len(shape)\n bcast_dims = tuple(sorted(rng.permutation(new_ndim)[:len(shape)]))\n shape_iter = iter(shape)\n new_sizes = iter(rng.randint(1, 4, size=n))\n new_shape = [next(shape_iter) if i in bcast_dims else next(new_sizes)\n for i in range(new_ndim)]\n return Op(partial(lax_reference.broadcast_in_dim, shape=new_shape,\n broadcast_dimensions=bcast_dims),\n partial(lax.broadcast_in_dim, shape=new_shape,\n broadcast_dimensions=bcast_dims))\n elif kind == 'reshape':\n new_shape = list(shape)\n for _ in range(rng.randint(1, 3)):\n loc = len(new_shape) and rng.randint(len(new_shape))\n new_shape.insert(loc, 1)\n new_shape = tuple(new_shape)\n return Op(partial(onp.reshape, newshape=new_shape),\n partial(lax.reshape, new_sizes=new_shape))\n else:\n assert False\n Op = collections.namedtuple('Op', ['onp_fn', 'jax_fn'])\n\n rng = onp.random.RandomState(seed)\n onp_x, jax_x = _, orig_x = random_array(rng)\n ops = []\n with jtu.count_primitive_compiles() as count:\n for _ in range(rng.randint(5)):\n op = random_op(rng, onp.shape(onp_x))\n onp_x = op.onp_fn(onp_x)\n jax_x = op.jax_fn(jax_x)\n ops.append(op)\n self.assertEqual(count[0], 0)\n\n kind = rng.choice(['closure', 'npy_value', 'force', 'add'])\n if kind == 'closure':\n result = api.jit(lambda x: x + jax_x)(0)\n self.assertAllClose(onp_x, result, check_dtypes=False)\n elif kind == 'npy_value':\n self.assertAllClose(onp_x, jax_x, check_dtypes=False)\n elif kind == 'force':\n result = xla._force(jax_x)\n self.assertAllClose(onp_x, result, check_dtypes=False)\n elif kind == 'add':\n result = jax_x + onp.zeros(jax_x.shape, dtype=jax_x.dtype)\n self.assertAllClose(onp_x, result, check_dtypes=False)\n else:\n assert False\n\n @jit\n def apply_ops(x):\n for op in ops:\n x = op.jax_fn(x)\n return x\n\n jit_result = apply_ops(orig_x)\n self.assertAllClose(jit_result, onp_x, check_dtypes=False)\n\n @jit\n def apply_ops_closure():\n x = orig_x\n for op in ops:\n x = op.jax_fn(x)\n return x\n\n jit_result = apply_ops_closure()\n self.assertAllClose(jit_result, onp_x, check_dtypes=False)\n\n def test_constant_forcing_computations_cached(self):\n # from https://github.com/google/jax/issues/1909\n xla._lazy_force_computation.cache_clear() # clear force compile cache\n big_lazy_x = np.ones((api.device_count(), 100))\n f = api.pmap(lambda x: 2 * x)\n _ = f(big_lazy_x)\n\n with self.count_compiles() as count:\n _ = f(big_lazy_x)\n self.assertEqual(count[0], 0)\n\n def test_zeros_ones_compilation(self):\n w = np.ones(3) + np.ones(3) # ensure + has a cache entry\n w.block_until_ready()\n\n xla._lazy_force_computation.cache_clear() # clear force compile cache\n\n with self.count_compiles() as count:\n x = np.ones(3) + np.zeros(3)\n y = np.ones(3) + np.ones(3)\n\n self.assertEqual(count[0], 1)\n self.assertAllClose(x, onp.ones(3), check_dtypes=False)\n self.assertAllClose(y, onp.ones(3) + onp.ones(3), check_dtypes=False)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"numpy.arange",
"numpy.eye",
"numpy.float16",
"numpy.cos",
"numpy.ones",
"numpy.all",
"numpy.sin",
"numpy.random.randn",
"numpy.shape",
"numpy.float32",
"numpy.tri",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liuchenxjtu/Lung_Cancer | [
"fcdaaab44b13dbd9c077470312922f1d640e86b3"
] | [
"convert0.py"
] | [
"#!/usr/bin/env python\n#\n# Copyright 2017 Anil Thomas\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nConvert DICOM images to video data\n\"\"\"\n\nfrom __future__ import print_function, division\nimport os\nimport sys\nimport SimpleITK as sitk\nimport numpy as np\nimport pandas as pd\nimport functools\nimport multiprocessing\nfrom scipy import ndimage\nfrom glob import glob\n\nimport settings\nimport video\nimport mask\n\n\ndef process_annotations(uid, annots, origin, labels,shape, starts):\n if not is_train:\n return (True, -1)\n\n uid_data = annots[annots['seriesuid'] == uid]\n if uid_data.shape[0] == 0:\n return (True, 0)\n\n locs = []\n for idx in range(uid_data.shape[0]):\n row = uid_data.iloc[idx]\n center = np.array([row['coordZ'], row['coordY'], row['coordX']])\n diam = row['diameter_mm']\n if diam == -1:\n diam = 0\n diam /= settings.resolution\n flag = 0 if diam == 0 else 1\n vox_center = np.int32(np.rint((center - origin)/settings.resolution))\n vox_center -= starts\n for i in range(3):\n if vox_center[i] < 0:\n return (False, 0)\n if vox_center[i] >= shape[i]:\n return (False, 0)\n vol = 0 if diam == 0 else 4*np.pi*((diam/2)**3)/3\n locs.append(dict(uid=uid, flag=flag,\n z=vox_center[0], y=vox_center[1], x=vox_center[2],\n diam=diam, vol=vol))\n\n filtered, is_positive = filter_cands(locs)\n for line in filtered:\n labels.loc[labels.shape[0]] = line\n return (True, is_positive)\ndef process_candidates(uid, annots, origin, candidates,shape, starts):\n if not is_train:\n return (True, -1)\n\n uid_data = annots[annots['uid'] == uid]\n if uid_data.shape[0] == 0:\n return (True, 0)\n\n locs = []\n for idx in range(uid_data.shape[0]):\n row = uid_data.iloc[idx]\n center = np.array([row['z'], row['y'], row['x']])\n diam = row['diam']\n if diam == 1:\n diam = 0\n diam /= settings.resolution\n flag = 0 if diam == 0 else 1\n vox_center = np.int32(np.rint((center - origin)/settings.resolution))\n vox_center -= starts\n for i in range(3):\n if vox_center[i] < 0:\n return (False, 0)\n if vox_center[i] >= shape[i]:\n return (False, 0)\n vol = 0 if diam == 0 else 4*np.pi*((diam/2)**3)/3\n locs.append(dict(uid=uid, flag=row['diam'],\n z=vox_center[0], y=vox_center[1], x=vox_center[2],\n diam=diam, vol=vol))\n\n # filtered, is_positive = filter_cands(locs)\n for line in locs:\n candidates.loc[candidates.shape[0]] = line\n return (True, 1)\n\n\ndef filter_cands(locs):\n diams = [loc['diam'] for loc in locs]\n if np.sum(diams) == 0:\n # No malignancy - no need to filter\n return locs, 0\n # Do not take negative candidates from a sample with malignancy\n result = [loc for loc in locs if loc['diam'] != 0]\n return result, 1\n\n\ndef read_scan(path):\n uid = os.path.basename(path)\n if uid.split('.')[-1] == 'mhd':\n uid = uid[:-4]\n return sitk.ReadImage(path), uid\n\n reader = sitk.ImageSeriesReader()\n image_files = reader.GetGDCMSeriesFileNames(path)\n assert len(image_files) > 0\n if len(image_files) < settings.chunk_size:\n print('Ignoring %s - only %d slices' % (path, 
len(image_files)))\n return None, uid\n\n reader.SetFileNames(image_files)\n return reader.Execute(), uid\n\n\ndef get_data(scan_data):\n data = sitk.GetArrayFromImage(scan_data)\n # Convert to (z, y, x) ordering\n spacing = np.array(list(reversed(scan_data.GetSpacing())))\n spacing /= settings.resolution\n slices = ndimage.interpolation.zoom(data, spacing, mode='nearest')\n origin = np.array(list(reversed(scan_data.GetOrigin())))\n return slices, spacing, origin\n\n\ndef trim(slices, min_bound=0):\n starts = np.zeros(3, dtype=np.int32)\n end_vals = [slices.shape[i] for i in range(3)]\n ends = np.array(end_vals, dtype=np.int32)\n\n while slices[starts[0]].sum() == min_bound*slices.shape[0]:\n starts[0] += 1\n while slices[:, starts[1]].sum() == min_bound*slices.shape[1]:\n starts[1] += 1\n while slices[..., starts[2]].sum() == min_bound*slices.shape[2]:\n starts[2] += 1\n\n while slices[ends[0] - 1].sum() == min_bound*slices.shape[0]:\n ends[0] -= 1\n while slices[:, ends[1] - 1].sum() == min_bound*slices.shape[1]:\n ends[1] -= 1\n while slices[..., ends[2] - 1].sum() == min_bound*slices.shape[2]:\n ends[2] -= 1\n\n trimmed = slices[starts[0]:ends[0], starts[1]:ends[1], starts[2]:ends[2]]\n return trimmed, starts\n\n\ndef convert(path_list, annots, cans,batch_size, max_idx, idx):\n start = idx * batch_size\n end = min(start + batch_size, max_idx)\n path_list = path_list[start:end]\n meta = pd.DataFrame(columns=['uid', 'flag', 'z_len', 'y_len', 'x_len'])\n labels = pd.DataFrame(columns=['uid', 'flag', 'z', 'y', 'x', 'diam', 'vol'])\n candidates = pd.DataFrame(columns=['uid', 'flag', 'z', 'y', 'x', 'diam', 'vol'])\n\n for i, path in enumerate(path_list):\n if os.path.basename(path) == 'b8bb02d229361a623a4dc57aa0e5c485':\n # ITK cannot read this file\n continue\n print('Converting %s' % path)\n scan_data, uid = read_scan(path)\n if scan_data is None:\n continue\n\n slices, spacing, origin = get_data(scan_data)\n skip = 0\n\n video.clip(slices, settings.low_thresh, settings.high_thresh)\n msk = mask.get_mask(slices, uid)\n # msk = mask.segment_lung_mask(slices, uid)\n slices = video.normalize(slices, settings.low_thresh, settings.high_thresh)\n mask.apply_mask(slices, msk)\n slices, starts = trim(slices)\n valid, flag = process_annotations(uid, annots, origin, labels,slices.shape, starts)\n process_candidates(uid, cans, origin, candidates,slices.shape, starts)\n\n if not valid:\n print('Ignoring %s - bad metadata' % path)\n continue\n\n video.write_data(slices, os.path.join(output_path, uid))\n meta.loc[meta.shape[0]] = dict(uid=uid, flag=flag, z_len=slices.shape[0],\n y_len=slices.shape[1], x_len=slices.shape[2])\n return meta, labels,candidates\n\n\nif len(sys.argv) < 4:\n print('Usage %s <input-path> <output-path> train/test' % sys.argv[0])\n sys.exit(0)\n\nnp.random.seed(0)\ninput_path = sys.argv[1]\noutput_path = sys.argv[2]\nis_train = sys.argv[3] == 'train'\nif is_train:\n search_path = os.path.join(input_path, 'subset*', '*.mhd')\nelse:\n search_path = os.path.join(input_path, '*')\n\nif not os.path.exists(output_path):\n os.mkdir(output_path)\n\npath_list = glob(search_path)\npath_list = sorted(path_list)\nnp.random.shuffle(path_list)\nif is_train:\n annots = pd.read_csv(os.path.join(input_path, 'annotations_excluded.csv'))\n cans = pd.read_csv(os.path.join(input_path, 'candidates_V2.csv'))\nelse:\n annots = None\n cans = None\n\ncount = len(path_list)\nassert count > 0, 'Could not find %s' % search_path\nprint('Converting %d scans...' 
% count)\n\ncpu_count = multiprocessing.cpu_count()\nbatch_size = (count - 1) // cpu_count + 1\nprocesses = (count - 1) // batch_size + 1\n\nfunc = functools.partial(convert, path_list, annots, cans, batch_size, count)\npool = multiprocessing.Pool(processes=processes)\nret_list = pool.map(func, range(processes))\npool.close()\nmeta_list, labels_list,candidates_list = zip(*ret_list)\nmeta = pd.concat(meta_list)\nlabels = pd.concat(labels_list)\ncandidates = pd.concat(candidates_list)\nmeta.to_csv(os.path.join(output_path, 'metadata.csv'), index=False)\nif is_train:\n labels.to_csv(os.path.join(output_path, 'labels.csv'), index=False)\n candidates.to_csv(os.path.join(output_path, 'candidates.csv'), index=False)\n"
] | [
[
"scipy.ndimage.interpolation.zoom",
"pandas.concat",
"numpy.random.seed",
"numpy.rint",
"numpy.random.shuffle",
"pandas.DataFrame",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
wuye999/DouZero_For_HLDDZ_FullAuto | [
"11cb3610392fdfcc81b062b73c565b5cb32c2fa3"
] | [
"main.py"
] | [
"# -*- coding: utf-8 -*-\n# Created by: Raf\n# Modify by: Vincentzyx\nimport collections\nimport random\n\nimport PIL\n\nimport GameHelper as gh\nfrom GameHelper import GameHelper\nimport os\nimport sys\nimport time\nfrom threading import Thread\nimport pyautogui\nimport win32gui\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport traceback\nimport datetime\nimport re\nimport warnings\nimport requests\n\nfrom PyQt5 import QtGui, QtWidgets, QtCore\nfrom PyQt5.QtWidgets import QMessageBox, QTextBrowser, QTextEdit\nfrom PyQt5.QtGui import QTextCursor\nfrom PyQt5.QtCore import QTime, QEventLoop\nfrom MainWindow import Ui_Form\n\nfrom douzero.env.game import GameEnv\nfrom douzero.evaluation.deep_agent import DeepAgent\nfrom douzero.env.move_detector import get_move_type\nimport BidModel\nimport LandlordModel\nimport FarmerModel\n\nwarnings.filterwarnings('ignore')\n\n\nEnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7',\n 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q',\n 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'}\n\nRealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7,\n '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12,\n 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30}\n\nAllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n\nAllCards = ['rD', 'bX', 'b2', 'r2', 'bA', 'rA', 'bK', 'rK', 'bQ', 'rQ', 'bJ', 'rJ', 'bT', 'rT',\n 'b9', 'r9', 'b8', 'r8', 'b7', 'r7', 'b6', 'r6', 'b5', 'r5', 'b4', 'r4', 'b3', 'r3']\n\nhelper = GameHelper()\nhelper.ScreenZoomRate = 1.25\n\ndef manual_landlord_requirements(cards_str):\n counter = collections.Counter(cards_str)\n if (counter['D'] == 1 and counter['2'] >= 1 and counter[\"A\"] >= 1) \\\n or (counter['D'] == 1 and counter['2'] >= 2) \\\n or (counter['D'] == 1 and len([key for key in counter if counter[key] == 4]) >= 1) \\\n or (counter['D'] == 1 and counter['X'] == 1) \\\n or (len([key for key in counter if counter[key] == 4]) >= 2) \\\n or (counter[\"X\"] == 1 and ((counter[\"2\"] >= 2) or (counter[\"2\"] >= 2 and counter[\"A\"] >= 2) or (\n counter[\"2\"] >= 2 and len([key for key in counter if counter[key] == 4]) >= 1))) \\\n or (counter[\"2\"] >= 2 and len([key for key in counter if counter[key] == 4]) >= 1):\n return True\n else:\n return False\n\n\ndef manual_mingpai_requirements(cards_str):\n counter = collections.Counter(cards_str)\n if (counter['D'] == 1 and counter['2'] >= 2) \\\n or (counter['D'] == 1 and counter['2'] >= 1 and counter['X'] == 1) \\\n or (counter['D'] == 1 and counter['2'] >= 1 and counter['A'] >= 2) \\\n or (len([key for key in counter if counter[key] == 4]) >= 2) \\\n or (counter[\"X\"] == 1 and ((counter[\"2\"] >= 2) or (counter[\"2\"] >= 2 and counter[\"A\"] >= 2) or (\n counter[\"2\"] >= 2 and len([key for key in counter if counter[key] == 4]) >= 1))) \\\n or (\"DX\" in cards_str and len([key for key in counter if counter[key] == 4]) >= 1):\n return True\n else:\n return False\n\n\nclass MyPyQT_Form(QtWidgets.QWidget, Ui_Form):\n def __init__(self):\n super(MyPyQT_Form, self).__init__()\n self.setupUi(self)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮\n QtCore.Qt.WindowCloseButtonHint) # 窗体总在最前端 QtCore.Qt.WindowStaysOnTopHint\n self.setFixedSize(self.width(), self.height()) # 固定窗体大小\n # self.setWindowIcon(QIcon('pics/favicon.ico'))\n window_pale = QtGui.QPalette()\n # window_pale.setBrush(self.backgroundRole(), 
QtGui.QBrush(QtGui.QPixmap(\"pics/bg.png\")))\n self.setPalette(window_pale)\n self.Players = [self.RPlayer, self.Player, self.LPlayer]\n self.env = None\n self.counter = QTime()\n\n # 参数\n self.MyConfidence = 0.95 # 我的牌的置信度\n self.OtherConfidence = 0.89 # 别人的牌的置信度\n self.WhiteConfidence = 0.95 # 检测白块的置信度\n self.LandlordFlagConfidence = 0.85 # # 检测地主标志的置信度\n self.ThreeLandlordCardsConfidence = 0.9 # 检测地主底牌的置信度\n self.PassConfidence = 0.85\n self.WaitTime = 1 # 等待状态稳定延时\n self.MyFilter = 40 # 我的牌检测结果过滤参数\n self.OtherFilter = 25 # 别人的牌检测结果过滤参数\n self.SleepTime = 0.1 # 循环中睡眠时间\n self.RunGame = False\n self.AutoPlay = False\n # ------ 阈值 ------\n self.BidThresholds = [0, # 叫地主阈值\n 0.3, # 抢地主阈值 (自己第一个叫地主)\n 0] # 抢地主阈值 (自己非第一个叫地主)\n self.JiabeiThreshold = (\n (0.3, 0.15), # 叫地主 超级加倍 加倍 阈值\n (0.5, 0.15) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下)\n )\n self.FarmerJiabeiThreshold = (6, 1.2)\n self.MingpaiThreshold = 0.93\n self.stop_when_no_chaojia = True # 是否在没有超级加倍的时候关闭自动模式\n self.use_manual_landlord_requirements = False # 手动规则\n self.use_manual_mingpai_requirements = True # Manual Mingpai\n # ------------------\n # 坐标\n self.landlord_position_code = 0\n self.play_order = 0\n self.MyHandCardsPos = (250, 764, 1141, 70) # 我的截图区域\n self.LPassPos = (463, 355, 380, 250) # 左边不出截图区域\n self.RPassPos = (946, 355, 380, 250) # 右边不出截图区域\n self.LPlayedCardsPos = (463, 392, 327, 90) # 左边出牌截图区域\n self.RPlayedCardsPos = (936, 392, 337, 90) # 右边出牌截图区域\n self.LandlordFlagPos = [(1281, 276, 110, 140), (267, 695, 110, 140), (424, 237, 110, 140)] # 地主标志截图区域(右-我-左)\n self.ThreeLandlordCardsPos = (763, 37, 287, 136) # 地主底牌截图区域,resize成349x168\n self.PassBtnPos = (686, 659, 419, 100)\n\n self.GeneralBtnPos = (616, 631, 576, 117)\n self.LastValidPlayCardEnv = []\n self.LastValidPlayPos = 0\n # Game Log Variables\n self.GameRecord = []\n self.game_type = \"\"\n self.initial_cards = \"\"\n self.initial_bid_rate = \"\"\n self.initial_model_rate = \"\"\n self.initial_mingpai = \"\"\n self.initial_multiply = \"\"\n # -------------------\n self.shouldExit = 0 # 通知上一轮记牌结束\n self.card_play_model_path_dict = {\n 'landlord': \"baselines/resnet/resnet_landlord.ckpt\",\n 'landlord_up': \"baselines/resnet/resnet_landlord_up.ckpt\",\n 'landlord_down': \"baselines/resnet/resnet_landlord_down.ckpt\"\n }\n self.card_play_wp_model_path = {\n 'landlord': \"baselines/douzero_WP/landlord.ckpt\",\n 'landlord_up': \"baselines/douzero_WP/landlord_up.ckpt\",\n 'landlord_down': \"baselines/douzero_WP/landlord_down.ckpt\"\n }\n LandlordModel.init_model(\"baselines/douzero_WP/landlord.ckpt\")\n\n def init_display(self):\n self.WinRate.setText(\"评分\")\n self.InitCard.setText(\"开始\")\n self.UserHandCards.setText(\"手牌\")\n self.LPlayedCard.setText(\"上家出牌区域\")\n self.RPlayedCard.setText(\"下家出牌区域\")\n self.PredictedCard.setText(\"AI出牌区域\")\n self.ThreeLandlordCards.setText(\"地主牌\")\n self.SwitchMode.setText(\"自动\" if self.AutoPlay else \"单局\")\n for player in self.Players:\n player.setStyleSheet('background-color: rgba(255, 0, 0, 0);')\n\n def auto_start(self):\n self.game_loop()\n\n def switch_mode(self):\n self.AutoPlay = not self.AutoPlay\n self.SwitchMode.setText(\"自动\" if self.AutoPlay else \"单局\")\n\n def init_cards(self):\n self.RunGame = True\n GameHelper.Interrupt = False\n self.init_display()\n self.initial_model_rate = 0\n\n self.user_hand_cards_real = \"\"\n self.user_hand_cards_env = []\n self.other_played_cards_real = \"\"\n self.other_played_cards_env = []\n self.upper_played_cards_real = \"\"\n self.lower_played_cards_real = \"\"\n\n 
self.other_hand_cards = []\n\n self.three_landlord_cards_real = \"\"\n self.three_landlord_cards_env = []\n # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家\n self.user_position_code = None\n self.user_position = \"\"\n\n self.card_play_data_list = {}\n\n self.play_order = 0\n\n self.env = None\n\n self.user_hand_cards_real = self.find_my_cards(self.MyHandCardsPos)\n self.UserHandCards.setText(self.user_hand_cards_real)\n self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)]\n\n self.three_landlord_cards_real = self.find_three_landlord_cards(self.ThreeLandlordCardsPos)\n self.ThreeLandlordCards.setText(\"底牌:\" + self.three_landlord_cards_real)\n self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)]\n for testCount in range(1, 5):\n if len(self.three_landlord_cards_env) > 3:\n self.ThreeLandlordCardsConfidence += 0.05\n elif len(self.three_landlord_cards_env) < 3:\n self.ThreeLandlordCardsConfidence -= 0.05\n else:\n break\n self.three_landlord_cards_real = self.find_three_landlord_cards(self.ThreeLandlordCardsPos)\n self.ThreeLandlordCards.setText(\"底牌:\" + self.three_landlord_cards_real)\n self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)]\n\n self.user_position_code = self.find_landlord(self.LandlordFlagPos)\n try_count = 5\n while self.user_position_code is None and self.RunGame and try_count > 0:\n print(\"玩家角色获取失败!重试中…\")\n try_count -= 1\n helper.LeftClick((900, 550))\n self.sleep(500)\n self.user_position_code = self.find_landlord(self.LandlordFlagPos)\n if self.user_position_code is None:\n return\n self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code]\n for player in self.Players:\n player.setStyleSheet('background-color: rgba(255, 0, 0, 0);')\n self.Players[self.user_position_code].setStyleSheet('background-color: rgba(255, 0, 0, 0.1);')\n\n for i in set(AllEnvCard):\n self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i)))\n self.card_play_data_list.update({\n 'three_landlord_cards': self.three_landlord_cards_env,\n ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]:\n self.user_hand_cards_env,\n ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]:\n self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:],\n ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]:\n self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:]\n })\n\n self.play_order = 0 if self.user_position == \"landlord\" else 1 if self.user_position == \"landlord_up\" else 2\n self.LastValidPlayPos = self.play_order\n\n ai_players = [self.user_position,\n DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])]\n # ai_players2 = [self.user_position,\n # DeepAgent(self.user_position, self.card_play_wp_model_path[self.user_position])]\n self.env = GameEnv(ai_players, None)\n\n try:\n self.start()\n except Exception as e:\n print(\"运行时出现错误,已重置\\n\", repr(e))\n traceback.print_exc()\n self.stop()\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def waitUntilNoAnimation(self, ms=150):\n ani = self.haveAnimation(ms)\n first_run = 0\n while ani:\n if first_run == 0:\n print(\"等待动画\", end=\"\")\n else:\n if first_run % 2 == 0:\n 
print(\".\", end=\"\")\n first_run += 1\n ani = self.haveAnimation(ms)\n print()\n self.sleep(600)\n\n def real_to_env(self, cards):\n env_card = [RealCard2EnvCard[c] for c in cards]\n env_card.sort()\n return env_card\n\n @staticmethod\n def move_type_tostr(move_type):\n mtype = move_type[\"type\"]\n mtype_map = [\"\", \"单张\", \"对子\", \"三张\", \"炸弹\", \"王炸\", \"三带一\", \"三带一对\", \"顺子\", \"连对\", \"飞机\", \"飞机带单根\", \"飞机带一对\", \"四带二\", \"四带两对\",\n \"不是合法牌型\"]\n t_str = mtype_map[mtype]\n if \"len\" in move_type:\n t_str += \" 长度: \" + str(move_type[\"len\"])\n return t_str\n\n # Optimize by 错过\n def handle_others(self, playPos, label, nick):\n label.setText(\"等待出牌中\")\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 10) # 更新界面\n # passPos 识别\"不出\"区域和识别牌的区域不一样(识别牌的区域很小),所以用 passPos\n passPos = self.LPassPos if nick == \"上家\" else self.RPassPos\n pass_flag = helper.LocateOnScreen('pass', region=passPos, confidence=self.PassConfidence)\n lastCards = \"\"\n sameCount = 0\n sameCountSingle = 0 # 如果长度为 1,可能是顺子的起始,所以算两次\n need_newline = 2\n print(\"等待\", nick, \"出牌\", end=\"\")\n while self.RunGame and pass_flag is None:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 10)\n img, _ = helper.Screenshot() # 只用一次截图,find_other_cards 和 LocateOnScreen 添加了 img 参数\n need_newline += 1\n if need_newline % 2 == 0:\n print(\".\", end=\"\")\n need_newline = 0\n st = time.time()\n cards = self.find_other_cards(pos=playPos, img=img) # img\n move_type = get_move_type(self.real_to_env(cards))\n\n last_played_cards = self.upper_played_cards_real if nick == \"上家\" else self.lower_played_cards_real\n last_play_count = 0\n if cards == last_played_cards and last_play_count <= 2:\n last_play_count += 1\n self.sleep(300)\n\n if len(cards) == 0: # 如果没有卡,不要等 300 毫秒,直接搜索 pass\n pass_flag = helper.LocateOnScreen('pass', region=passPos, confidence=self.PassConfidence,\n img=img) # 需要在 helper 中增加img这个参数,默认为 None\n elif cards == lastCards and len(cards) > 0:\n sameCount += 1\n requireCounts = 2 if len(cards) == 1 else 1\n if sameCount >= requireCounts and move_type[\"type\"] != 15:\n break\n else:\n if need_newline > 2:\n need_newline = 0\n print()\n # print(\"检测到\", sameCount, \"次\", self.move_type_tostr(move_type))\n need_newline += 1\n label.setText(cards)\n self.sleep(100)\n else:\n lastCards = cards\n sameCount = 0\n print(cards, end=\" \")\n et = time.time()\n if et - st < 0.3:\n self.sleep(300 - (et - st) * 1000)\n # 不管牌的长度,都要执行,除非 break 了\n # 20% 的概率寻找 change_player\n self.detect_start_btn()\n\n if pass_flag is None:\n self.other_played_cards_real = lastCards\n print(\"\\n\" + nick, \"出牌\", self.other_played_cards_real)\n label.setText(self.other_played_cards_real)\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 10) # 更新界面\n else:\n self.other_played_cards_real = \"\"\n label.setText(\"不出\")\n self.sleep(200)\n print(\"\\n\" + nick, \"不要\")\n if not self.RunGame:\n self.other_played_cards_real = \"\"\n self.other_played_cards_env = [RealCard2EnvCard[c] for c in list(self.other_played_cards_real)]\n self.other_played_cards_env.sort()\n self.env.step(self.user_position, self.other_played_cards_env)\n # self.animation_sleep(cards)\n cards = self.other_played_cards_real\n # 更新上下家手牌\n if nick == \"上家\":\n self.upper_played_cards_real = cards\n else:\n self.lower_played_cards_real = cards\n\n move_type = get_move_type(self.real_to_env(cards))\n animation_types = {4, 5, 13, 14, 8, 9, 10, 11, 12}\n if move_type[\"type\"] in animation_types or len(cards) >= 6:\n self.waitUntilNoAnimation()\n\n 
def animation_sleep(self, cards, normalTime=0):\n if (len(cards) == 4 and len(set(cards)) == 1) or \\\n len(cards) >= 6: # 飞机也要休息一下\n print(\"飞机休息 2秒\")\n self.sleep(2200)\n elif \"D\" in cards and \"X\" in cards:\n print(\"王炸休息 3 秒\")\n self.sleep(3000)\n elif len(cards) == 5 and len(set(cards)) == 5:\n print(\"顺子休息 2 秒\")\n self.sleep(2200)\n else:\n print(\"休息{}秒\".format(normalTime / 1000))\n self.sleep(normalTime)\n\n @staticmethod\n def action_to_str(action):\n if len(action) == 0:\n return \"Pass\"\n else:\n return \"\".join([EnvCard2RealCard[card] for card in action])\n\n def card_play_data_tostr(self, card_play_data):\n s = \"---------- 对局信息 ----------\\n\"\n s += \" 地主牌: \" + self.action_to_str(card_play_data[\"three_landlord_cards\"]) + \"\\n\" + \\\n \" 地主手牌: \" + self.action_to_str(card_play_data[\"landlord\"]) + \"\\n\" + \\\n \"地主上家手牌:\" + self.action_to_str(card_play_data[\"landlord_up\"]) + \"\\n\" + \\\n \"地主下家手牌:\" + self.action_to_str(card_play_data[\"landlord_down\"])\n s += \"\\n------------------------------\"\n return s\n\n def record_cards(self):\n try:\n for card in self.other_played_cards_env:\n self.other_hand_cards.remove(card)\n except ValueError as e:\n traceback.print_exc()\n\n def game_loop(self):\n while True:\n try_count = 0\n while self.detect_start_btn(True) is None and try_count < 5:\n try_count += 1\n self.sleep(300)\n self.before_start()\n self.init_cards()\n if not self.AutoPlay:\n break\n\n def start(self):\n self.GameRecord.clear()\n self.env.card_play_init(self.card_play_data_list)\n cards_left = []\n print(\"开始对局\")\n print(\"手牌:\", self.user_hand_cards_real)\n first_run = True\n st = time.time()\n step_count = 0\n while not self.env.game_over and self.RunGame:\n if self.play_order == 0:\n self.PredictedCard.setText(\"...\")\n action_message, action_list = self.env.step(self.user_position)\n self.UserHandCards.setText(\"手牌:\" + str(''.join(\n [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]))[::-1])\n action_list = action_list[:8]\n action_list_str = \"\\n\".join([ainfo[0] + \" \" + ainfo[1] for ainfo in action_list])\n self.PredictedCard.setText(action_message[\"action\"] if action_message[\"action\"] else \"不出\")\n self.WinRate.setText(action_list_str)\n action_list_str = \" | \".join([ainfo[0] + \" \" + ainfo[1] for ainfo in action_list])\n # self.sleep(400)\n hand_cards_str = ''.join(\n [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards])\n if first_run:\n self.initial_model_rate = round(float(action_message[\"win_rate\"]), 3) # win_rate at start\n first_run = False\n print(\"出牌:\", action_message[\"action\"] if action_message[\"action\"] else \"Pass\", \"| 得分:\",\n round(action_message[\"win_rate\"], 3), \"| 剩余手牌:\", hand_cards_str)\n print(action_list_str)\n if not (self.upper_played_cards_real == \"DX\" or self.lower_played_cards_real == \"DX\" or\n (len(hand_cards_str + action_message[\"action\"]) == 1 and len(\n self.upper_played_cards_real) > 1) or\n (len(hand_cards_str + action_message[\"action\"]) == 1 and len(\n self.lower_played_cards_real) > 1)):\n if action_message[\"action\"] == \"\":\n tryCount = 2\n result = helper.LocateOnScreen(\"pass_btn\", region=self.PassBtnPos, confidence=0.85)\n passSign = helper.LocateOnScreen(\"pass\", region=(830, 655, 150, 70), confidence=0.85)\n while result is None is None and tryCount > 0:\n if not self.RunGame:\n break\n if passSign is not None and tryCount <= 0:\n break\n print(\"等待不出按钮\")\n self.detect_start_btn()\n 
tryCount -= 1\n result = helper.LocateOnScreen(\"pass_btn\", region=self.PassBtnPos, confidence=0.85)\n passSign = helper.LocateOnScreen(\"pass\", region=(830, 655, 150, 70), confidence=0.85)\n self.sleep(100)\n helper.ClickOnImage(\"pass_btn\", region=self.PassBtnPos, confidence=0.85)\n else:\n if len(hand_cards_str) == 0 and len(action_message[\"action\"]) == 1:\n helper.SelectCards(action_message[\"action\"], True)\n else:\n helper.SelectCards(action_message[\"action\"])\n tryCount = 10\n result = helper.LocateOnScreen(\"play_card\", region=self.PassBtnPos, confidence=0.85)\n while result is None and tryCount > 0:\n print(\"等待出牌按钮\")\n tryCount -= 1\n result = helper.LocateOnScreen(\"play_card\", region=self.PassBtnPos, confidence=0.85)\n self.sleep(100)\n self.sleep(100)\n helper.ClickOnImage(\"play_card\", region=self.PassBtnPos, confidence=0.85)\n self.sleep(300)\n else:\n print(\"要不起,跳过出牌\")\n self.GameRecord.append(action_message[\"action\"] if action_message[\"action\"] != \"\" else \"Pass\")\n self.sleep(500)\n if action_message[\"action\"]:\n cards = action_message[\"action\"]\n move_type = get_move_type(self.real_to_env(cards))\n animation_types = {4, 5, 13, 14, 8, 9, 10, 11, 12}\n if move_type[\"type\"] in animation_types or len(cards) >= 6:\n self.waitUntilNoAnimation()\n\n self.detect_start_btn()\n\n self.play_order = 1\n\n elif self.play_order == 1:\n if self.other_played_cards_real != \"DX\" or len(self.other_played_cards_real) == 4 and len(\n set(self.other_played_cards_real)) == 1:\n self.handle_others(self.RPlayedCardsPos, self.RPlayedCard, \"下家\")\n else:\n self.other_played_cards_real = \"\"\n self.other_played_cards_env = \"\"\n self.env.step(self.user_position, [])\n self.GameRecord.append(self.other_played_cards_real if self.other_played_cards_real != \"\" else \"Pass\")\n self.record_cards()\n self.play_order = 2\n self.sleep(200)\n\n elif self.play_order == 2:\n if self.other_played_cards_real != \"DX\" or len(self.other_played_cards_real) == 4 and len(\n set(self.other_played_cards_real)) == 1:\n self.handle_others(self.LPlayedCardsPos, self.LPlayedCard, \"上家\")\n else:\n self.other_played_cards_real = \"\"\n self.other_played_cards_env = \"\"\n self.env.step(self.user_position, [])\n self.GameRecord.append(self.other_played_cards_real if self.other_played_cards_real != \"\" else \"Pass\")\n self.record_cards()\n self.play_order = 0\n self.sleep(100)\n step_count = (step_count + 1) % 3\n self.sleep(20)\n\n self.sleep(500)\n self.RunGame = False\n\n def find_landlord(self, landlord_flag_pos):\n for pos in landlord_flag_pos:\n result = helper.LocateOnScreen(\"landlord_words\", region=pos, confidence=self.LandlordFlagConfidence)\n if result is not None:\n return landlord_flag_pos.index(pos)\n return None\n\n # 先看有没有换对手这个按钮,如果有的话,启动 detect_start_btn, 耗时 0.16秒\n def detect_change_player(self, image=None):\n if image:\n result = helper.LocateOnScreen(\"change_player_btn\", region=(400, 400, 934, 800), img=image)\n else:\n result = helper.LocateOnScreen(\"change_player_btn\", region=(400, 400, 934, 800))\n if result is not None:\n self.detect_start_btn()\n\n def detect_popup(self):\n img, _ = helper.Screenshot()\n result = helper.LocateOnScreen(\"yes_btn\", region=(680, 661, 435, 225), img=img)\n if result is not None:\n helper.ClickOnImage(\"yes_btn\", region=(680, 661, 435, 225), img=img)\n self.sleep(1000)\n result = helper.LocateOnScreen(\"get_award_btn\", region=(680, 661, 435, 225), img=img)\n if result is not None:\n helper.ClickOnImage(\"get_award_btn\", 
region=(680, 661, 435, 225), img=img)\n self.sleep(1000)\n result = helper.LocateOnScreen(\"yes_btn_sm\", region=(669, 583, 468, 100), img=img)\n if result is not None:\n helper.ClickOnImage(\"yes_btn_sm\", region=(669, 583, 468, 100), img=img)\n self.sleep(1000)\n\n # 耗时 0.7 秒\n def detect_start_btn(self, click=False):\n img, _ = helper.Screenshot()\n result = helper.LocateOnScreen(\"change_player_btn\", region=(400, 400, 934, 800), img=img, confidence=0.8)\n if self.AutoPlay and result is not None:\n print(\"检测到换对手按钮\")\n self.stop()\n self.RunGame = False\n if self.AutoPlay:\n if click:\n print(\"点击换对手\")\n helper.ClickOnImage(\"change_player_btn\", region=(400, 400, 934, 800), img=img, confidence=0.8)\n self.sleep(1000)\n return True\n else:\n return\n result = helper.LocateOnScreen(\"finish_round\", region=(810, 840, 200, 80), confidence=0.8, img=img)\n if result is not None:\n helper.ClickOnImage(\"finish_round\", region=(810, 840, 200, 80), confidence=0.8, img=img)\n self.sleep(1000)\n result = helper.LocateOnScreen(\"next_round\", region=(958, 869, 300, 100), confidence=0.8, img=img)\n if result is not None:\n helper.ClickOnImage(\"next_round\", region=(958, 869, 300, 100), confidence=0.8, img=img)\n self.sleep(1000)\n result = helper.LocateOnScreen(\"yes_btn\", region=(680, 661, 435, 225), img=img)\n if result is not None:\n helper.ClickOnImage(\"yes_btn\", region=(680, 661, 435, 225), img=img)\n self.sleep(1000)\n result = helper.LocateOnScreen(\"get_award_btn\", region=(680, 661, 435, 225), img=img)\n if result is not None:\n helper.ClickOnImage(\"get_award_btn\", region=(680, 661, 435, 225), img=img)\n self.sleep(1000)\n result = helper.LocateOnScreen(\"yes_btn_sm\", region=(669, 583, 468, 100), img=img)\n if result is not None:\n helper.ClickOnImage(\"yes_btn_sm\", region=(669, 583, 468, 100), img=img)\n self.sleep(200)\n\n def find_three_landlord_cards(self, pos):\n img, _ = helper.Screenshot(region=pos)\n # img = img.crop((pos[0], pos[1], pos[0] + pos[2], pos[1] + pos[3]))\n img = img.resize((349, 168))\n three_landlord_cards_real = \"\"\n for card in AllCards:\n result = pyautogui.locateAll(needleImage=helper.Pics['o' + card], haystackImage=img,\n confidence=self.ThreeLandlordCardsConfidence)\n three_landlord_cards_real += card[1] * self.cards_filter(list(result), self.OtherFilter)\n if len(three_landlord_cards_real) > 3:\n three_landlord_cards_real = \"\"\n for card in AllCards:\n result = pyautogui.locateAll(needleImage=helper.Pics['o' + card], haystackImage=img,\n confidence=self.ThreeLandlordCardsConfidence - 0.1)\n three_landlord_cards_real += card[1] * self.cards_filter(list(result), self.OtherFilter)\n if len(three_landlord_cards_real) < 3:\n three_landlord_cards_real = \"\"\n for card in AllCards:\n result = pyautogui.locateAll(needleImage=helper.Pics['o' + card], haystackImage=img,\n confidence=self.ThreeLandlordCardsConfidence + 0.1)\n three_landlord_cards_real += card[1] * self.cards_filter(list(result), self.OtherFilter)\n return three_landlord_cards_real\n\n def find_my_cards(self, pos):\n user_hand_cards_real = \"\"\n img, _ = helper.Screenshot()\n cards, _ = helper.GetCards(img)\n for c in cards:\n user_hand_cards_real += c[0]\n return user_hand_cards_real\n\n def find_other_cards(self, pos, img=None):\n other_played_cards_real = \"\"\n if not img:\n img, _ = helper.Screenshot()\n imgCv = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n for card in AllCards:\n result = gh.LocateAllOnImage(imgCv, helper.PicsCV['o' + card], region=pos, 
confidence=self.OtherConfidence)\n if len(result) > 0:\n other_played_cards_real += card[1] * self.cards_filter(list(result), self.OtherFilter)\n return other_played_cards_real\n\n @staticmethod\n def filter_image_white(img: Image, threshold=170):\n width, height = img.size\n pixels = img.load()\n for y in range(height):\n for x in range(width):\n r, g, b = pixels[x, y]\n if abs(r - g) < 20 and abs(r - b) < 20 and r > threshold:\n pixels[x, y] = (255 - r, 255 - g, 255 - b)\n else:\n pixels[x, y] = (255, 255, 255)\n\n @staticmethod\n def filter_image_orange(img: Image, threshold=200):\n width, height = img.size\n pixels = img.load()\n for y in range(height):\n for x in range(width):\n r, g, b = pixels[x, y]\n if r - b > 30 and r > threshold:\n pixels[x, y] = (255 - r, 255 - r, 255 - r)\n else:\n pixels[x, y] = (255, 255, 255)\n\n @staticmethod\n def filter_image_dark(img: Image, threshold=550):\n width, height = img.size\n pixels = img.load()\n for y in range(height):\n for x in range(width):\n r, g, b = pixels[x, y]\n if r + g + b > threshold:\n pixels[x, y] = (255, 255, 255)\n else:\n pixels[x, y] = (0, 0, 0)\n\n def cards_filter(self, location, distance): # 牌检测结果滤波\n if len(location) == 0:\n return 0\n locList = [location[0][0]]\n count = 1\n for e in location:\n flag = 1 # “是新的”标志\n for have in locList:\n if abs(e[0] - have) <= distance:\n flag = 0\n break\n if flag:\n count += 1\n locList.append(e[0])\n return count\n\n def have_white(self, pos): # 是否有白块\n img, _ = helper.Screenshot()\n result = pyautogui.locate(needleImage=helper.Pics[\"white\"], haystackImage=img,\n region=pos, confidence=self.WhiteConfidence)\n if result is None:\n return 0\n else:\n return 1\n\n def stop(self):\n try:\n self.RunGame = False\n if self.env is not None:\n self.env.game_over = True\n self.env.reset()\n self.init_display()\n self.PreWinrate.setText(\"局前预估得分: \")\n self.BidWinrate.setText(\"叫牌预估得分: \")\n except AttributeError as e:\n traceback.print_exc()\n\n def compareImage(self, im1, im2):\n if im1.size != im2.size:\n return False\n size = im1.size\n for y in range(size[1]):\n for x in range(size[0]):\n if im1.getpixel((x, y)) != im2.getpixel((x, y)):\n return False\n return True\n\n def haveAnimation(self, waitTime=200):\n regions = [\n (1122, 585, 1122 + 30, 585 + 30), # 开始游戏右上\n (763, 625, 763 + 30, 625 + 30), # 自家出牌上方\n (478, 433, 852, 630), # 经典玩法新手场 对家使用\n (880, 540, 880 + 20, 540 + 20) # 炸弹时使用,正中央\n ]\n img, _ = helper.Screenshot()\n lastImg = img\n for i in range(2):\n self.sleep(waitTime)\n img, _ = helper.Screenshot()\n for region in regions:\n if not self.compareImage(img.crop(region), lastImg.crop(region)):\n return True\n lastImg = img\n\n return False\n\n def before_start(self):\n self.RunGame = True\n GameHelper.Interrupt = True\n have_bid = False\n is_taodou = False\n is_stolen = 0\n self.initial_multiply = 0\n self.initial_mingpai = 0\n self.initial_bid_rate = 0\n while self.RunGame:\n outterBreak = False\n jiaodizhu_btn = helper.LocateOnScreen(\"jiaodizhu_btn\", region=(765, 663, 116, 50))\n qiangdizhu_btn = helper.LocateOnScreen(\"qiangdizhu_btn\", region=(783, 663, 116, 50))\n jiabei_btn = helper.LocateOnScreen(\"jiabei_btn\", region=self.GeneralBtnPos)\n self.detect_start_btn()\n print(\"等待加倍或叫地主\", end=\"\")\n while jiaodizhu_btn is None and qiangdizhu_btn is None and jiabei_btn is None and self.RunGame:\n self.detect_start_btn()\n print(\".\", end=\"\")\n self.sleep(100)\n jiaodizhu_btn = helper.LocateOnScreen(\"jiaodizhu_btn\", region=(765, 663, 116, 50))\n qiangdizhu_btn = 
helper.LocateOnScreen(\"qiangdizhu_btn\", region=(783, 663, 116, 50))\n jiabei_btn = helper.LocateOnScreen(\"jiabei_btn\", region=self.GeneralBtnPos)\n if jiabei_btn is None:\n img, _ = helper.Screenshot()\n cards, _ = helper.GetCards(img)\n cards_str = \"\".join([card[0] for card in cards])\n win_rate = BidModel.predict_score(cards_str)\n farmer_score = FarmerModel.predict(cards_str, \"farmer\")\n if not have_bid:\n with open(\"cardslog.txt\", \"a\") as f:\n f.write(str(int(time.time())) + \" \" + cards_str + \" \" + str(round(win_rate, 2)) + \"\\n\")\n print(\"\\n叫牌预估得分: \" + str(round(win_rate, 3)) + \" 不叫预估得分: \" + str(round(farmer_score, 3)))\n self.BidWinrate.setText(\n \"叫牌预估得分: \" + str(round(win_rate, 3)) + \" 不叫预估得分: \" + str(round(farmer_score, 3)))\n self.sleep(10)\n self.initial_bid_rate = round(win_rate, 3)\n is_stolen = 0\n compare_winrate = win_rate\n if compare_winrate > 0:\n compare_winrate *= 2.5\n landlord_requirement = True\n if self.use_manual_landlord_requirements:\n landlord_requirement = manual_landlord_requirements(cards_str)\n\n if jiaodizhu_btn is not None:\n have_bid = True\n if win_rate > self.BidThresholds[0] and compare_winrate > farmer_score and landlord_requirement:\n helper.ClickOnImage(\"jiaodizhu_btn\", region=(765, 663, 116, 50), confidence=0.9)\n else:\n helper.ClickOnImage(\"bujiao_btn\", region=self.GeneralBtnPos)\n elif qiangdizhu_btn is not None:\n is_stolen = 1\n if have_bid:\n threshold_index = 1\n else:\n threshold_index = 2\n if win_rate > self.BidThresholds[\n threshold_index] and compare_winrate > farmer_score and landlord_requirement:\n helper.ClickOnImage(\"qiangdizhu_btn\", region=(783, 663, 116, 50), confidence=0.9)\n else:\n helper.ClickOnImage(\"buqiang_btn\", region=self.GeneralBtnPos)\n have_bid = True\n else:\n pass\n if have_bid:\n result = helper.LocateOnScreen(\"taodouchang\", region=(835, 439, 140, 40), confidence=0.9)\n if result is not None:\n is_taodou = True\n print(\"淘豆场,跳过加倍\")\n break\n else:\n llcards = self.find_three_landlord_cards(self.ThreeLandlordCardsPos)\n print(\"地主牌:\", llcards)\n img, _ = helper.Screenshot()\n cards, _ = helper.GetCards(img)\n cards_str = \"\".join([card[0] for card in cards])\n self.initial_cards = cards_str\n if len(cards_str) == 20:\n # win_rate = LandlordModel.predict(cards_str)\n win_rate = LandlordModel.predict_by_model(cards_str, llcards)\n self.PreWinrate.setText(\"局前预估得分: \" + str(round(win_rate, 3)))\n print(\"预估地主得分:\", round(win_rate, 3))\n else:\n user_position_code = self.find_landlord(self.LandlordFlagPos)\n user_position = \"up\"\n while user_position_code is None:\n user_position_code = self.find_landlord(self.LandlordFlagPos)\n self.sleep(50)\n user_position = ['up', 'landlord', 'down'][user_position_code]\n self.landlord_position_code = user_position_code\n win_rate = FarmerModel.predict(cards_str, user_position)\n print(\"预估农民得分:\", round(win_rate, 3))\n self.PreWinrate.setText(\"局前预估得分: \" + str(round(win_rate, 3)))\n if len(cards_str) == 20:\n JiabeiThreshold = self.JiabeiThreshold[is_stolen]\n else:\n JiabeiThreshold = self.FarmerJiabeiThreshold\n\n print(\"等待其他人加倍……\")\n self.sleep(3500)\n\n chaojijiabei_btn = helper.LocateOnScreen(\"chaojijiabei_btn\", region=self.GeneralBtnPos, confidence=0.78)\n if chaojijiabei_btn is None and self.stop_when_no_chaojia:\n self.AutoPlay = False\n self.SwitchMode.setText(\"自动\" if self.AutoPlay else \"单局\")\n self.sleep(10)\n print(\"检测到没有超级加倍卡,已停止自动模式\")\n if win_rate > JiabeiThreshold[0]:\n chaojijiabei_btn = 
helper.LocateOnScreen(\"chaojijiabei_btn\", region=self.GeneralBtnPos, confidence=0.78)\n if chaojijiabei_btn is not None:\n helper.ClickOnImage(\"chaojijiabei_btn\", region=self.GeneralBtnPos, confidence=0.78)\n self.initial_multiply = 4\n else:\n helper.ClickOnImage(\"jiabei_btn\", region=self.GeneralBtnPos)\n self.initial_multiply = 2\n elif win_rate > JiabeiThreshold[1]:\n helper.ClickOnImage(\"jiabei_btn\", region=self.GeneralBtnPos)\n self.initial_multiply = 2\n else:\n helper.ClickOnImage(\"bujiabei_btn\", region=self.GeneralBtnPos)\n self.initial_multiply = 1\n outterBreak = True\n break\n if outterBreak:\n break\n\n llcards = self.find_three_landlord_cards(self.ThreeLandlordCardsPos)\n wait_count = 0\n while len(llcards) != 3 and self.RunGame and wait_count < 15:\n print(\"等待地主牌\", llcards)\n self.sleep(200)\n wait_count += 1\n llcards = self.find_three_landlord_cards(self.ThreeLandlordCardsPos)\n\n print(\"等待加倍环节结束\")\n if not is_taodou:\n if len(cards_str) == 20:\n self.sleep(5000)\n else:\n self.sleep(3000)\n if win_rate > self.MingpaiThreshold:\n helper.ClickOnImage(\"mingpai_btn\", region=self.GeneralBtnPos)\n self.initial_mingpai = 1\n print(\"结束\")\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n app.setStyleSheet(\"\"\"\n QPushButton{\n text-align : center;\n background-color : white;\n font: bold;\n border-color: gray;\n border-width: 2px;\n border-radius: 10px;\n padding: 6px;\n height : 14px;\n border-style: outset;\n font : 14px;\n }\n QPushButton:hover{\n background-color : light gray;\n }\n QPushButton:pressed{\n text-align : center;\n background-color : gray;\n font: bold;\n border-color: gray;\n border-width: 2px;\n border-radius: 10px;\n padding: 6px;\n height : 14px;\n border-style: outset;\n font : 14px;\n padding-left:9px;\n padding-top:9px;\n }\n QComboBox{\n background:transparent;\n border: 1px solid rgba(200, 200, 200, 100);\n font-weight: bold;\n }\n QComboBox:drop-down{\n border: 0px;\n }\n QComboBox QAbstractItemView:item{\n height: 30px;\n }\n QLabel{\n background:transparent;\n font-weight: bold;\n }\n \"\"\")\n my_pyqt_form = MyPyQT_Form()\n my_pyqt_form.show()\n sys.exit(app.exec_())\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
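The distance-based filtering used by the card-detection code in the file above (the cards_filter method) is easier to see in isolation. A minimal standalone sketch of the same logic, using made-up match coordinates that do not come from the original repository:

def cards_filter(location, distance):
    # Count template matches whose x-coordinates differ by more than `distance`
    # pixels; nearby hits are treated as duplicates of the same card.
    if len(location) == 0:
        return 0
    loc_list = [location[0][0]]
    count = 1
    for e in location:
        if all(abs(e[0] - have) > distance for have in loc_list):
            count += 1
            loc_list.append(e[0])
    return count

matches = [(100, 0), (105, 0), (160, 0)]   # x = 100 and 105 collapse into one card
print(cards_filter(matches, 40))           # -> 2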
MaxInGaussian/VAFnet | [
"618a16abae08a193b94072d5d5ff176f02bb1288"
] | [
"vafnet/util/Optimizer.py"
] | [
"# Copyright 2017 Max W. Y. Lam\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport numpy.random as npr\nimport theano\nimport theano.tensor as TT\nfrom collections import OrderedDict\n\n__all__ = [\n \"Optimizer\",\n]\n\nclass Optimizer(object):\n\n algos = [\n \"momentum\",\n \"nesterov\",\n \"sgd\",\n \"adagrad\",\n \"rmsprop\",\n \"adadelta\",\n \"adam\",\n \"adamax\",\n ]\n \n @staticmethod\n def momentum(updates,\n momentum=0.9):\n \"\"\"Returns a modified update dictionary including momentum\n Generates update expressions of the form:\n *``velocity := momentum*velocity+updates[param]-param``\n *``param := param+velocity``\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n Returns\n -------\n OrderedDict\n A copy of `updates` with momentum updates for all `params`.\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1-momentum`.\n \"\"\"\n params = list(updates.keys())[0]\n updates = OrderedDict(updates)\n value = params.get_value(borrow=True)\n velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n x = momentum*velocity+updates[params]\n updates[velocity] = x-params\n updates[params] = x\n return updates\n \n @staticmethod\n def nesterov(updates,\n momentum=0.9):\n \"\"\"Returns a modified update dictionary including Nesterov momentum\n Generates update expressions of the form:\n *``velocity := momentum*velocity+updates[params]-params``\n *``params := params+momentum*velocity+updates[params]-params``\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n momentum : float or symbolic scalar, optional\n The amount of momentum to apply. Higher momentum results in\n smoothing over more update steps. Defaults to 0.9.\n Returns\n -------\n OrderedDict\n A copy of `updates` with momentum updates for all `params`.\n Notes\n -----\n Higher momentum also results in larger update steps. To counter that,\n you can optionally scale your learning rate by `1-momentum`.\n The classic formulation of Nesterov momentum (or Nesterov accelerated\n gradient) requires the gradient to be evaluated at the predicted next\n position in parameter space. 
Here, we use the formulation described at\n https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,\n which allows the gradient to be evaluated at the current parameters.\n \"\"\"\n params = list(updates.keys())[0]\n updates = OrderedDict(updates)\n value = params.get_value(borrow=True)\n velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n x = momentum*velocity+updates[params]-params\n updates[velocity] = x\n updates[params] = momentum*x+updates[params]\n return updates\n \n @staticmethod\n def sgd(updates,\n learning_rate=0.01,\n **args):\n \"\"\"Stochastic Gradient Descent (SGD) updates\n Generates update expressions of the form:\n *``params := params-learning_rate*gradient``\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n \"\"\"\n params, grads = list(updates.items())[0]\n updates = OrderedDict(updates)\n updates[params] = params-learning_rate*grads\n return updates\n \n @staticmethod\n def adagrad(updates,\n learning_rate=0.01,\n epsilon=1e-6,\n **args):\n \"\"\"Adagrad updates\n Scale learning rates by dividing with the square root of accumulated\n squared gradients. See [1]_ for further description.\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n Notes\n -----\n Using step size eta Adagrad calculates the learning rate for feature i at\n time step t as:\n .. math:: \\\\eta_{t,i} = \\\\frac{\\\\eta}\n {\\\\sqrt{\\\\sum^t_{t^\\\\prime} g^2_{t^\\\\prime,i}+\\\\epsilon}} g_{t,i}\n as such the learning rate is monotonically decreasing.\n Epsilon is not included in the typical formula, see [2]_.\n References\n ----------\n .. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):\n Adaptive subgradient methods for online learning and stochastic\n optimization. JMLR, 12:2121-2159.\n .. [2] Chris Dyer:\n Notes on AdaGrad. http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf\n \"\"\"\n params, grads = list(updates.items())[0]\n updates = OrderedDict(updates)\n value = params.get_value(borrow=True)\n accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n accu_new = accu+grads**2\n updates[accu] = accu_new\n updates[params] = params-(learning_rate*grads/TT.sqrt(accu_new+epsilon))\n return updates\n \n @staticmethod\n def rmsprop(updates,\n learning_rate=0.01,\n rho=0.9,\n epsilon=1e-6,\n **args):\n \"\"\"RMSProp updates\n Scale learning rates by dividing with the moving average of the root mean\n squared (RMS) gradients. 
See [1]_ for further description.\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n rho : float or symbolic scalar\n Gradient moving average decay factor\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n Notes\n -----\n `rho` should be between 0 and 1. A value of `rho` close to 1 will decay the\n moving average slowly and a value close to 0 will decay the moving average\n fast.\n Using the step size :math:`\\\\eta` and a decay factor :math:`\\\\rho` the\n learning rate :math:`\\\\eta_t` is calculated as:\n .. math::\n r_t &= \\\\rho r_{t-1}+(1-\\\\rho)*g^2\\\\\\\\\n \\\\eta_t &= \\\\frac{\\\\eta}{\\\\sqrt{r_t+\\\\epsilon}}\n References\n ----------\n .. [1] Tieleman, TT. and Hinton, G. (2012):\n Neural Networks for Machine Learning, Lecture 6.5-rmsprop.\n Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)\n \"\"\"\n params, grads = list(updates.items())[0]\n updates = OrderedDict(updates)\n one = TT.constant(1)\n value = params.get_value(borrow=True)\n accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n accu_new = rho*accu+(one-rho)*grad**2\n updates[accu] = accu_new\n updates[params] = params-(learning_rate*grads/TT.sqrt(accu_new+epsilon))\n return updates\n \n @staticmethod\n def adadelta(updates,\n learning_rate=1.,\n rho=0.95,\n epsilon=1e-6,\n **args):\n \"\"\" Adadelta updates\n Scale learning rates by the ratio of accumulated gradients to accumulated\n updates, see [1]_ and notes for further description.\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n learning_rate : float or symbolic scalar\n The learning rate controlling the size of update steps\n rho : float or symbolic scalar\n Squared gradient moving average decay factor\n epsilon : float or symbolic scalar\n Small value added for numerical stability\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n Notes\n -----\n rho should be between 0 and 1. A value of rho close to 1 will decay the\n moving average slowly and a value close to 0 will decay the moving average\n fast.\n rho = 0.95 and epsilon=1e-6 are suggested in the paper and reported to\n work for multiple datasets (MNIST, speech).\n In the paper, no learning rate is considered (so learning_rate=1.0).\n Probably best to keep it at this value.\n epsilon is important for the very first update (so the numerator does\n not become 0).\n Using the step size eta and a decay factor rho the learning rate is\n calculated as:\n .. math::\n r_t &= \\\\rho r_{t-1}+(1-\\\\rho)*g^2\\\\\\\\\n \\\\eta_t &= \\\\eta \\\\frac{\\\\sqrt{s_{t-1}+\\\\epsilon}}\n {\\sqrt{r_t+\\epsilon}}\\\\\\\\\n s_t &= \\\\rho s_{t-1}+(1-\\\\rho)*(\\\\eta_t*g)^2\n References\n ----------\n .. [1] Zeiler, M. D. 
(2012):\n ADADELTA: An Adaptive Learning Rate Method.\n arXiv Preprint arXiv:1212.5701.\n \"\"\"\n params, grads = list(updates.items())[0]\n updates = OrderedDict(updates)\n one = TT.constant(1)\n value = params.get_value(borrow=True)\n accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n accu_new = rho*accu+(one-rho)*grads**2\n updates[accu] = accu_new\n update = (grads*TT.sqrt(delta_accu+epsilon)/\n TT.sqrt(accu_new+epsilon))\n updates[params] = params-learning_rate*update\n delta_accu_new = rho*delta_accu+(one-rho)*update**2\n updates[delta_accu] = delta_accu_new\n return updates\n \n @staticmethod\n def adam(updates,\n learning_rate=0.01,\n beta1=0.9,\n beta2=0.99,\n epsilon=1e-8,\n **args):\n \"\"\"Adam updates\n Adam updates implemented as in [1]_.\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n learning_rate : float\n Learning rate\n beta1 : float\n Exponential decay rate for the first moment estimates.\n beta2 : float\n Exponential decay rate for the second moment estimates.\n epsilon : float\n Constant for numerical stability.\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n Notes\n -----\n The paper [1]_ includes an additional hyperparameter lambda. This is only\n needed to prove convergence of the algorithm and has no practical use\n (personal communication with the authors), it is therefore omitted here.\n References\n ----------\n .. [1] Kingma, Diederik, and Jimmy Ba (2014):\n Adam: A Method for Stochastic Optimization.\n arXiv preprint arXiv:1412.6980.\n \"\"\"\n params, grads = list(updates.items())[0]\n updates = OrderedDict(updates)\n t_prev = theano.shared(np.asarray(0., dtype=theano.config.floatX))\n one = TT.constant(1)\n t = t_prev+1\n a_t = learning_rate*TT.sqrt(one-beta2**t)/(one-beta1**t)\n value = params.get_value(borrow=True)\n m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n v_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n m_t = beta1*m_prev+(one-beta1)*grads\n v_t = beta2*v_prev+(one-beta2)*grads**2\n step = a_t*m_t/(TT.sqrt(v_t)+epsilon)\n updates[m_prev] = m_t\n updates[v_prev] = v_t\n updates[params] = params-step\n updates[t_prev] = t\n return updates\n \n @staticmethod\n def adamax(updates,\n learning_rate=0.01,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n **args):\n \"\"\"Adamax updates\n Adamax updates implemented as in [1]_. This is a variant of of the Adam\n algorithm based on the infinity norm.\n Parameters\n ----------\n updates : OrderedDict\n A dictionary mapping parameters to update expressions\n learning_rate : float\n Learning rate\n beta1 : float\n Exponential decay rate for the first moment estimates.\n beta2 : float\n Exponential decay rate for the weighted infinity norm estimates.\n epsilon : float\n Constant for numerical stability.\n Returns\n -------\n OrderedDict\n A dictionary mapping each parameter to its update expression\n References\n ----------\n .. 
[1] Kingma, Diederik, and Jimmy Ba (2014):\n Adam: A Method for Stochastic Optimization.\n arXiv preprint arXiv:1412.6980.\n \"\"\"\n params, grads = list(updates.items())[0]\n updates = OrderedDict(updates)\n t_prev = theano.shared(np.asarray(0., dtype=theano.config.floatX))\n one = TT.constant(1)\n t = t_prev+1\n a_t = learning_rate/(one-beta1**t)\n value = params.get_value(borrow=True)\n m_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n u_prev = theano.shared(np.zeros(value.shape, dtype=value.dtype),\n broadcastable=params.broadcastable)\n m_t = beta1*m_prev+(one-beta1)*grads\n u_t = TT.maximum(beta2*u_prev, abs(grads))\n step = a_t*m_t/(u_t+epsilon)\n updates[m_prev] = m_t\n updates[u_prev] = u_t\n updates[params] = params-step\n updates[t_prev] = t\n return updates"
] | [
[
"numpy.asarray",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
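The Optimizer class in the file above rewrites an OrderedDict mapping a shared parameter to its gradient into Theano update expressions. A minimal usage sketch, assuming the module is importable from the listed file path and that a (legacy) Theano installation is available; none of this appears in the original file:

from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as TT
from vafnet.util.Optimizer import Optimizer   # assumed import path

w = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='w')
x = TT.vector('x')
loss = TT.sqr(TT.dot(x, w) - 1.0)
grad = TT.grad(loss, w)

# sgd() turns {param: gradient} into {param: param - lr*gradient};
# momentum() then adds a velocity term on top of that update.
updates = Optimizer.sgd(OrderedDict([(w, grad)]), learning_rate=0.1)
updates = Optimizer.momentum(updates, momentum=0.9)

train_step = theano.function([x], loss, updates=updates)
print(train_step(np.ones(3, dtype=theano.config.floatX)))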
loekgugten/fltk-testbed | [
"9af7b40c877d6f07a1ec24fe078ea379a0152745"
] | [
"fltk/nets/simple.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SimpleNet(nn.Module):\n def __init__(self, name=None, created_time=None):\n super(SimpleNet, self).__init__()\n self.created_time = created_time\n self.name = name\n\n def visualize(self, vis, epoch, acc, loss=None, eid='main', is_poisoned=False, name=None):\n if name is None:\n name = self.name + '_poisoned' if is_poisoned else self.name\n vis.line(X=np.array([epoch]), Y=np.array([acc]), name=name, win='vacc_{0}'.format(self.created_time), env=eid,\n update='append' if vis.win_exists('vacc_{0}'.format(self.created_time), env=eid) else None,\n opts=dict(showlegend=True, title='Accuracy_{0}'.format(self.created_time),\n width=700, height=400))\n if loss is not None:\n vis.line(X=np.array([epoch]), Y=np.array([loss]), name=name, env=eid,\n win='vloss_{0}'.format(self.created_time),\n update='append' if vis.win_exists('vloss_{0}'.format(self.created_time), env=eid) else None,\n opts=dict(showlegend=True, title='Loss_{0}'.format(self.created_time), width=700, height=400))\n\n return\n\n def train_vis(self, vis, epoch, data_len, batch, loss, eid='main', name=None, win='vtrain'):\n\n vis.line(X=np.array([(epoch - 1) * data_len + batch]), Y=np.array([loss]),\n env=eid,\n name=f'{name}' if name is not None else self.name, win=f'{win}_{self.created_time}',\n update='append' if vis.win_exists(f'{win}_{self.created_time}', env=eid) else None,\n opts=dict(showlegend=True, width=700, height=400, title='Train loss_{0}'.format(self.created_time)))\n\n def save_stats(self, epoch, loss, acc):\n self.stats['epoch'].append(epoch)\n self.stats['loss'].append(loss)\n self.stats['acc'].append(acc)\n\n def copy_params(self, state_dict, coefficient_transfer=100):\n\n own_state = self.state_dict()\n\n for name, param in state_dict.items():\n if name in own_state:\n shape = param.shape\n # no cuda\n # random_tensor = (torch.cuda.FloatTensor(shape).random_(0, 100) <= coefficient_transfer).type(\n # torch.cuda.FloatTensor)\n random_tensor = (torch.FloatTensor(shape).random_(0, 100) <= coefficient_transfer).type(\n torch.FloatTensor)\n negative_tensor = (random_tensor * -1) + 1\n # own_state[name].copy_(param)\n own_state[name].copy_(param.clone())\n\n\nclass SimpleMnist(SimpleNet):\n def __init__(self, name=None, created_time=None):\n super(SimpleMnist, self).__init__(name, created_time)\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n"
] | [
[
"torch.nn.Dropout2d",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.FloatTensor",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
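A quick shape sanity check for the SimpleMnist network defined above; the import path is assumed from the listed file path and the dummy batch is arbitrary:

import torch
from fltk.nets.simple import SimpleMnist   # assumed import path

model = SimpleMnist(name="demo")
model.eval()
dummy = torch.randn(4, 1, 28, 28)          # 4 single-channel 28x28 images
with torch.no_grad():
    log_probs = model(dummy)               # log-softmax over the 10 digit classes
print(log_probs.shape)                     # torch.Size([4, 10])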
NeuronQ/nmlu | [
"f2f37320144a0d41cbdc4afafe1251f759c1841e"
] | [
"nmlu/eda.py"
] | [
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport scipy\n\n\ndef set_plot_sane_defaults(mode='classic'):\n set_plot_sizes(sml=12, med=14, big=16)\n # see https://matplotlib.org/gallery/style_sheets/style_sheets_reference.html\n mpl.style.use({\n 'classic': 'default',\n 'serious': 'bmh',\n 'dark': 'dark_background',\n 'boring': 'classic',\n 'cool': 'ggplot',\n 'seaborn': 'seaborn',\n }[mode])\n mpl.rcParams['figure.facecolor'] = 'white' if mode != 'dark' else 'black'\n\n\ndef set_plot_sizes(sml=12, med=14, big=16):\n plt.rc('font', size=sml) # controls default text sizes\n plt.rc('axes', titlesize=sml) # fontsize of the axes title\n plt.rc('axes', labelsize=med) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=sml) # fontsize of the tick labels\n plt.rc('ytick', labelsize=sml) # fontsize of the tick labels\n plt.rc('legend', fontsize=sml) # legend fontsize\n plt.rc('figure', titlesize=big) # fontsize of the figure title\n\n\ndef set_plot_bgs(color='white'):\n mpl.rcParams['figure.facecolor'] = 'white'\n\n\ndef plot_pairs_dists(df, y_col=None, except_cols=None, figsize=None, palette=None):\n if except_cols is None:\n except_cols = set()\n if y_col is not None:\n except_cols.add(y_col)\n return sns.pairplot(\n df,\n hue=y_col,\n palette=palette,\n vars=set(df.columns.values).difference(except_cols),\n size=figsize\n )\n\n\ndef plot_heatmap(df, figsize=(16, 16)):\n fig, ax = plt.subplots(figsize=figsize)\n return sns.heatmap(df.corr(), annot=True, ax=ax)\n\n\ndef plot_pairs_corr(df, figsize=(18, 16)):\n axes = pd.plotting.scatter_matrix(df, alpha=0.3, figsize=figsize, diagonal='kde')\n corr = df.corr().values\n for i, j in zip(*np.triu_indices_from(axes, k=1)):\n axes[i, j].annotate(\"%.3f\" % corr[i, j], (0.8, 0.8), xycoords='axes fraction', ha='center', va='center')\n\n\ndef show_cat_feature_vs_y(df, fld, y_fld):\n df = df.reset_index()\n pivot_args = dict(\n data=df, index=fld, columns=y_fld,\n aggfunc='size', fill_value=0,\n )\n tbl_args = pivot_args.copy()\n tbl_args.update(aggfunc='count', values='index', margins=True)\n tbl = pd.pivot_table(**tbl_args)\n print(tbl)\n plot_tbl = pd.pivot_table(**pivot_args)\n plot_tbl.plot.bar()\n plt.show()\n\n\ndef plot_dendrogram(df, figsize=(16, 10)):\n corr = np.round(scipy.stats.spearmanr(df).correlation, 4)\n corr_condensed = scipy.cluster.hierarchy.distance.squareform(1 - corr)\n z = scipy.cluster.hierarchy.linkage(corr_condensed, method='average')\n plt.figure(figsize=figsize)\n return scipy.cluster.hierarchy.dendrogram(\n z, labels=df.columns, orientation='left', leaf_font_size=16)\n"
] | [
[
"scipy.cluster.hierarchy.distance.squareform",
"matplotlib.style.use",
"matplotlib.pyplot.figure",
"numpy.triu_indices_from",
"scipy.cluster.hierarchy.dendrogram",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"scipy.cluster.hierarchy.linkage",
"scipy.stats.spearmanr",
"matplotlib.pyplot.show",
"pandas.plotting.scatter_matrix",
"pandas.pivot_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
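An illustrative call pattern for the EDA helpers above, assuming the import path from the listed file and a made-up DataFrame (the plots open in whatever matplotlib backend is active):

import numpy as np
import pandas as pd
from nmlu.eda import set_plot_sane_defaults, plot_heatmap, show_cat_feature_vs_y  # assumed path

set_plot_sane_defaults('classic')
rng = np.random.default_rng(0)
df = pd.DataFrame({
    'x1': rng.normal(size=100),
    'x2': rng.normal(size=100),
    'color': rng.choice(['red', 'blue'], size=100),
    'y': rng.integers(0, 2, size=100),
})
plot_heatmap(df[['x1', 'x2', 'y']], figsize=(6, 6))  # annotated correlation heatmap
show_cat_feature_vs_y(df, 'color', 'y')              # per-class counts for a categorical column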
ghelia/deel | [
"6ff67d7246daf12d1884357010dd82842fbc31d1"
] | [
"deel/model/librcnn/proposal_layer.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Mofidied by:\n# Copyright (c) 2016 Shunta Saito\n\n# Original work by:\n# -----------------------------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see fast-rcnn/LICENSE for details]\n# Written by Ross Girshick\n# https://github.com/rbgirshick/py-faster-rcnn\n# -----------------------------------------------------------------------------\n\nfrom chainer.cuda import to_cpu\nfrom cpu_nms import cpu_nms as nms\nfrom bbox_transform import bbox_transform_inv\nfrom bbox_transform import clip_boxes\nfrom generate_anchors import generate_anchors\n\nimport numpy as np\n\n\nclass ProposalLayer(object):\n \"\"\"Generate deterministic proposal regions (All on CPU)\n\n Outputs object detection proposals by applying estimated bounding-box\n transformations to a set of regular boxes (called \"anchors\").\n \"\"\"\n RPN_NMS_THRESH = 0.7\n RPN_PRE_NMS_TOP_N = 12000\n RPN_POST_NMS_TOP_N = 2000\n RPN_MIN_SIZE = 16\n\n def __init__(self, feat_stride=16, anchor_scales=[4, 8, 16, 32]):\n self._feat_stride = feat_stride\n self._anchors = generate_anchors(scales=np.array(anchor_scales))\n self._num_anchors = self._anchors.shape[0]\n\n def __call__(self, rpn_cls_prob, rpn_bbox_pred, im_info, train):\n # Algorithm:\n #\n # for each (H, W) location i\n # generate A anchor boxes centered on cell i\n # apply predicted bbox deltas at cell i to each of the A anchors\n # clip predicted boxes to image\n # remove predicted boxes with either height or width < threshold\n # sort all (proposal, score) pairs by score from highest to lowest\n # take top pre_nms_topN proposals before NMS\n # apply NMS with threshold 0.7 to remaining proposals\n # take after_nms_topN proposals after NMS\n # return the top proposals (-> RoIs top, scores top)\n\n pre_nms_topN = self.RPN_PRE_NMS_TOP_N if train else 6000\n post_nms_topN = self.RPN_POST_NMS_TOP_N if train else 300\n nms_thresh = self.RPN_NMS_THRESH\n min_size = self.RPN_MIN_SIZE\n\n # the first set of _num_anchors channels are bg probs\n # the second set are the fg probs, which we want\n scores = to_cpu(rpn_cls_prob.data[:, self._num_anchors:, :, :])\n bbox_deltas = to_cpu(rpn_bbox_pred.data)\n im_info = im_info[0, :]\n\n # 1. 
Generate proposals from bbox deltas and shifted anchors\n height, width = scores.shape[-2:]\n\n # Enumerate all shifts\n shift_x = np.arange(0, width) * self._feat_stride\n shift_y = np.arange(0, height) * self._feat_stride\n shift_x, shift_y = np.asarray(np.meshgrid(shift_x, shift_y))\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose()\n\n # Enumerate all shifted anchors:\n #\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = self._num_anchors\n K = shifts.shape[0]\n anchors = self._anchors.reshape((1, A, 4)) + \\\n shifts.reshape((1, K, 4)).transpose((1, 0, 2))\n anchors = anchors.reshape((K * A, 4))\n\n # Transpose and reshape predicted bbox transformations to get them\n # into the same order as the anchors:\n #\n # bbox deltas will be (1, 4 * A, H, W) format\n # transpose to (1, H, W, 4 * A)\n # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)\n # in slowest to fastest order\n bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))\n\n # Same story for the scores:\n #\n # scores are (1, A, H, W) format\n # transpose to (1, H, W, A)\n # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)\n scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))\n\n # Convert anchors into proposals via bbox transformations\n proposals = bbox_transform_inv(anchors, bbox_deltas, -1)\n\n # 2. clip predicted boxes to image\n proposals = clip_boxes(proposals, im_info[:2])\n\n # 3. remove predicted boxes with either height or width < threshold\n # (NOTE: convert min_size to input image scale stored in im_info[2])\n keep = _filter_boxes(proposals, min_size * im_info[2])\n proposals = proposals[keep, :]\n scores = scores[keep]\n\n # 4. sort all (proposal, score) pairs by score from highest to lowest\n # 5. take top pre_nms_topN (e.g. 6000)\n order = scores.ravel().argsort()[::-1]\n if pre_nms_topN > 0:\n order = order[:pre_nms_topN]\n proposals = proposals[order, :]\n scores = scores[order]\n\n # 6. apply nms (e.g. threshold = 0.7)\n # 7. take after_nms_topN (e.g. 300)\n # 8. return the top proposals (-> RoIs top)\n keep = nms(np.hstack((proposals, scores)), nms_thresh)\n if post_nms_topN > 0:\n keep = keep[:post_nms_topN]\n proposals = proposals[keep, :]\n scores = scores[keep]\n\n # Output rois blob\n # Our RPN implementation only supports a single input image, so all\n # batch inds are 0\n batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)\n rois = np.asarray(np.hstack((batch_inds, proposals)), dtype=np.float32)\n\n return rois\n\n\ndef _filter_boxes(boxes, min_size):\n \"\"\"Remove all boxes with any side smaller than min_size.\"\"\"\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep\n"
] | [
[
"numpy.hstack",
"numpy.arange",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
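The anchor/shift enumeration that the comments in ProposalLayer.__call__ describe ("add A anchors (1, A, 4) to cell K shifts (K, 1, 4)") can be reproduced with plain NumPy. The anchor boxes and feature-map size below are toy values, not the output of generate_anchors:

import numpy as np

anchors = np.array([[-8., -8., 8., 8.], [-16., -16., 16., 16.]])  # (A, 4), A = 2
feat_stride, height, width = 16, 2, 2
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()  # (K, 4), K = 4
A, K = anchors.shape[0], shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) +
               shifts.reshape((1, K, 4)).transpose((1, 0, 2))).reshape((K * A, 4))
print(all_anchors.shape)  # (8, 4): every anchor placed at every feature-map cell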
Chen-Wang-CUHK/fast_abs_rl | [
"1416fa9ed7b7c35581945c5b455442e3343ecbda"
] | [
"decode_full_model.py"
] | [
"\"\"\" run decoding of rnn-ext + abs + RL (+ rerank)\"\"\"\nimport argparse\nimport json\nimport os\nfrom os.path import join\nfrom datetime import timedelta\nfrom time import time\nfrom collections import Counter, defaultdict\nfrom itertools import product\nfrom functools import reduce\nimport operator as op\n\nfrom cytoolz import identity, concat, curry\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import multiprocessing as mp\n\nfrom data.batcher import tokenize\n\nfrom decoding import Abstractor, RLExtractor, DecodeDataset, BeamAbstractor\nfrom decoding import make_html_safe\n\n\ndef decode(save_path, model_dir, split, batch_size,\n beam_size, diverse, max_len, cuda):\n start = time()\n # setup model\n with open(join(model_dir, 'meta.json')) as f:\n meta = json.loads(f.read())\n if meta['net_args']['abstractor'] is None:\n # NOTE: if no abstractor is provided then\n # the whole model would be extractive summarization\n assert beam_size == 1\n abstractor = identity\n else:\n if beam_size == 1:\n abstractor = Abstractor(join(model_dir, 'abstractor'),\n max_len, cuda)\n else:\n abstractor = BeamAbstractor(join(model_dir, 'abstractor'),\n max_len, cuda)\n extractor = RLExtractor(model_dir, cuda=cuda)\n\n # setup loader\n def coll(batch):\n articles = list(filter(bool, batch))\n return articles\n dataset = DecodeDataset(split)\n\n n_data = len(dataset)\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=False, num_workers=4,\n collate_fn=coll\n )\n\n # prepare save paths and logs\n os.makedirs(join(save_path, 'output'))\n dec_log = {}\n dec_log['abstractor'] = meta['net_args']['abstractor']\n dec_log['extractor'] = meta['net_args']['extractor']\n dec_log['rl'] = True\n dec_log['split'] = split\n dec_log['beam'] = beam_size\n dec_log['diverse'] = diverse\n with open(join(save_path, 'log.json'), 'w') as f:\n json.dump(dec_log, f, indent=4)\n\n # Decoding\n i = 0\n with torch.no_grad():\n for i_debug, raw_article_batch in enumerate(loader):\n tokenized_article_batch = map(tokenize(None), raw_article_batch)\n ext_arts = []\n ext_inds = []\n for raw_art_sents in tokenized_article_batch:\n ext = extractor(raw_art_sents)[:-1] # exclude EOE\n if not ext:\n # use top-5 if nothing is extracted\n # in some rare cases rnn-ext does not extract at all\n ext = list(range(5))[:len(raw_art_sents)]\n else:\n ext = [i.item() for i in ext]\n ext_inds += [(len(ext_arts), len(ext))]\n ext_arts += [raw_art_sents[i] for i in ext]\n if beam_size > 1:\n all_beams = abstractor(ext_arts, beam_size, diverse)\n dec_outs = rerank_mp(all_beams, ext_inds)\n else:\n dec_outs = abstractor(ext_arts)\n assert i == batch_size*i_debug\n for j, n in ext_inds:\n decoded_sents = [' '.join(dec) for dec in dec_outs[j:j+n]]\n with open(join(save_path, 'output/{}.dec'.format(i)),\n 'w') as f:\n f.write(make_html_safe('\\n'.join(decoded_sents)))\n i += 1\n print('{}/{} ({:.2f}%) decoded in {} seconds\\r'.format(\n i, n_data, i/n_data*100,\n timedelta(seconds=int(time()-start))\n ), end='')\n print()\n\n_PRUNE = defaultdict(\n lambda: 2,\n {1:5, 2:5, 3:5, 4:5, 5:5, 6:4, 7:3, 8:3}\n)\n\ndef rerank(all_beams, ext_inds):\n beam_lists = (all_beams[i: i+n] for i, n in ext_inds if n > 0)\n return list(concat(map(rerank_one, beam_lists)))\n\ndef rerank_mp(all_beams, ext_inds):\n beam_lists = [all_beams[i: i+n] for i, n in ext_inds if n > 0]\n with mp.Pool(8) as pool:\n reranked = pool.map(rerank_one, beam_lists)\n return list(concat(reranked))\n\ndef rerank_one(beams):\n @curry\n def process_beam(beam, n):\n 
for b in beam[:n]:\n b.gram_cnt = Counter(_make_n_gram(b.sequence))\n return beam[:n]\n beams = map(process_beam(n=_PRUNE[len(beams)]), beams)\n best_hyps = max(product(*beams), key=_compute_score)\n dec_outs = [h.sequence for h in best_hyps]\n return dec_outs\n\ndef _make_n_gram(sequence, n=2):\n return (tuple(sequence[i:i+n]) for i in range(len(sequence)-(n-1)))\n\ndef _compute_score(hyps):\n all_cnt = reduce(op.iadd, (h.gram_cnt for h in hyps), Counter())\n repeat = sum(c-1 for g, c in all_cnt.items() if c > 1)\n # changed by wchen to fix the zero division bug\n if sum(len(h.sequence) for h in hyps) !=0:\n lp = sum(h.logprob for h in hyps) / sum(len(h.sequence) for h in hyps)\n else:\n lp = 0.0\n return (-repeat, lp)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='run decoding of the full model (RL)')\n parser.add_argument('--path', required=True, help='path to store/eval')\n parser.add_argument('--model_dir', help='root of the full model')\n\n # dataset split\n data = parser.add_mutually_exclusive_group(required=True)\n data.add_argument('--val', action='store_true', help='use validation set')\n data.add_argument('--test', action='store_true', help='use test set')\n\n # decode options\n parser.add_argument('--batch', type=int, action='store', default=32,\n help='batch size of faster decoding')\n parser.add_argument('--beam', type=int, action='store', default=1,\n help='beam size for beam-search (reranking included)')\n parser.add_argument('--div', type=float, action='store', default=1.0,\n help='diverse ratio for the diverse beam-search')\n parser.add_argument('--max_dec_word', type=int, action='store', default=30,\n help='maximun words to be decoded for the abstractor')\n\n parser.add_argument('--no-cuda', action='store_true',\n help='disable GPU training')\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available() and not args.no_cuda\n\n data_split = 'test' if args.test else 'val'\n decode(args.path, args.model_dir,\n data_split, args.batch, args.beam, args.div,\n args.max_dec_word, args.cuda)\n"
] | [
[
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.multiprocessing.Pool",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
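The reranking score in the file above penalizes repeated n-grams across the selected beams (_make_n_gram plus the repeat count in _compute_score). A small self-contained illustration with a made-up token sequence:

from collections import Counter

def make_n_gram(sequence, n=2):
    # same sliding-window helper as _make_n_gram in the file above
    return (tuple(sequence[i:i + n]) for i in range(len(sequence) - (n - 1)))

seq = ['the', 'cat', 'sat', 'the', 'cat']
gram_cnt = Counter(make_n_gram(seq))
repeat = sum(c - 1 for g, c in gram_cnt.items() if c > 1)
print(gram_cnt)   # Counter({('the', 'cat'): 2, ('cat', 'sat'): 1, ('sat', 'the'): 1})
print(repeat)     # 1 -> each repeated bigram lowers the (-repeat, logprob) score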
rosteen/glue | [
"ed71979f8e0e41f993a2363b3b5a8f8c3167a130"
] | [
"glue/utils/array.py"
] | [
"import warnings\n\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\n\nimport pandas as pd\nimport bottleneck as bt\n\n__all__ = ['unique', 'shape_to_string', 'view_shape', 'stack_view',\n 'coerce_numeric', 'check_sorted', 'broadcast_to', 'unbroadcast',\n 'iterate_chunks', 'combine_slices', 'nanmean', 'nanmedian', 'nansum',\n 'nanmin', 'nanmax', 'format_minimal', 'compute_statistic',\n 'categorical_ndarray', 'index_lookup', 'ensure_numerical',\n 'broadcast_arrays_minimal', 'random_views_for_dask_array']\n\n\ndef unbroadcast(array):\n \"\"\"\n Given an array, return a new array that is the smallest subset of the\n original array that can be re-broadcasted back to the original array.\n\n See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays\n for more details.\n \"\"\"\n\n if array.ndim == 0 or not hasattr(array, 'strides'):\n return array\n\n new_shape = np.where(np.array(array.strides) == 0, 1, array.shape)\n return as_strided(array, shape=new_shape)\n\n\ndef broadcast_arrays_minimal(*arrays):\n \"\"\"\n Unbroadcast arrays then broadcast to smallest common shape.\n \"\"\"\n return np.broadcast_arrays(*[unbroadcast(array) for array in arrays])\n\n\ndef unique(array):\n \"\"\"\n Return the unique elements of the array U, as well as\n the index array I such that U[I] == array\n\n Parameters\n ----------\n array : `numpy.ndarray`\n The array to use\n\n Returns\n -------\n U : `numpy.ndarray`\n The unique elements of the array\n I : `numpy.ndarray`\n The indices such that ``U[I] == array``\n \"\"\"\n # numpy.unique doesn't handle mixed-types on python3,\n # so we use pandas\n array = np.asarray(array)\n I, U = pd.factorize(array.ravel(), sort=True)\n return U.astype(array.dtype), I.reshape(array.shape)\n\n\ndef shape_to_string(shape):\n \"\"\"\n On Windows, shape tuples use long ints which results in formatted shapes\n such as (2L, 3L). 
This function ensures that the shape is always formatted\n without the Ls.\n \"\"\"\n return \"({0})\".format(\", \".join(str(int(item)) for item in shape))\n\n\ndef view_shape(shape, view):\n \"\"\"\n Return the shape of a view of an array.\n\n Returns equivalent of ``np.zeros(shape)[view].shape`` but with minimal\n memory usage.\n\n Parameters\n ----------\n shape : tuple\n The shape of the array\n view : slice\n A valid index into a Numpy array, or None\n \"\"\"\n if view is None:\n return shape\n else:\n return np.broadcast_to(1, shape)[view].shape\n\n\ndef stack_view(shape, *views):\n shp = tuple(slice(0, s, 1) for s in shape)\n result = np.broadcast_arrays(*np.ogrid[shp])\n for v in views:\n if isinstance(v, str) and v == 'transpose':\n result = [r.T for r in result]\n continue\n\n result = [r[v] for r in result]\n\n return tuple(result)\n\n\ndef coerce_numeric(arr):\n \"\"\"\n Coerce an array into a numeric array, replacing non-numeric elements with\n nans.\n\n If the array is already a numeric type, it is returned unchanged\n\n Parameters\n ----------\n arr : `numpy.ndarray`\n The array to coerce\n \"\"\"\n\n # Already numeric type\n if np.issubdtype(arr.dtype, np.number):\n return arr\n\n # Numpy datetime64 format\n if np.issubdtype(arr.dtype, np.datetime64):\n return arr\n\n # Convert booleans to integers\n if np.issubdtype(arr.dtype, np.bool_):\n return arr.astype(np.int)\n\n # a string dtype, or anything else\n try:\n return pd.to_numeric(arr, errors='coerce')\n except AttributeError: # pandas < 0.19\n return pd.Series(arr).convert_objects(convert_numeric=True).values\n\n\ndef check_sorted(array):\n \"\"\"\n Return `True` if the array is sorted, `False` otherwise.\n \"\"\"\n # this ignores NANs, and does the right thing if nans\n # are concentrated at beginning or end of array\n # otherwise, it will miss things at nan/finite boundaries\n array = np.asarray(array)\n return not (array[:-1] > array[1:]).any()\n\n\ndef pretty_number(numbers):\n \"\"\"\n Convert a list/array of numbers into a nice list of strings\n\n Parameters\n ----------\n numbers : list\n The numbers to convert\n \"\"\"\n try:\n return [pretty_number(n) for n in numbers]\n except TypeError:\n pass\n\n n = numbers\n if n == 0:\n result = '0'\n elif (abs(n) < 1e-3) or (abs(n) > 1e3):\n result = \"%0.3e\" % n\n elif abs(int(n) - n) < 1e-3 and int(n) != 0:\n result = \"%i\" % n\n else:\n result = \"%0.3f\" % n\n if result.find('.') != -1:\n result = result.rstrip('0')\n\n return result\n\n\ndef broadcast_to(array, shape):\n \"\"\"\n Compatibility function - can be removed once we support only Numpy 1.10\n and above\n \"\"\"\n try:\n return np.broadcast_to(array, shape)\n except AttributeError:\n array = np.asarray(array)\n return np.broadcast_arrays(array, np.ones(shape, array.dtype))[0]\n\n\ndef find_chunk_shape(shape, n_max=None):\n \"\"\"\n Given the shape of an n-dimensional array, and the maximum number of\n elements in a chunk, return the largest chunk shape to use for iteration.\n\n This currently assumes the optimal chunk shape to return is for C-contiguous\n arrays.\n \"\"\"\n\n if n_max is None:\n return tuple(shape)\n\n block_shape = []\n\n max_repeat_remaining = n_max\n\n for size in shape[::-1]:\n\n if max_repeat_remaining > size:\n block_shape.append(size)\n max_repeat_remaining = max_repeat_remaining // size\n else:\n block_shape.append(max_repeat_remaining)\n max_repeat_remaining = 1\n\n return tuple(block_shape[::-1])\n\n\ndef iterate_chunks(shape, chunk_shape=None, n_max=None):\n \"\"\"\n Given a 
data shape and a chunk shape (or maximum chunk size), iteratively\n return slice objects that can be used to slice the array.\n \"\"\"\n\n # Shortcut - if there are any 0 elements in the shape, there are no\n # chunks to iterate over.\n if np.prod(shape) == 0:\n return\n\n if chunk_shape is None and n_max is None:\n raise ValueError('Either chunk_shape or n_max should be specified')\n elif chunk_shape is not None and n_max is not None:\n raise ValueError('Either chunk_shape or n_max should be specified (not both)')\n elif chunk_shape is None:\n chunk_shape = find_chunk_shape(shape, n_max)\n else:\n if len(chunk_shape) != len(shape):\n raise ValueError('chunk_shape should have the same length as shape')\n elif any(x > y for (x, y) in zip(chunk_shape, shape)):\n raise ValueError('chunk_shape should fit within shape')\n\n ndim = len(chunk_shape)\n start_index = [0] * ndim\n\n shape = list(shape)\n\n while start_index <= shape:\n\n end_index = [min(start_index[i] + chunk_shape[i], shape[i]) for i in range(ndim)]\n\n slices = tuple([slice(start_index[i], end_index[i]) for i in range(ndim)])\n\n yield slices\n\n # Update chunk index. What we do is to increment the\n # counter for the first dimension, and then if it\n # exceeds the number of elements in that direction,\n # cycle back to zero and advance in the next dimension,\n # and so on.\n start_index[0] += chunk_shape[0]\n for i in range(ndim - 1):\n if start_index[i] >= shape[i]:\n start_index[i] = 0\n start_index[i + 1] += chunk_shape[i + 1]\n\n # We can now check whether the iteration is finished\n if start_index[-1] >= shape[-1]:\n break\n\n\ndef combine_slices(slice1, slice2, length):\n \"\"\"\n Given two slices that can be applied to a 1D array and the length of that\n array, this returns a new slice which is the one that should be applied to\n the array instead of slice2 if slice1 has already been applied.\n \"\"\"\n\n beg1, end1, step1 = slice1.indices(length)\n beg2, end2, step2 = slice2.indices(length)\n\n if step1 < 0 or step2 < 0:\n raise ValueError(\"combine_slices does not support slices with negative step\")\n\n if beg2 >= end1 or end2 <= beg1:\n return slice(0, 0, 1)\n\n beg = max(beg1, beg2)\n end = min(end1, end2)\n if (beg - beg2) % step2 != 0:\n beg += step2 - ((beg - beg2) % step2)\n\n # Now we want to find the two first overlap indices inside the overlap\n # range. 
Loop over indices of second slice (but with min/max constraints\n # of first added) and check if they are valid indices given slice1\n\n indices = []\n\n for idx in range(beg, end, step2):\n if (idx - beg1) % step1 == 0:\n indices.append((idx - beg1) // step1)\n if len(indices) == 2:\n break\n\n if len(indices) == 0:\n return slice(0, 0, 1)\n elif len(indices) == 1:\n return slice(indices[0], indices[0] + 1, 1)\n else:\n end_new = (end - beg1) // step1\n if (end - beg1) % step1 != 0:\n end_new += 1\n return slice(indices[0], end_new, indices[1] - indices[0])\n\n\ndef _move_tuple_axes_first(array, axis):\n \"\"\"\n Bottleneck can only take integer axis, not tuple, so this function takes all\n the axes to be operated on and combines them into the first dimension of the\n array so that we can then use axis=0\n \"\"\"\n\n # Figure out how many axes we are operating over\n naxis = len(axis)\n\n # Add remaining axes to the axis tuple\n axis += tuple(i for i in range(array.ndim) if i not in axis)\n\n # The new position of each axis is just in order\n destination = tuple(range(array.ndim))\n\n # Reorder the array so that the axes being operated on are at the beginning\n array_new = np.moveaxis(array, axis, destination)\n\n # Figure out the size of the product of the dimensions being operated on\n first = np.prod(array_new.shape[:naxis])\n\n # Collapse the dimensions being operated on into a single dimension so that\n # we can then use axis=0 with the bottleneck functions\n array_new = array_new.reshape((first,) + array_new.shape[naxis:])\n\n return array_new\n\n\ndef nanmean(array, axis=None):\n if isinstance(axis, tuple):\n array = _move_tuple_axes_first(array, axis=axis)\n axis = 0\n return bt.nanmean(array, axis=axis)\n\n\ndef nanmedian(array, axis=None):\n if isinstance(axis, tuple):\n array = _move_tuple_axes_first(array, axis=axis)\n axis = 0\n return bt.nanmedian(array, axis=axis)\n\n\ndef nansum(array, axis=None):\n if isinstance(axis, tuple):\n array = _move_tuple_axes_first(array, axis=axis)\n axis = 0\n return bt.nansum(array, axis=axis)\n\n\ndef nanmin(array, axis=None):\n if isinstance(axis, tuple):\n array = _move_tuple_axes_first(array, axis=axis)\n axis = 0\n return bt.nanmin(array, axis=axis)\n\n\ndef nanmax(array, axis=None):\n if isinstance(axis, tuple):\n array = _move_tuple_axes_first(array, axis=axis)\n axis = 0\n return bt.nanmax(array, axis=axis)\n\n\ndef format_minimal(values):\n \"\"\"\n Find the shortest format that can be used to represent all values in an\n array such that all the string representations are different.\n\n The current implementation is not incredibly efficient, but it takes only\n ~30ms for a 1000 element array and 200ms for a 10000 element array. 
One\n could probably make a more efficient implementation but this is good enough\n for now for what we use it for.\n\n Returns the optimal format as well as an array of formatted values.\n \"\"\"\n values = np.asarray(values)\n if np.max(np.abs(values)) > 1e5 or np.min(np.diff(values)) < 1e-5:\n fmt_type = 'e'\n else:\n fmt_type = 'f'\n for ndec in range(1, 15):\n fmt = '{{:.{0}{1}}}'.format(ndec, fmt_type)\n strings = [fmt.format(x) for x in values]\n if len(strings) == len(set(strings)):\n break\n return fmt, strings\n\n\nPLAIN_FUNCTIONS = {'minimum': np.min,\n 'maximum': np.max,\n 'mean': np.mean,\n 'median': np.median,\n 'sum': np.sum,\n 'percentile': np.percentile}\n\nNAN_FUNCTIONS = {'minimum': nanmin,\n 'maximum': nanmax,\n 'mean': nanmean,\n 'median': nanmedian,\n 'sum': nansum,\n 'percentile': np.nanpercentile}\n\n\ndef compute_statistic(statistic, data, mask=None, axis=None, finite=True,\n positive=False, percentile=None):\n \"\"\"\n Compute a statistic for the data.\n\n Parameters\n ----------\n statistic : {'minimum', 'maximum', 'mean', 'median', 'sum', 'percentile'}\n The statistic to compute\n data : `numpy.ndarray`\n The data to compute the statistic for.\n mask : `numpy.ndarray`\n The mask to apply when computing the statistic.\n axis : None or int or tuple of int\n If specified, the axis/axes to compute the statistic over.\n finite : bool, optional\n Whether to include only finite values in the statistic. This should\n be `True` to ignore NaN/Inf values\n positive : bool, optional\n Whether to include only (strictly) positive values in the statistic.\n This is used for example when computing statistics of data shown in\n log space.\n percentile : float, optional\n If ``statistic`` is ``'percentile'``, the ``percentile`` argument\n should be given and specify the percentile to calculate in the\n range [0:100]\n \"\"\"\n\n data = np.asanyarray(data)\n if mask is not None:\n mask = np.asanyarray(mask, dtype=bool)\n\n # NOTE: this function should not ever have to use glue-specific objects.\n # The aim is to eventually use a fast C implementation of this function.\n\n if statistic not in PLAIN_FUNCTIONS:\n raise ValueError(\"Unrecognized statistic: {0}\".format(statistic))\n\n if (finite or positive or mask is not None) and data.dtype.kind != 'M':\n\n keep = np.ones(data.shape, dtype=bool)\n\n if finite:\n keep &= np.isfinite(data)\n\n if positive:\n keep &= data > 0\n\n if mask is not None:\n keep &= mask\n\n if axis is None:\n data = data[keep]\n else:\n # We need to force a copy since we are editing the values and we\n # might as well convert to float just in case\n data = np.array(data, dtype=float)\n data[~keep] = np.nan\n\n function = NAN_FUNCTIONS[statistic]\n\n else:\n\n function = PLAIN_FUNCTIONS[statistic]\n\n if data.size == 0:\n return np.nan\n\n if isinstance(axis, tuple) and len(axis) == 0:\n return data\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n if statistic == 'percentile':\n return function(data, percentile, axis=axis)\n else:\n return function(data, axis=axis)\n\n\nclass categorical_ndarray(np.ndarray):\n \"\"\"\n A Numpy array subclass that includes properties to find the categories and\n unique integer codes for array values.\n \"\"\"\n\n _jitter = None\n\n def __new__(cls, value, dtype=None, copy=True, order=None, subok=False,\n ndmin=0, categories=None):\n result = np.array(value, dtype=dtype, copy=copy, order=order,\n subok=True, ndmin=ndmin).view(categorical_ndarray)\n if categories is not None:\n 
result.categories = categories\n return result\n\n def __array_finalize__(self, obj):\n if isinstance(obj, categorical_ndarray):\n self.categories = obj.categories\n\n def _update_categories_and_codes(self):\n if hasattr(self, '_categories'):\n self._codes = index_lookup(self, self._categories)\n else:\n self._categories, self._codes = unique(self)\n self._categories.setflags(write=False)\n self._codes = self._codes.astype(float)\n self._codes.setflags(write=False)\n\n @property\n def categories(self):\n if not hasattr(self, '_categories'):\n self._update_categories_and_codes()\n return self._categories\n\n @categories.setter\n def categories(self, value):\n self._categories = value\n\n @property\n def codes(self):\n if not hasattr(self, '_codes'):\n self._update_categories_and_codes()\n if self._jitter is None:\n return self._codes\n else:\n return self._codes + self._jitter\n\n def jitter(self, method=None):\n \"\"\"\n Jitter the codes.\n\n Parameters\n ----------\n method : {None, 'uniform'}\n If `None`, not jittering is done (or any jittering is undone).\n If ``'uniform'``, the codes are randomized by a uniformly\n distributed random variable.\n \"\"\"\n if method is None:\n self._jitter = None\n elif method == 'uniform':\n self._jitter = np.random.random(self.shape)\n self._jitter -= 0.5\n else:\n raise ValueError(\"method should be None or 'uniform'\")\n\n\ndef ensure_numerical(values):\n if isinstance(values, categorical_ndarray):\n return values.codes\n else:\n return values\n\n\ndef index_lookup(data, items):\n \"\"\"\n Lookup which index in items each data value is equal to\n\n Parameters\n ----------\n data\n An array-like object\n items\n Array-like of unique values\n\n Returns\n -------\n array\n If result[i] is finite, then data[i] = categories[result[i]]\n Otherwise, data[i] is not in the categories list\n \"\"\"\n\n # np.searchsorted doesn't work on mixed types in Python3\n\n ndata, ncat = len(data), len(items)\n data = pd.DataFrame({'data': data, 'row': np.arange(ndata)})\n cats = pd.DataFrame({'items': items,\n 'cat_row': np.arange(ncat)})\n\n m = pd.merge(data, cats, left_on='data', right_on='items')\n result = np.zeros(ndata, dtype=float) * np.nan\n result[np.array(m.row)] = m.cat_row\n return result\n\n\ndef random_views_for_dask_array(array, n_random_samples, n_chunks):\n \"\"\"\n Return a list of views to extract random values from a dask array in an\n efficient way taking into account the chunk layout. This will return\n n_chunks views such that all views together add up to approximately\n n_random_samples samples.\n \"\"\"\n\n # Find the indices of the chunks to extract\n indices = [np.random.randint(dimsize, size=n_chunks) for dimsize in array.numblocks]\n\n # Determine the boundaries of chunks along each dimension\n chunk_indices = [np.hstack([0, np.cumsum([size for size in sizes])]) for sizes in array.chunks]\n\n n_per_chunk = n_random_samples // n_chunks\n\n all_slices = []\n for ichunk in range(n_chunks):\n slices = []\n remaining_size = n_per_chunk\n for idim in range(array.ndim):\n start = chunk_indices[idim][indices[idim][ichunk]]\n stop = chunk_indices[idim][indices[idim][ichunk] + 1]\n if stop - start > remaining_size:\n stop = start + remaining_size\n slices.append(slice(start, stop))\n remaining_size //= (stop - start)\n remaining_size = max(1, remaining_size)\n all_slices.append(tuple(slices))\n\n return all_slices\n"
] | [
[
"pandas.merge",
"pandas.Series",
"numpy.asarray",
"numpy.issubdtype",
"numpy.cumsum",
"numpy.lib.stride_tricks.as_strided",
"numpy.moveaxis",
"numpy.random.randint",
"numpy.arange",
"numpy.asanyarray",
"numpy.diff",
"numpy.zeros",
"pandas.to_numeric",
"numpy.broadcast_arrays",
"numpy.array",
"numpy.random.random",
"numpy.abs",
"numpy.isfinite",
"numpy.ones",
"numpy.broadcast_to",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
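Editor's note: `unbroadcast`/`broadcast_arrays_minimal` in the row above rely on the fact that dimensions created by broadcasting have zero strides, so the original compact data can be recovered with `as_strided`. A minimal standalone sketch of that stride trick with a toy array (it does not import glue itself):

```python
import numpy as np
from numpy.lib.stride_tricks import as_strided

def unbroadcast(array):
    """Return the smallest view that re-broadcasts back to `array`
    (same idea as glue.utils.array.unbroadcast shown above)."""
    if array.ndim == 0 or not hasattr(array, "strides"):
        return array
    # Dimensions produced by broadcasting have stride 0; shrink them to size 1.
    new_shape = np.where(np.array(array.strides) == 0, 1, array.shape)
    return as_strided(array, shape=new_shape)

if __name__ == "__main__":
    small = np.arange(4).reshape(4, 1)           # 4 stored values
    big = np.broadcast_to(small, (4, 1000))      # looks like 4000 values, stores 4
    compact = unbroadcast(big)
    print(big.shape, compact.shape)              # (4, 1000) (4, 1)
    assert np.array_equal(np.broadcast_to(compact, big.shape), big)
```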
Rabrg/OpenNMT-py | [
"6b142fdce81edbb31cffebce89b7dbd93c35a1f8"
] | [
"tools/embeddings_to_torch.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import division\nimport six\nimport sys\nimport numpy as np\nimport argparse\nimport torch\n\n\ndef get_vocabs(dict_file):\n vocabs = torch.load(dict_file)\n\n enc_vocab, dec_vocab = None, None\n\n # the vocab object is a list of tuple (name, torchtext.Vocab)\n # we iterate over this list and associate vocabularies based on the name\n for vocab in vocabs:\n if vocab[0] == 'src':\n enc_vocab = vocab[1]\n if vocab[0] == 'tgt':\n dec_vocab = vocab[1]\n assert type(None) not in [type(enc_vocab), type(dec_vocab)]\n\n print(\"From: %s\" % dict_file)\n print(\"\\t* source vocab: %d words\" % len(enc_vocab))\n print(\"\\t* target vocab: %d words\" % len(dec_vocab))\n\n return enc_vocab, dec_vocab\n\n\ndef get_embeddings(file, opt):\n embs = dict()\n\n for (i, l) in enumerate(open(file, 'rb')):\n if i < opt.skip_lines:\n continue\n if not l:\n break\n if len(l) == 0:\n continue\n\n l_split = l.decode('utf8').strip().split()\n if len(l_split) == 2:\n continue\n embs[l_split[0]] = [float(em) for em in l_split[1:]]\n print(\"Got {} embeddings from {}\".format(len(embs), file))\n\n return embs\n\n\ndef match_embeddings(vocab, emb, opt):\n dim = len(six.next(six.itervalues(emb)))\n filtered_embeddings = np.zeros((len(vocab), dim))\n count = {\"match\": 0, \"miss\": 0}\n for w, w_id in vocab.stoi.items():\n if w in emb:\n filtered_embeddings[w_id] = emb[w]\n count['match'] += 1\n else:\n if opt.verbose:\n print(u\"not found:\\t{}\".format(w), file=sys.stderr)\n count['miss'] += 1\n\n return torch.Tensor(filtered_embeddings), count\n\n\nTYPES = [\"GloVe\", \"word2vec\"]\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='embeddings_to_torch.py')\n parser.add_argument('-emb_file', required=True,\n help=\"Embeddings from this file\")\n parser.add_argument('-output_file', required=True,\n help=\"Output file for the prepared data\")\n parser.add_argument('-dict_file', required=True,\n help=\"Dictionary file\")\n parser.add_argument('-verbose', action=\"store_true\", default=False)\n parser.add_argument('-skip_lines', type=int, default=0,\n help=\"Skip first lines of the embedding file\")\n parser.add_argument('-type', choices=TYPES, default=\"GloVe\")\n opt = parser.parse_args()\n\n enc_vocab, dec_vocab = get_vocabs(opt.dict_file)\n if opt.type == \"word2vec\":\n opt.skip_lines = 1\n\n embeddings = get_embeddings(opt.emb_file, opt)\n\n filtered_enc_embeddings, enc_count = match_embeddings(enc_vocab,\n embeddings,\n opt)\n filtered_dec_embeddings, dec_count = match_embeddings(dec_vocab,\n embeddings,\n opt)\n\n print(\"\\nMatching: \")\n match_percent = [_['match'] / (_['match'] + _['miss']) * 100\n for _ in [enc_count, dec_count]]\n print(\"\\t* enc: %d match, %d missing, (%.2f%%)\" % (enc_count['match'],\n enc_count['miss'],\n match_percent[0]))\n print(\"\\t* dec: %d match, %d missing, (%.2f%%)\" % (dec_count['match'],\n dec_count['miss'],\n match_percent[1]))\n\n print(\"\\nFiltered embeddings:\")\n print(\"\\t* enc: \", filtered_enc_embeddings.size())\n print(\"\\t* dec: \", filtered_dec_embeddings.size())\n\n enc_output_file = opt.output_file + \".enc.pt\"\n dec_output_file = opt.output_file + \".dec.pt\"\n print(\"\\nSaving embedding as:\\n\\t* enc: %s\\n\\t* dec: %s\"\n % (enc_output_file, dec_output_file))\n torch.save(filtered_enc_embeddings, enc_output_file)\n torch.save(filtered_dec_embeddings, dec_output_file)\n print(\"\\nDone.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.save",
"torch.Tensor",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
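Editor's note: the embeddings script in the row above matches pretrained word vectors to a vocabulary, leaving zeros for missing words, and writes the resulting matrix with `torch.save`. A condensed sketch of that matching step on an in-memory toy vocabulary; the `stoi` mapping, the example vectors, and the output filename are made up for illustration.

```python
import numpy as np
import torch

def match_embeddings(stoi, emb, dim):
    """Build a |vocab| x dim matrix, copying vectors for words found in `emb`
    and leaving zeros (to be learned) for the rest."""
    filtered = np.zeros((len(stoi), dim))
    count = {"match": 0, "miss": 0}
    for word, idx in stoi.items():
        if word in emb:
            filtered[idx] = emb[word]
            count["match"] += 1
        else:
            count["miss"] += 1
    return torch.Tensor(filtered), count

if __name__ == "__main__":
    stoi = {"<unk>": 0, "hello": 1, "world": 2}          # toy vocab
    emb = {"hello": [0.1, 0.2], "world": [0.3, 0.4]}     # toy pretrained vectors
    weights, count = match_embeddings(stoi, emb, dim=2)
    print(weights.size(), count)   # torch.Size([3, 2]) {'match': 2, 'miss': 1}
    torch.save(weights, "toy_embeddings.enc.pt")
    print(torch.load("toy_embeddings.enc.pt").shape)
```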
RandolphVI/Music-Recommendation | [
"2e5d4e57675ff9a69a7445876bebc242d94b1314"
] | [
"NN/input/training/script/isrc_process.py"
] | [
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\n\n## load the data\ntrain = pd.read_csv('../temporal_data/train_id.csv')\ntest = pd.read_csv('../temporal_data/test_id.csv')\nsong = pd.read_csv('../temporal_data/songs_id_cnt.csv')\n\ndata = train[['msno', 'song_id']].append(test[['msno', 'song_id']])\n\nprint('Data loaded.')\n\n## isrc process\nisrc = song['isrc']\nsong['cc'] = isrc.str.slice(0, 2)\nsong['xxx'] = isrc.str.slice(2, 5)\nsong['yy'] = isrc.str.slice(5, 7).astype(float)\nsong['yy'] = song['yy'].apply(lambda x: 2000+x if x < 18 else 1900+x)\n\nsong['cc'] = LabelEncoder().fit_transform(song['cc'].astype(str))\nsong['xxx'] = LabelEncoder().fit_transform(song['xxx'].astype(str))\nsong['isrc_missing'] = (song['cc'] == 0) * 1.0\n\n## song_cnt\n# 根据歌曲国家划分歌曲\nsong_cc_cnt = song.groupby(by='cc').count()['song_id'].to_dict()\nsong_cc_cnt[0] = None\nsong['cc_song_cnt'] = song['cc'].apply(lambda x: song_cc_cnt[x] if not np.isnan(x) else None)\n\n# 根据歌曲出版商划分歌曲\nsong_xxx_cnt = song.groupby(by='xxx').count()['song_id'].to_dict()\nsong_xxx_cnt[0] = None\nsong['xxx_song_cnt'] = song['xxx'].apply(lambda x: song_xxx_cnt[x] if not np.isnan(x) else None)\n\n# 根据歌曲年份划分歌曲\nsong_yy_cnt = song.groupby(by='yy').count()['song_id'].to_dict()\nsong_yy_cnt[0] = None\nsong['yy_song_cnt'] = song['yy'].apply(lambda x: song_yy_cnt[x] if not np.isnan(x) else None)\n\ndata = data.merge(song, on='song_id', how='left')\n\n# 每个国家被多少用户听过\nsong_cc_cnt = data.groupby(by='cc').count()['msno'].to_dict()\nsong_cc_cnt[0] = None\nsong['cc_rec_cnt'] = song['cc'].apply(lambda x: song_cc_cnt[x] if not np.isnan(x) else None)\n\n# 每个出版商被多少用户听过\nsong_xxx_cnt = data.groupby(by='xxx').count()['msno'].to_dict()\nsong_xxx_cnt[0] = None\nsong['xxx_rec_cnt'] = song['xxx'].apply(lambda x: song_xxx_cnt[x] if not np.isnan(x) else None)\n\n# 每个年份被多少用户听过\nsong_yy_cnt = data.groupby(by='yy').count()['msno'].to_dict()\nsong_yy_cnt[0] = None\nsong['yy_rec_cnt'] = song['yy'].apply(lambda x: song_yy_cnt[x] if not np.isnan(x) else None)\n\n## to_csv\nfeatures = ['cc_song_cnt', 'xxx_song_cnt', 'yy_song_cnt', 'cc_rec_cnt', 'xxx_rec_cnt', 'yy_rec_cnt']\nfor feat in features:\n song[feat] = np.log1p(song[feat])\n\n# 删去歌曲 'name' 与 'isrc' 特征\nsong.drop(['name', 'isrc'], axis=1, inplace=True)\nsong.to_csv('../temporal_data/songs_id_cnt_isrc.csv', index=False)\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"numpy.log1p",
"numpy.isnan",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
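Editor's note: the preprocessing script in the row above splits each ISRC into a country code (first 2 characters), a registrant code (next 3), and a two-digit year, pivoting values below 18 into the 2000s, then label-encodes the string parts. A small self-contained sketch of that parsing step on hypothetical ISRC values:

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Hypothetical ISRCs; in the original script these come from the songs table.
song = pd.DataFrame({"isrc": ["USRC17607839", "GBUM71029604", None]})

isrc = song["isrc"]
song["cc"] = isrc.str.slice(0, 2)             # country code
song["xxx"] = isrc.str.slice(2, 5)            # registrant code
song["yy"] = isrc.str.slice(5, 7).astype(float)
# Two-digit year: values below 18 are assumed to be 20xx, otherwise 19xx.
song["yy"] = song["yy"].apply(lambda x: 2000 + x if x < 18 else 1900 + x)

# Encode the string columns as integers; missing ISRCs become their own class.
song["cc"] = LabelEncoder().fit_transform(song["cc"].astype(str))
song["xxx"] = LabelEncoder().fit_transform(song["xxx"].astype(str))
print(song)
```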
ph09/fogernetes | [
"f766434388ef8cd76e9e909c6f4bef2eb112f642"
] | [
"fodeo/core/central/YOLO/test/test_images.py"
] | [
"# coding='utf-8'\nimport os\nimport sys\nimport numpy as np\nimport time\nimport datetime\nimport json\nimport importlib\nimport logging\nimport shutil\nimport cv2\nimport random\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.ticker import NullLocator\n\nimport torch\nimport torch.nn as nn\n\n\nMY_DIRNAME = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.join(MY_DIRNAME, '..'))\nfrom nets.model_main import ModelMain\nfrom nets.yolo_loss import YOLOLoss\nfrom common.utils import non_max_suppression, bbox_iou\n\ncmap = plt.get_cmap('tab20b')\ncolors = [cmap(i) for i in np.linspace(0, 1, 20)]\n\n\ndef test(config):\n is_training = False\n # Load and initialize network\n net = ModelMain(config, is_training=is_training)\n net.train(is_training)\n\n # Set data parallel\n net = nn.DataParallel(net)\n # net = net.cuda()\n\n # Restore pretrain model\n if config[\"pretrain_snapshot\"]:\n logging.info(\"load checkpoint from {}\".format(config[\"pretrain_snapshot\"]))\n state_dict = torch.load(config[\"pretrain_snapshot\"], map_location='cpu')\n net.load_state_dict(state_dict)\n else:\n raise Exception(\"missing pretrain_snapshot!!!\")\n\n # YOLO loss with 3 scales\n yolo_losses = []\n for i in range(3):\n yolo_losses.append(YOLOLoss(config[\"yolo\"][\"anchors\"][i],\n config[\"yolo\"][\"classes\"], (config[\"img_w\"], config[\"img_h\"])))\n\n # prepare images path\n images_name = os.listdir(config[\"images_path\"])\n images_path = [os.path.join(config[\"images_path\"], name) for name in images_name]\n if len(images_path) == 0:\n raise Exception(\"no image found in {}\".format(config[\"images_path\"]))\n\n # Start inference\n batch_size = config[\"batch_size\"]\n for step in range(0, len(images_path), batch_size):\n # preprocess\n images = []\n images_origin = []\n for path in images_path[step*batch_size: (step+1)*batch_size]:\n logging.info(\"processing: {}\".format(path))\n image = cv2.imread(path, cv2.IMREAD_COLOR)\n if image is None:\n logging.error(\"read path error: {}. skip it.\".format(path))\n continue\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images_origin.append(image) # keep for save result\n image = cv2.resize(image, (config[\"img_w\"], config[\"img_h\"]),\n interpolation=cv2.INTER_LINEAR)\n image = image.astype(np.float32)\n image /= 255.0\n image = np.transpose(image, (2, 0, 1))\n image = image.astype(np.float32)\n images.append(image)\n images = np.asarray(images)\n # images = torch.from_numpy(images).cuda()\n images = torch.from_numpy(images)\n # inference\n with torch.no_grad():\n outputs = net(images)\n output_list = []\n for i in range(3):\n output_list.append(yolo_losses[i](outputs[i]))\n output = torch.cat(output_list, 1)\n batch_detections = non_max_suppression(output, config[\"yolo\"][\"classes\"],\n conf_thres=config[\"confidence_threshold\"],\n nms_thres=0.45)\n\n # write result images. 
Draw bounding boxes and labels of detections\n classes = open(config[\"classes_names_path\"], \"r\").read().split(\"\\n\")[:-1]\n if not os.path.isdir(\"./output/\"):\n os.makedirs(\"./output/\")\n for idx, detections in enumerate(batch_detections):\n plt.figure()\n fig, ax = plt.subplots(1)\n ax.imshow(images_origin[idx])\n if detections is not None:\n unique_labels = detections[:, -1].cpu().unique()\n n_cls_preds = len(unique_labels)\n bbox_colors = random.sample(colors, n_cls_preds)\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:\n color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]\n # Rescale coordinates to original dimensions\n ori_h, ori_w = images_origin[idx].shape[:2]\n pre_h, pre_w = config[\"img_h\"], config[\"img_w\"]\n box_h = ((y2 - y1) / pre_h) * ori_h\n box_w = ((x2 - x1) / pre_w) * ori_w\n y1 = (y1 / pre_h) * ori_h\n x1 = (x1 / pre_w) * ori_w\n # Create a Rectangle patch\n bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2,\n edgecolor=color,\n facecolor='none')\n # Add the bbox to the plot\n ax.add_patch(bbox)\n # Add label\n plt.text(x1, y1, s=classes[int(cls_pred)], color='white',\n verticalalignment='top',\n bbox={'color': color, 'pad': 0})\n # Save generated image with detections\n plt.axis('off')\n plt.gca().xaxis.set_major_locator(NullLocator())\n plt.gca().yaxis.set_major_locator(NullLocator())\n plt.savefig('output/{}_{}.jpg'.format(step, idx), bbox_inches='tight', pad_inches=0.0)\n plt.close()\n logging.info(\"Save all results to ./output/\") \n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG,\n format=\"[%(asctime)s %(filename)s] %(message)s\")\n\n if len(sys.argv) != 2:\n logging.error(\"Usage: python test_images.py params.py\")\n sys.exit()\n params_path = sys.argv[1]\n if not os.path.isfile(params_path):\n logging.error(\"no params file found! path: {}\".format(params_path))\n sys.exit()\n config = importlib.import_module(params_path[:-3]).TRAINING_PARAMS\n config[\"batch_size\"] *= len(config[\"parallels\"])\n\n # Start training\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(map(str, config[\"parallels\"]))\n test(config)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.linspace",
"torch.load",
"numpy.asarray",
"matplotlib.use",
"torch.cat",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.get_cmap",
"torch.from_numpy",
"matplotlib.pyplot.subplots",
"torch.no_grad",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.close",
"numpy.transpose",
"torch.nn.DataParallel",
"matplotlib.ticker.NullLocator",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
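Editor's note: after inference, the YOLO test script in the row above rescales each detection from network-input coordinates back to the original image size and draws it with a matplotlib `Rectangle` patch. A stripped-down sketch of just that rescaling and drawing step, using a random image and one hypothetical detection box:

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")                     # headless, as in the original script
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# Hypothetical sizes and detection (x1, y1, x2, y2 in network-input coordinates).
pre_w, pre_h = 416, 416                   # network input size
image = np.random.rand(720, 1280, 3)      # stand-in for the original image
ori_h, ori_w = image.shape[:2]
x1, y1, x2, y2 = 100.0, 150.0, 200.0, 300.0

# Rescale from input coordinates to original image coordinates.
box_h = ((y2 - y1) / pre_h) * ori_h
box_w = ((x2 - x1) / pre_w) * ori_w
y1 = (y1 / pre_h) * ori_h
x1 = (x1 / pre_w) * ori_w

fig, ax = plt.subplots(1)
ax.imshow(image)
ax.add_patch(patches.Rectangle((x1, y1), box_w, box_h,
                               linewidth=2, edgecolor="red", facecolor="none"))
plt.axis("off")
plt.savefig("detection_demo.jpg", bbox_inches="tight", pad_inches=0.0)
plt.close()
```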
fethan/LabanotationSuite | [
"fcfbecd92d4eed0fd888e75677c92d669d58662d"
] | [
"GestureAuthoringTools/LabanEditor/src/graphLaban/graphLaban.py"
] | [
"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------------------------\n\nimport os\n\nimport matplotlib.pyplot as plt\nplt.rcParams['toolbar'] = 'None'\nfrom matplotlib.pyplot import figure, show\nfrom matplotlib.widgets import Slider, Button, RadioButtons\nimport matplotlib.cm as cm\nimport matplotlib.patches as patches\n\nimport tkMessageBox\n\nimport cv2\n\nimport settings\nimport scrollbar\n\nclass graphLaban:\n fig = None\n ax = None\n im = None\n view = None\n imgWidth = 0\n imgHeight = 0\n axesAspect = 1.0\n axesHeight = 0.0\n drag_active = False\n x0 = None\n y0 = None\n press = None\n timeOffset = 0\n selectedFrame = 0\n selectedFrameMarker = None\n currentTime = 0\n\n #------------------------------------------------------------------------------\n # Class initialization\n #\n def __init__(self):\n self.strTitle = 'Labanotation Score'\n self.fig = figure()\n\n nc = 20\n self.ax = plt.subplot2grid((1, nc), (0, 0), rowspan=1, colspan=(nc-1), aspect=1, anchor='E')\n self.axSlider = plt.subplot2grid((1, nc), (0, (nc-1)), rowspan=1, colspan=1, anchor='W')\n\n self.fig.canvas.set_window_title(self.strTitle)\n self.fig.set_size_inches((settings.screen_cx * 0.33) / self.fig.dpi, (settings.screen_cy * 0.465) / self.fig.dpi)\n\n self.fig.canvas.mpl_connect('resize_event', self.onresize)\n self.fig.canvas.mpl_connect('close_event', self.onclose)\n\n self.fig.canvas.mpl_connect('button_press_event', self.onPress)\n self.fig.canvas.mpl_connect('button_release_event', self.onRelease)\n self.fig.canvas.mpl_connect('motion_notify_event', self.onMotion)\n\n self.scrollbar = scrollbar.VScrollbar(self.axSlider, callback=self.onScrollbarUpdate)\n\n plt.tight_layout()\n\n # -----------------------------------------------------------------------------\n # canvas close event\n #\n def onclose(self, event):\n self.fig = None\n # if user closes this figure, let the main application know and to exit\n settings.application.close()\n\n #------------------------------------------------------------------------------\n # canvas resize event\n #\n def onresize(self, event):\n if (self.imgWidth == 0):\n return\n\n self.axesAspect = self.ax.figure.bbox_inches.width / self.ax.figure.bbox_inches.height\n self.axesHeight = self.imgWidth / self.axesAspect\n self.ax.set_xlim((0, self.imgWidth))\n self.ax.set_ylim((self.imgHeight, self.imgHeight - self.axesHeight))\n\n self.scrollbar.setScrollbarSize(self.imgHeight - self.axesHeight)\n self.scrollbar.setThumbSize(self.axesHeight)\n self.scrollbar.setPosition(0)\n\n if (self.selectedFrameMarker is not None):\n xx = self.ax.get_xlim()\n self.selectedFrameMarker.set_width(int(xx[1]-xx[0]))\n\n #------------------------------------------------------------------------------\n #\n def onPress(self, event):\n if event.inaxes != self.ax: return\n\n self.drag_active = True\n\n self.cur_xlim = self.ax.get_xlim()\n self.cur_ylim = self.ax.get_ylim()\n self.press = self.x0, self.y0, event.xdata, event.ydata\n self.x0, self.y0, self.xpress, self.ypress = self.press\n\n # set the child axes to pipe consequitive mouse events\n event.canvas.grab_mouse(self.ax)\n\n #------------------------------------------------------------------------------\n #\n def onMotion(self, event):\n if self.press is None: return\n if event.inaxes != self.ax: return\n if (self.drag_active == 
False): return\n\n #dx = event.xdata - self.xpress\n dy = event.ydata - self.ypress\n #self.cur_xlim -= dx\n self.cur_ylim -= dy\n\n if (self.cur_ylim[1] < 0.0):\n self.cur_ylim = (self.axesHeight, 0.0)\n elif (self.cur_ylim[0] > self.imgHeight):\n self.cur_ylim = (self.imgHeight, self.imgHeight - self.axesHeight)\n\n self.scrollbar.setPosition(self.imgHeight - self.cur_ylim[0])\n\n #self.ax.set_xlim(self.cur_xlim)\n self.ax.set_ylim(self.cur_ylim)\n self.fig.canvas.draw_idle()\n return\n\n #------------------------------------------------------------------------------\n #\n def onRelease(self, event):\n self.press = None\n self.drag_active = False\n # release the mouse grab held by the axes\n event.canvas.release_mouse(self.ax)\n\n #------------------------------------------------------------------------------\n #\n def onScrollbarUpdate(self, position):\n position = self.imgHeight - position\n self.ax.set_ylim((position, position - self.axesHeight))\n\n # -----------------------------------------------------------------------------\n #\n def updateInputName(self):\n self.fig.canvas.set_window_title(self.strTitle + ' - [' + settings.application.strBeautifiedInputFile + ']')\n\n #------------------------------------------------------------------------------\n #\n def saveView(self):\n if (self.fig is None):\n return\n\n filePath = os.path.join(settings.application.outputFolder, settings.application.outputName + '_LabanotationScore.png')\n filePath = settings.checkFileAlreadyExists(filePath, fileExt=\".png\", fileTypes=[('png files', '.png'), ('all files', '.*')])\n if (filePath is None):\n return\n\n try:\n self.fig.savefig(filePath, bbox_inches='tight')\n settings.application.logMessage(\"Labanotation score view was saved to '\" + settings.beautifyPath(filePath) + \"'\")\n except Exception as e:\n strError = e\n settings.application.logMessage(\"Exception saving Labanotation score view to '\" + settings.beautifyPath(filePath) + \"': \" + str(e))\n\n #------------------------------------------------------------------------------\n #\n def saveImage(self):\n if (self.view is None):\n return\n\n filePath = settings.application.outputFilePathImg\n filePath = settings.checkFileAlreadyExists(filePath, fileExt=\".png\", fileTypes=[('png files', '.png'), ('all files', '.*')])\n if (filePath is None):\n return\n\n try:\n cv2.imwrite(filePath, self.view.img)\n\n settings.application.logMessage(\"Labanotation score image was saved to '\" + settings.beautifyPath(filePath) + \"'\")\n except Exception as e:\n strError = e\n settings.application.logMessage(\"Exception saving Labanotation score image to '\" + settings.beautifyPath(filePath) + \"': \" + str(e))\n\n #------------------------------------------------------------------------------\n #\n def setLabanotation(self, timeS, all_laban):\n cnt = len(timeS)\n\n script = settings.application.labanotation.labanToScript(timeS, all_laban)\n\n s = 60\n self.view = settings.application.labanotation.labanScriptToImage(s * 10, s * cnt, script)\n\n if (self.im != None):\n self.ax.images.remove(self.im)\n\n if (self.selectedFrameMarker != None):\n self.ax.patches.remove(self.selectedFrameMarker)\n self.selectedFrameMarker = None\n\n self.ax.clear()\n self.im = self.ax.imshow(self.view.img, interpolation=\"bicubic\", cmap=cm.gray)\n self.im.set_zorder(1)\n\n self.imgWidth = self.view.img.shape[1]-1\n self.imgHeight = self.view.img.shape[0]-1\n\n cx = self.imgWidth\n cy = self.imgHeight\n\n self.axesAspect = self.ax.figure.bbox_inches.width / 
self.ax.figure.bbox_inches.height\n self.axesHeight = self.imgWidth / self.axesAspect\n self.ax.set_xlim((0, self.imgWidth))\n self.ax.set_ylim((self.imgHeight, self.imgHeight - self.axesHeight))\n\n self.ax.get_xaxis().set_visible(False)\n self.ax.get_yaxis().set_visible(False)\n\n self.scrollbar.setScrollbarSize(self.imgHeight - self.axesHeight)\n self.scrollbar.setThumbSize(self.axesHeight)\n self.scrollbar.setPosition(0)\n\n self.selectTime(self.currentTime)\n\n self.fig.canvas.draw_idle()\n\n #------------------------------------------------------------------------------\n #\n def selectTime(self, time):\n if (self.view is None): return;\n\n self.currentTime = time\n\n padding = 3.0\n # time is [0..1]\n y = int(self.view.timeOffset) - int(time*self.view.timeScale)\n\n if (self.selectedFrameMarker is None):\n xx = self.ax.get_xlim()\n yy = self.ax.get_ylim()\n self.selectedFrameMarker = patches.Rectangle((xx[0], y-padding), (xx[1]-xx[0]), 2*padding, alpha=0.5, color='purple')\n self.selectedFrameMarker.set_zorder(10)\n self.ax.add_patch(self.selectedFrameMarker)\n else:\n self.selectedFrameMarker.set_y(y-padding)\n\n # scroll image into view\n halfHeight = (self.axesHeight / 2.0)\n position = y + halfHeight\n\n if (position < self.axesHeight):\n position = self.axesHeight\n if (position > self.imgHeight):\n position = self.imgHeight\n\n self.ax.set_ylim((position, position - self.axesHeight))\n\n # set scroll bar position\n self.scrollbar.setPosition(self.imgHeight - position)\n\n # update plot when idle\n self.fig.canvas.draw_idle()\n\n\n"
] | [
[
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
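Editor's note: the Labanotation score viewer in the row above scrolls a tall image by connecting mouse press/motion/release handlers and shifting the axes y-limits, clamped to the image extent. A reduced sketch of that event wiring with a synthetic tall image; the scrollbar widget, window-title handling, and other class details of the original are omitted, and the drag behaviour here is only an approximation of the original class.

```python
import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(2000, 400)            # synthetic tall "score" image
view_height = 400                          # number of image rows visible at once

fig, ax = plt.subplots()
ax.imshow(img, cmap="gray")
ax.set_ylim(view_height, 0)                # start at the top (inverted y, as imshow uses)
state = {"ypress": None, "top": 0.0}       # 'top' = image row shown at the top of the view

def on_press(event):
    if event.inaxes is ax:
        state["ypress"] = event.ydata

def on_motion(event):
    if state["ypress"] is None or event.inaxes is not ax:
        return
    dy = event.ydata - state["ypress"]                     # drag distance in data units
    state["top"] = min(max(state["top"] - dy, 0.0), img.shape[0] - view_height)
    ax.set_ylim(state["top"] + view_height, state["top"])  # clamp to the image extent
    fig.canvas.draw_idle()

def on_release(event):
    state["ypress"] = None

fig.canvas.mpl_connect("button_press_event", on_press)
fig.canvas.mpl_connect("motion_notify_event", on_motion)
fig.canvas.mpl_connect("button_release_event", on_release)
plt.show()                                 # needs an interactive backend
```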
luckystar9111/interpret-community | [
"3a4094d3aa516a39dc52d65183f8b1f9aa31a801"
] | [
"test/test_serialize_explanation.py"
] | [
"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\n\"\"\"Tests the Explanation JSON serializer\"\"\"\n\nimport collections.abc\nimport pytest\nimport logging\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom interpret_community.common.constants import ExplainParams\nfrom interpret_community.explanation.explanation import save_explanation, load_explanation\nfrom interpret_community.mimic.models.lightgbm_model import LGBMExplainableModel\nfrom common_utils import (create_sklearn_svm_classifier, create_sklearn_linear_regressor,\n create_msx_data)\nfrom constants import DatasetConstants\nfrom constants import owner_email_tools_and_ux\nfrom interpret_community.dataset.dataset_wrapper import DatasetWrapper\nfrom shap.common import DenseData\n\ntest_logger = logging.getLogger(__name__)\n\n\[email protected](scope='class')\ndef iris_svm_model(iris):\n # uses iris DatasetConstants\n model = create_sklearn_svm_classifier(iris[DatasetConstants.X_TRAIN], iris[DatasetConstants.Y_TRAIN])\n yield model\n\n\[email protected](email=owner_email_tools_and_ux)\[email protected]('clean_dir')\nclass TestSerializeExplanation(object):\n\n def test_save_explanation(self, iris, tabular_explainer, iris_svm_model):\n explainer = tabular_explainer(iris_svm_model,\n iris[DatasetConstants.X_TRAIN],\n features=iris[DatasetConstants.FEATURES])\n explanation = explainer.explain_local(iris[DatasetConstants.X_TEST])\n save_explanation(explanation, 'brand/new/path')\n\n def test_save_and_load_explanation_local_only(self, iris, tabular_explainer, iris_svm_model):\n explainer = tabular_explainer(iris_svm_model,\n iris[DatasetConstants.X_TRAIN],\n features=iris[DatasetConstants.FEATURES])\n explanation = explainer.explain_local(iris[DatasetConstants.X_TEST])\n verify_serialization(explanation, assert_numpy_types=True)\n\n def test_save_and_load_explanation_global_only(self, iris, tabular_explainer, iris_svm_model):\n explainer = tabular_explainer(iris_svm_model,\n iris[DatasetConstants.X_TRAIN],\n features=iris[DatasetConstants.FEATURES])\n explanation = explainer.explain_global(iris[DatasetConstants.X_TEST], include_local=False)\n verify_serialization(explanation, assert_numpy_types=True)\n\n def test_save_and_load_explanation_global_and_local(self, iris, tabular_explainer, iris_svm_model):\n explainer = tabular_explainer(iris_svm_model,\n iris[DatasetConstants.X_TRAIN],\n features=iris[DatasetConstants.FEATURES])\n explanation = explainer.explain_global(iris[DatasetConstants.X_TEST])\n verify_serialization(explanation, assert_numpy_types=True)\n\n @pytest.mark.skip(reason=\"save_explanation and load_explanation do not support sparse data yet\")\n def test_save_and_load_sparse_explanation(self, mimic_explainer):\n x_train, x_test, y_train, y_test = create_msx_data(0.05)\n # Fit a linear regression model\n model = create_sklearn_linear_regressor(x_train, y_train.toarray().flatten())\n explainable_model = LGBMExplainableModel\n explainer = mimic_explainer(model, x_train, explainable_model, augment_data=False)\n explanation = explainer.explain_global(x_test)\n verify_serialization(explanation)\n\n\ndef _assert_explanation_equivalence(actual, expected):\n # get the non-null properties in the expected explanation\n paramkeys = filter(lambda x, expected=expected: hasattr(expected, getattr(ExplainParams, x)),\n list(ExplainParams.get_serializable()))\n for paramkey in paramkeys:\n param = 
getattr(ExplainParams, paramkey)\n actual_value = getattr(actual, param, None)\n expected_value = getattr(expected, param, None)\n if isinstance(actual_value, DatasetWrapper) or isinstance(actual_value, DenseData):\n if isinstance(actual_value.original_dataset, np.ndarray):\n actual_dataset = actual_value.original_dataset.tolist()\n else:\n actual_dataset = actual_value.original_dataset\n if isinstance(expected_value.original_dataset, np.ndarray):\n expected_dataset = expected_value.original_dataset.tolist()\n else:\n expected_dataset = expected_value.original_dataset\n np.testing.assert_array_equal(actual_dataset, expected_dataset)\n elif isinstance(actual_value, (np.ndarray, collections.abc.Sequence)):\n np.testing.assert_array_equal(actual_value, expected_value)\n elif isinstance(actual_value, pd.DataFrame) and isinstance(expected_value, pd.DataFrame):\n np.testing.assert_array_equal(actual_value.values, expected_value.values)\n else:\n assert actual_value == expected_value\n\n\ndef _assert_numpy_explanation_types(actual, expected):\n # assert \"_\" variables equivalence\n if hasattr(actual, ExplainParams.get_private(ExplainParams.LOCAL_IMPORTANCE_VALUES)):\n assert(isinstance(actual._local_importance_values, np.ndarray))\n assert(isinstance(expected._local_importance_values, np.ndarray))\n np.testing.assert_array_equal(actual._local_importance_values, expected._local_importance_values)\n if hasattr(actual, ExplainParams.get_private(ExplainParams.EVAL_DATA)):\n assert(isinstance(actual._eval_data, np.ndarray))\n assert(isinstance(expected._eval_data, np.ndarray))\n np.testing.assert_array_equal(actual._eval_data, expected._eval_data)\n\n\n# performs serialization and de-serialization for any explanation\n# tests to verify that the de-serialized result is equivalent to the original\n# exposed outside this module to allow any test involving an explanation to\n# incorporate serialization testing\ndef verify_serialization(explanation, extra_path=None, exist_ok=False, assert_numpy_types=False):\n path = 'brand/new/path'\n if extra_path is not None:\n path = os.path.join(path, extra_path)\n save_explanation(explanation, path, exist_ok=exist_ok)\n loaded_explanation = load_explanation(path)\n _assert_explanation_equivalence(explanation, loaded_explanation)\n if assert_numpy_types:\n _assert_numpy_explanation_types(explanation, loaded_explanation)\n"
] | [
[
"numpy.testing.assert_array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
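Editor's note: the serialization tests in the row above round-trip an explanation through save/load and then compare it field by field, using `numpy.testing.assert_array_equal` for array-like attributes and plain equality otherwise. A generic standalone sketch of that comparison pattern; the explanation-specific wrappers are replaced by a hypothetical `Dummy` object for illustration.

```python
import collections.abc
import numpy as np

def assert_equivalent(actual, expected, fields):
    """Compare selected attributes of two objects, using array comparison for
    numpy arrays / sequences and plain equality otherwise (same pattern as
    _assert_explanation_equivalence in the test module above)."""
    for name in fields:
        a = getattr(actual, name, None)
        e = getattr(expected, name, None)
        if isinstance(a, (np.ndarray, collections.abc.Sequence)) and not isinstance(a, str):
            np.testing.assert_array_equal(a, e)
        else:
            assert a == e, f"{name}: {a!r} != {e!r}"

class Dummy:                     # hypothetical stand-in for an explanation object
    def __init__(self, values, method):
        self.values = values
        self.method = method

if __name__ == "__main__":
    original = Dummy(np.array([1.0, 2.0, 3.0]), "shap")
    restored = Dummy(np.array([1.0, 2.0, 3.0]), "shap")   # e.g. after a save/load round trip
    assert_equivalent(original, restored, ["values", "method"])
    print("round-trip fields match")
```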
mikegrudic/CrunchSnaps | [
"0a6e3d15f7b682391094517b6b38e36d4173a5bf"
] | [
"src/CrunchSnaps/snapshot_tasks.py"
] | [
"import numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.colors import LightSource\nfrom meshoid import GridSurfaceDensity as GridSurfaceDensity\nimport aggdraw\nfrom PIL import Image, ImageDraw, ImageFont, ImageChops\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom .amuse_fresco import *\nfrom numba import get_num_threads, set_num_threads\nfrom .misc_functions import *\nfrom os.path import isfile\nimport json\nimport os\nimport sys\nhashseed = os.getenv('PYTHONHASHSEED')\nif not hashseed:\n os.environ['PYTHONHASHSEED'] = '0'\n os.execv(sys.executable, [sys.executable] + sys.argv)\n\nclass Task:\n \"\"\"Class containing generic routines common to all tasks, and assigns default (null/empty) attributes that any task should have\"\"\"\n def __init__(self,params):\n self.RequiredSnapdata = []\n self.params = params\n\n def GetRequiredSnapdata(self):\n return self.RequiredSnapdata\n\n def AssignDefaultParams(self):\n for k in self.default_params.keys():\n if not k in self.params.keys(): self.params[k] = self.default_params[k]\n\n def AssignDefaultParamsFromSnapdata(self,snapdata):\n return\n\n \n\nclass SinkVis(Task):\n def __init__(self, params):\n \"\"\"Class containing methods for coordinate transformations, rendering, etc. for a generic SinkVis-type map plot\"\"\"\n super().__init__(params) \n\n self.default_params = {\"Time\": 0,\n \"res\": 512,\n \"rmax\": None,\n \"limits\": [1,3e3],\n \"center\": None,\n \"pan\": 0,\n \"tilt\": 0,\n \"no_timestamp\": False,\n \"no_size_scale\": False,\n \"filename\": None,\n \"sink_scale\": 1,\n \"cmap\": \"viridis\",\n \"backend\": \"PIL\",\n \"rescale_hsml\": False,\n \"FOV\": 90,\n \"camera_distance\": np.inf,\n \"center_on_star\": False,\n \"fresco_stars\": False,\n \"fresco_param\": 0.001,\n \"fresco_mass_limits\": [0,0],\n \"fresco_mass_rescale\": 0.3,\n \"threads\": 1,\n \"cubemap_dir\": \"forward\",\n \"camera_dir\": None,\n \"camera_right\": None,\n \"camera_up\": None,\n \"index\": None,\n \"no_stars\": False,\n \"overwrite\": False\n }\n\n\n\n self.AssignDefaultParams()\n\n self.params_that_affect_maps = [\"Time\", \"res\", \"rmax\", \"center\", \"pan\", \"tilt\", \"FOV\", \"camera_distance\", \"center_on_star\", \"cubemap_dir\", \"camera_dir\", \"camera_right\", \"camera_up\", \"rescale_hsml\"]\n self.params_hash = str(hash(json.dumps(dict([(k, self.params[k]) for k in self.params_that_affect_maps]) ,sort_keys=True)))\n if not os.path.isdir(\".maps\"): os.mkdir(\".maps\")\n self.map_files = dict([(m, \".maps/\" + m + \"_\" + self.params_hash) for m in self.required_maps]) # filename for the saved maps will by MAPNAME_(hash # of input params)\n self.maps = {}\n\n self.DetermineRequiredSnapdata()\n\n if self.params[\"threads\"] != 1:\n self.parallel = True\n if self.params[\"threads\"] > 0: # if negative, just use all available threads, otherwise set to desired value\n set_num_threads(self.params[\"threads\"])\n else: self.parallel = False\n\n if isfile(self.params[\"filename\"]):\n self.RequiredSnapdata = []\n self.TaskDone = True\n else:\n self.TaskDone = False\n\n def DetermineRequiredSnapdata(self):\n self.RequiredSnapdata = [\"PartType5/Coordinates\",\"PartType5/Masses\",\"PartType5/ParticleIDs\", \"PartType5/BH_Mass\"]\n\n def AssignDefaultParams(self):\n super().AssignDefaultParams()\n if self.params[\"index\"] is None:\n self.params[\"index\"] = round(self.params[\"Time\"]/1e-6) \n self.params[\"filename_suffix\"] = 
\"%s_%s_%s.png\"%(str(self.params[\"index\"]).zfill(5), str(round(self.params[\"pan\"]*10)).zfill(4), self.params[\"cubemap_dir\"]) \n\n\n def CoordinateTransform(self,x,m=None,h=None, contravariant=False):\n # center on the designated center coordinate\n if not contravariant: x[:] -= self.params[\"center\"]\n\n if self.params[\"camera_dir\"] is None: # without a specified camera direction, we just use a simple tilt/pan scheme\n tilt, pan = self.params[\"tilt\"], self.params[\"pan\"]\n if contravariant: tilt, pan = -tilt, -pan\n # first pan\n cosphi, sinphi = np.cos(np.pi*pan/180), np.sin(np.pi*pan/180)\n x[:] = np.c_[cosphi*x[:,0] + sinphi*x[:,2],x[:,1], -sinphi*x[:,0] + cosphi*x[:,2]]\n # then tilt\n costheta, sintheta = np.cos(np.pi*tilt/180), np.sin(np.pi*tilt/180)\n x[:] = np.c_[x[:,0], costheta*x[:,1] + sintheta*x[:,2], -sintheta*x[:,1] + costheta*x[:,2]]\n else: # we have a camera position and coordinate basis\n if contravariant: x[:] = (self.camera_matrix_vectors @ x.T).T # note that @ performs matrix multiplication\n else: x[:] = (self.camera_matrix @ x.T).T\n\n if self.params[\"camera_distance\"] != np.inf and not contravariant:\n # transform so camera is at z=0:\n x[:,2] += self.params[\"camera_distance\"]\n \n # shuffle the axes to get the desired cubemap direction\n cubedir = self.params[\"cubemap_dir\"]\n if cubedir != \"forward\":\n if cubedir == \"right\": x[:] = np.c_[-x[:,2],x[:,1],x[:,0]]\n elif cubedir == \"left\": x[:] = np.c_[x[:,2],x[:,1],-x[:,0]]\n elif cubedir == \"up\": x[:] = np.c_[x[:,0],-x[:,2],x[:,1]]\n elif cubedir == \"down\": x[:] = np.c_[x[:,0],x[:,2],-x[:,1]]\n elif cubedir == \"backward\": x[:] = np.c_[-x[:,0],x[:,1],-x[:,2]] \n \n # then do projection if desired\n if self.params[\"camera_distance\"] != np.inf:\n if not contravariant:\n # now transform from 3D to angular system\n r = np.sum(x*x,axis=1)**0.5 # distance from camera \n x[:,:2] = x[:,:2] / x[:,2][:,None] # homogeneous coordinates\n r = np.abs(x[:,2])\n if h is not None:\n h[:] = h / r # kernel lengths are now angular (divide by distance)\n h[x[:,2]<0] = 0 # assign 0 weight/size to anything behind the camera\n if m is not None:\n m[:] /= r**2 # rescale mass weights so that integrated surface density remains the same\n m[x[:,2]<0] = 0\n\n else: # dealing with a contravariant vector such as velocity - want the [:,2] component to correspond to line-of-sight value\n global_coords = np.copy(self.pos) # this would have been converted to angular by now - let's convery back to real space\n global_coords[:,:2] *= global_coords[:,2][:,None] # multiply by z, now we're in the rotated real space frame\n x[:,2] = np.sum(x*global_coords,axis=1)/np.sum(global_coords**2,axis=1)**0.5 # get the radial component\n \n \n \n\n def SetupCoordsAndWeights(self, snapdata):\n res = self.params[\"res\"]\n if \"PartType0/Coordinates\" in snapdata.keys():\n self.pos, self.mass, self.hsml = np.copy(snapdata[\"PartType0/Coordinates\"]), np.copy(snapdata[\"PartType0/Masses\"]), np.copy(snapdata[\"PartType0/SmoothingLength\"]) # copy these because we don't want to modify them\n if self.params[\"rescale_hsml\"]: self.hsml *= self.params[\"rescale_hsml\"]\n\n # Setting up coordinate basis\n if self.params[\"camera_dir\"] is not None:\n self.camera_dir = self.params[\"camera_dir\"]\n NormalizeVector(self.params[\"camera_dir\"])\n if not self.params[\"camera_up\"]: self.camera_up = np.array([0,1.,0]) # default \"up\" direction is +y, we will project it out if the camera is tilted\n else: self.camera_up = 
self.params[\"camera_up\"]\n \n # if we've specified an up direction, project out the component parallel to the forward direction and normalize\n self.camera_up -= sum(self.camera_dir * self.camera_up).sum() * self.camera_dir\n NormalizeVector(self.camera_up)\n # now get the \"right\" vector as the cross product of forward x up. this will be normalized to machine precision\n self.camera_right = np.cross(self.camera_up, self.camera_dir)\n\n self.camera_matrix = np.c_[self.camera_right, self.camera_up, self.camera_dir].T # matrix of coordinate vectors - operate this on coordinates to apply transformation - operates on COORDINATES not vectors\n self.camera_matrix_vectors = self.camera_matrix.T # since vector fields are contravariant, this is the operator for transforming v and B (note that this is an orthogonal matrix so the transpose is the inverse)\n \n if \"PartType0/Coordinates\" in snapdata.keys(): \n self.CoordinateTransform(self.pos,self.mass,self.hsml)\n self.hsml = np.clip(self.hsml,2*self.params[\"rmax\"]/res, 1e100)\n\n\n def GenerateMaps(self,snapdata):\n return\n\n def SaveImage(self):\n print(\"saving \", self.params[\"filename\"]) \n if self.params[\"backend\"] == \"matplotlib\":\n rmax = self.params[\"rmax\"]\n self.ax.set(xlim=[-rmax,rmax],ylim=[-rmax,rmax])\n plt.savefig(self.params[\"filename_incomplete\"],bbox_inches='tight',dpi=200) \n plt.close()\n os.rename(self.params[\"filename_incomplete\"], self.params[\"filename\"]) \n\n def MakeImages(self,snapdata):\n if not self.params[\"no_stars\"]: self.AddStarsToImage(snapdata)\n self.AddSizeScaleToImage()\n self.AddTimestampToImage()\n self.SaveImage()\n \n\n def AddTimestampToImage(self):\n if self.params[\"no_timestamp\"]: return\n fname = self.params[\"filename_incomplete\"]\n time = self.params[\"Time\"]\n if (time*979>=1e-2):\n time_text=\"%3.2gMyr\"%(time*979)\n elif(time*979>=1e-4):\n time_text=\"%3.2gkyr\"%(time*979*1e3)\n else:\n time_text=\"%3.2gyr\"%(time*979*1e6)\n \n if self.params[\"backend\"]==\"PIL\":\n F = Image.open(fname)\n gridres = F.size[0]\n draw = ImageDraw.Draw(F)\n font = ImageFont.truetype(\"LiberationSans-Regular.ttf\", gridres//12)\n draw.text((gridres/16, gridres/24), time_text, font=font)\n F.save(fname)\n F.close()\n elif self.params[\"backend\"]==\"matplotlib\":\n self.ax.text(-self.params[\"rmax\"]*0.85, self.params[\"rmax\"]*0.85,time_text,color=\"#FFFFFF\")\n\n\n def AddSizeScaleToImage(self):\n if self.params[\"camera_distance\"] < np.inf: return\n if self.params[\"backend\"]==\"matplotlib\": return # matplotlib will have axis ticks for scale\n pc_to_AU = 206265.0\n if self.params[\"no_size_scale\"]: return\n fname = self.params[\"filename_incomplete\"]\n F = Image.open(fname)\n draw = ImageDraw.Draw(F)\n gridres = self.params[\"res\"]\n font = ImageFont.truetype(\"LiberationSans-Regular.ttf\", gridres//12)\n r = self.params[\"rmax\"]\n if (r>1000):\n scale_kpc=10**np.round(np.log10(r*0.5/1000))\n size_scale_text=\"%3.3gkpc\"%(scale_kpc)\n size_scale_ending=gridres/16+gridres*(scale_kpc*1000)/(2*r)\n if (r>1e-2):\n scale_pc=10**np.round(np.log10(r*0.5))\n size_scale_text=\"%3.3gpc\"%(scale_pc)\n size_scale_ending=gridres/16+gridres*(scale_pc)/(2*r)\n else:\n scale_AU=10**np.round(np.log10(r*0.5*pc_to_AU))\n size_scale_text=\"%3.4gAU\"%(scale_AU)\n size_scale_ending=gridres/16+gridres*(scale_AU)/(2*r*pc_to_AU)\n draw.line(((gridres/16, 7*gridres/8), (size_scale_ending, 7*gridres/8)), fill=\"#FFFFFF\", width=6)\n draw.text((gridres/16, 7*gridres/8 + 5), size_scale_text, font=font)\n 
F.save(fname)\n F.close() \n \n def AddStarsToImage(self,snapdata):\n# print([k for k in snapdata.keys()])\n if not \"PartType5/Coordinates\" in snapdata.keys(): return\n X_star = np.copy(snapdata[\"PartType5/Coordinates\"])\n m_star = snapdata[\"PartType5/BH_Mass\"]\n\n self.CoordinateTransform(X_star, np.ones(len(X_star)), np.ones(len(X_star)))\n \n if self.params[\"backend\"]==\"PIL\":\n fname = self.params[\"filename_incomplete\"]\n if self.params[\"fresco_stars\"]: # use fresco for stellar images\n if self.params[\"camera_distance\"] < np.inf:\n X_star, m_star = X_star[X_star[:,2]>0], m_star[X_star[:,2]>0]\n# m_star /= X_star[:,2]**2\n if len(X_star) == 0: return\n data_stars_fresco = make_amuse_fresco_stars_only(X_star,m_star, np.zeros_like(m_star),2*self.params[\"rmax\"],res=self.params[\"res\"],vmax=self.params[\"fresco_param\"],mass_rescale=self.params[\"fresco_mass_rescale\"],mass_limits=self.params[\"fresco_mass_limits\"])\n img = plt.imread(fname)\n plt.imsave(fname,np.clip(img[:,:,:3]+data_stars_fresco,0,1))\n else: # use derpy PIL circles\n F = Image.open(fname)\n gridres = F.size[0]\n draw = ImageDraw.Draw(F)\n d = aggdraw.Draw(F)\n pen = aggdraw.Pen(self.Star_Edge_Color(),1) #gridres/800\n sink_relscale = 0.0025\n X_star ,m_star = X_star[m_star.argsort()[::-1]], np.sort(m_star)[::-1]\n X_star, m_star = X_star[X_star[:,2]>0], m_star[X_star[:,2]>0] \n for j in np.arange(len(X_star))[m_star>1e-2]:\n X = X_star[j]\n ms = m_star[j]\n star_size = gridres * sink_relscale * (np.log10(ms/self.params[\"sink_scale\"]) + 1)\n# if self.params[\"camera_distance\"] < np.inf:\n # make 100msun ~ 0.03pc, scale down from there\n# if X[2] < 0: continue\n# star_size = gridres * 0.03 / dist_to_camera / self.params[\"rmax\"] * (ms/100)**(1./3) \n star_size = max(3,star_size)\n p = aggdraw.Brush(self.GetStarColor(ms))\n norm_coords = (X[:2]+self.params[\"rmax\"])/(2*self.params[\"rmax\"])*gridres\n #Pillow puts the origin in th top left corner, so we need to flip the y axis\n norm_coords[1] = gridres - norm_coords[1]\n coords = np.concatenate([norm_coords-star_size, norm_coords+star_size])\n d.ellipse(coords, pen, p)#, fill=(155, 176, 255))\n d.flush()\n F.save(fname)\n F.close()\n elif self.params[\"backend\"]==\"matplotlib\":\n star_size = np.log10(m_star/self.params[\"sink_scale\"])+2\n colors = np.array([self.GetStarColor(m) for m in m_star])/255\n \n self.ax.scatter(X_star[:,0], X_star[:,1],s=star_size*5,edgecolor=self.Star_Edge_Color(),lw=0.1,facecolor=colors,marker='*')\n \n def Star_Edge_Color(self):\n if self.params[\"cmap\"] in ('afmhot', 'inferno', \"Blues\"):\n return 'black'\n else:\n return 'white'\n \n def GetStarColor(self, mass_in_msun):\n if self.params[\"cmap\"] in ('afmhot', 'inferno', \"Blues\"):\n star_colors = np.array([[255, 100, 60],[120, 200, 150],[75, 80, 255]]) #alternate colors, red-green-blue, easier to see on a bright color map\n else:\n star_colors = np.array([[255, 203, 132],[255, 243, 233],[155, 176, 255]]) #default colors, reddish for small ones, yellow-white for mid sized and blue for large\n colors = np.int_([np.interp(np.log10(mass_in_msun),[-1,0,1],star_colors[:,i]) for i in range(3)])\n return (colors[0],colors[1],colors[2])# if len(colors)==1 else colors)\n\n def AssignDefaultParamsFromSnapdata(self,snapdata):\n if self.params[\"center\"] is None:\n if self.params[\"center_on_star\"]:\n if \"PartType5/Coordinates\" in snapdata.keys():\n self.params[\"center\"] = 
snapdata[\"PartType5/Coordinates\"][snapdata[\"PartType5/BH_Mass\"].argsort()[::-1]][self.params[\"center_on_star\"]-1] # center on the n'th most massive star\n else: # otherwise center on the densest gas cell\n self.params[\"center\"] = snapdata[\"PartType0/Coordinates\"][snapdata[\"PartType0/Density\"].argmax()]\n else:\n self.params[\"center\"] = np.repeat(snapdata[\"Header\"][\"BoxSize\"]*0.5,3)\n center = self.params[\"center\"]\n else: center = self.params[\"center\"]\n if self.params[\"rmax\"] is None:\n if self.params[\"camera_distance\"] < np.inf:\n self.params[\"rmax\"] = self.params[\"FOV\"]/90 # angular width\n else:\n self.params[\"rmax\"] = snapdata[\"Header\"][\"BoxSize\"]/10\n# if self.params[\"camera_distance\"] < np.inf and self.params[\"FOV\"] is None:\n# self.params[\"rmax\"] /= self.params[\"camera_distance\"] # convert to angular assuming rmax is real-space half-width at the focal distance\n\n \n def DoTask(self, snapdata):\n if self.TaskDone: return \n self.AssignDefaultParamsFromSnapdata(snapdata)\n if set(self.required_maps) != set(self.maps.keys()): # if we don't already have the maps we need\n self.SetupCoordsAndWeights(snapdata) \n self.GenerateMaps(snapdata)\n self.MakeImages(snapdata)\n return self.maps\n\n\nclass SinkVisSigmaGas(SinkVis):\n def __init__(self,params):\n self.required_maps = [\"sigma_gas\"] \n super().__init__(params)\n if self.TaskDone: return\n self.AssignDefaultParams()\n\n def AssignDefaultParams(self):\n super().AssignDefaultParams()\n# self.params[\"filename\"] = \"SurfaceDensity_%s_%s.png\"%(str(self.params[\"index\"]).zfill(4), str(round(self.params[\"pan\"])).zfill(4))\n# else:\n if self.params[\"filename\"] is None:\n self.params[\"filename\"] = \"SurfaceDensity_\" + self.params[\"filename_suffix\"]\n self.params[\"filename_incomplete\"] = self.params[\"filename\"].replace(\".png\",\".incomplete.png\")\n\n def DetermineRequiredSnapdata(self):\n super().DetermineRequiredSnapdata()\n # check if we have sigma_gas map already saved\n if isfile(self.map_files[\"sigma_gas\"] + \".npz\"):\n self.maps[\"sigma_gas\"] = np.load(self.map_files[\"sigma_gas\"] + \".npz\")['sigma_gas'] \n else:\n self.RequiredSnapdata += [\"PartType0/Coordinates\",\"PartType0/Masses\",\"PartType0/ParticleIDs\", \"PartType0/BH_Mass\", \"PartType0/SmoothingLength\",\"PartType0/ParticleChildIDsNumber\",\"PartType0/ParticleIDGenerationNumber\"]\n \n def GenerateMaps(self,snapdata):\n if not \"sigma_gas\" in self.maps.keys():\n self.maps[\"sigma_gas\"] = GridSurfaceDensity(self.mass, self.pos, self.hsml, np.zeros(3), 2*self.params[\"rmax\"], res=self.params[\"res\"],parallel=self.parallel).T \n np.savez_compressed(self.map_files[\"sigma_gas\"], sigma_gas=self.maps[\"sigma_gas\"]) \n\n def MakeImages(self,snapdata):\n vmin, vmax = self.params[\"limits\"]\n \n f = (np.log10(self.maps[\"sigma_gas\"])-np.log10(vmin))/(np.log10(vmax)-np.log10(vmin))\n\n if self.params[\"backend\"]==\"PIL\":\n plt.imsave(self.params[\"filename_incomplete\"], plt.get_cmap(self.params[\"cmap\"])(np.flipud(f))) # NOTE - we invert this to get the coordinate system right\n elif self.params[\"backend\"]==\"matplotlib\":\n self.fig, self.ax = plt.subplots(figsize=(4,4))\n X = Y = np.linspace(-self.params[\"rmax\"], self.params[\"rmax\"], self.params[\"res\"])\n X, Y = np.meshgrid(X, Y)\n p = self.ax.pcolormesh(X, Y, self.maps[\"sigma_gas\"], norm=matplotlib.colors.LogNorm(vmin=self.params[\"limits\"][0],vmax=self.params[\"limits\"][1]),cmap=self.params[\"cmap\"])\n self.ax.set_aspect('equal')\n \n 
divider = make_axes_locatable(self.ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.0)\n self.fig.colorbar(p,label=r\"$\\Sigma_{\\rm gas}$ $(\\rm M_\\odot\\,pc^{-2})$\",cax=cax)\n if self.params[\"camera_distance\"] == np.inf:\n self.ax.set_xlabel(\"X (pc)\")\n self.ax.set_ylabel(\"Y (pc)\")\n else:\n self.ax.set_xlabel(\"X (rad)\")\n self.ax.set_ylabel(\"Y (rad)\")\n\n super().MakeImages(snapdata)\n\n\n\n\nclass SinkVisCoolMap(SinkVis):\n def __init__(self,params):\n self.required_maps = [\"sigma_gas\", \"sigma_1D\"] #physical rendered quantities that can get saved and reused\n super().__init__(params)\n if self.TaskDone: return \n self.default_params[\"cool_cmap\"] = 'magma'\n self.AssignDefaultParams()\n\n def DetermineRequiredSnapdata(self):\n super().DetermineRequiredSnapdata()\n # check if we have sigma_gas map already saved\n if isfile(self.map_files[\"sigma_gas\"] + \".npz\"):\n# print(\"loading \" + self.map_files[\"sigma_gas\"] + \".npz\")\n self.maps[\"sigma_gas\"] = np.load(self.map_files[\"sigma_gas\"]+\".npz\")[\"sigma_gas\"] \n else:\n self.RequiredSnapdata += [\"PartType0/Coordinates\",\"PartType0/Masses\",\"PartType0/ParticleIDs\", \"PartType0/BH_Mass\", \"PartType0/SmoothingLength\",\"PartType0/ParticleChildIDsNumber\",\"PartType0/ParticleIDGenerationNumber\"]\n\n if isfile(self.map_files[\"sigma_1D\"] + \".npz\"):\n# print(\"loading map!\")\n self.maps[\"sigma_1D\"] = np.load(self.map_files[\"sigma_1D\"] + \".npz\")[\"sigma_1D\"]\n else:\n self.RequiredSnapdata += [\"PartType0/Velocities\"]\n self.RequiredSnapdata += [\"PartType0/Coordinates\",\"PartType0/Masses\",\"PartType0/ParticleIDs\", \"PartType0/BH_Mass\", \"PartType0/SmoothingLength\",\"PartType0/ParticleChildIDsNumber\",\"PartType0/ParticleIDGenerationNumber\"]\n# print(self.RequiredSnapdata)\n \n\n def AssignDefaultParams(self):\n super().AssignDefaultParams()\n# if self.params[\"filename\"] is None: self.params[\"filename\"] = \"CoolMap_%s_%s.png\"%(str(self.params[\"Index\"]).zfill(4), str(round(self.params[\"pan\"])).zfill(4))\n# else:\n if self.params[\"filename\"] is None:\n self.params[\"filename\"] = \"CoolMap_\" + self.params[\"filename_suffix\"]\n self.params[\"filename_incomplete\"] = self.params[\"filename\"].replace(\".png\",\".incomplete.png\")\n \n \n def GenerateMaps(self,snapdata):\n super().GenerateMaps(snapdata)\n \n if not \"sigma_gas\" in self.maps.keys():\n self.maps[\"sigma_gas\"] = GridSurfaceDensity(self.mass, self.pos, self.hsml, np.zeros(3), 2*self.params[\"rmax\"], res=self.params[\"res\"],parallel=self.parallel).T \n np.savez_compressed(self.map_files[\"sigma_gas\"], sigma_gas=self.maps[\"sigma_gas\"])\n if not \"sigma_1D\" in self.maps.keys():\n # need to apply coordinate transforms to z-velocity\n v = np.copy(snapdata[\"PartType0/Velocities\"])\n self.CoordinateTransform(v,contravariant=True)\n sigma_1D = GridSurfaceDensity(self.mass * v[:,2]**2, self.pos, self.hsml, np.zeros(3), 2*self.params[\"rmax\"], res=self.params[\"res\"],parallel=self.parallel).T/self.maps[\"sigma_gas\"]\n v_avg = GridSurfaceDensity(self.mass * v[:,2], self.pos, self.hsml, np.zeros(3), 2*self.params[\"rmax\"], res=self.params[\"res\"],parallel=self.parallel).T/self.maps[\"sigma_gas\"]\n self.maps[\"sigma_1D\"] = np.sqrt(sigma_1D - v_avg**2)/1e3\n np.savez_compressed(self.map_files[\"sigma_1D\"], sigma_1D=self.maps[\"sigma_1D\"])\n\n fgas = (np.log10(self.maps[\"sigma_gas\"])-np.log10(self.params[\"limits\"][0]))/np.log10(self.params[\"limits\"][1]/self.params[\"limits\"][0])\n fgas = 
np.clip(fgas,0,1)\n ls = LightSource(azdeg=315, altdeg=45)\n #lightness = ls.hillshade(z, vert_exag=4)\n mapcolor = plt.get_cmap(self.params[\"cool_cmap\"])(np.log10(self.maps[\"sigma_1D\"]/0.1)/2)\n cool_data = ls.blend_hsv(mapcolor[:,:,:3], fgas[:,:,None])\n self.maps[\"coolmap\"] = cool_data\n\n def MakeImages(self,snapdata):\n plt.imsave(self.params[\"filename_incomplete\"], np.flipud(self.maps[\"coolmap\"])) # NOTE - we invert this to get the coordinate system right\n super().MakeImages(snapdata)\n #self.AddStarsToImage(snapdata) \n# self.AddSizeScaleToImage()\n# self.AddTimestampToImage()\n \n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.imread",
"numpy.flipud",
"matplotlib.pyplot.get_cmap",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.cross",
"numpy.clip",
"numpy.sin",
"numpy.copy",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.repeat",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"matplotlib.colors.LightSource",
"numpy.log10",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"matplotlib.colors.LogNorm",
"numpy.abs",
"matplotlib.use",
"numpy.cos",
"matplotlib.pyplot.subplots",
"numpy.sort",
"numpy.savez_compressed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kschamplin/astro-classifier-neo | [
"44fcb8ba41ef549c16360df7fd470f56c42da9b3"
] | [
"src/pytorch_test/models/ncde.py"
] | [
"import pytorch_lightning as pl\nimport torch\nimport torchcde\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom pytorch_test.plasticc.constants import class_weights_target_list\n\n\nclass NCDEFunction(torch.nn.Module):\n def __init__(self, input_channels, hidden_channels):\n super(F, self).__init__()\n # For illustrative purposes only. You should usually use an MLP or something. A single linear layer won't be\n # that great.\n self.linear = torch.nn.Linear(hidden_channels,\n hidden_channels * input_channels)\n self.hidden_channels = hidden_channels\n self.input_channels = input_channels\n\n def forward(self, t, z):\n batch_dims = z.shape[:-1]\n return self.linear(z).tanh().view(*batch_dims, self.hidden_channels, self.input_channels)\n\n\nclass NCDE(pl.LightningModule):\n \"\"\"Neural Controlled Differential Equation model for classification on irregular, multi-modal time series\"\"\"\n\n def __init__(self, input_channels=7, hidden_channels=128, output_channels=14, interpolation=\"cubic\"):\n super().__init__()\n\n self.initial = nn.Linear(input_channels, hidden_channels)\n self.model = NCDEFunction(input_channels, hidden_channels)\n self.output = nn.Linear(hidden_channels, output_channels)\n\n self.loss = nn.CrossEntropyLoss(weight=torch.tensor(class_weights_target_list))\n\n self.interpolation = interpolation\n\n def forward(self, x):\n # NOTE: x should be the natural cubic spline coefficients. Look into datasets.py for how to generate these.\n x = torchcde.natural_cubic_coeffs(x)\n if self.interpolation == \"cubic\":\n x = torchcde.NaturalCubicSpline(x)\n elif self.interpolation == \"linear\":\n x = torchcde.LinearInterpolation(x)\n else:\n raise ValueError(\"invalid interpolation given\")\n\n x0 = x.evaluate(x.interval[0])\n z0 = self.initial(x0)\n zt = torchcde.cdeint(X=x, func=self.model, z0=z0, t=x.interval)\n\n return self.output(zt[..., -1, :])\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=1e-3)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n pred_y = self(x).squeeze(-1)\n loss = self.loss(pred_y, y)\n return loss"
] | [
[
"torch.nn.Linear",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GLOMICON/emp | [
"c1f752d1ae4c009328bbdcecf9666dbd4dac39b6"
] | [
"legacy/code/tests/test_most_wanted_otus.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import division\n\n__author__ = \"Jai Ram Rideout\"\n__copyright__ = \"Copyright 2012, The QIIME project\"\n__credits__ = [\"Jai Ram Rideout\"]\n__license__ = \"GPL\"\n__version__ = \"1.5.0-dev\"\n__maintainer__ = \"Jai Ram Rideout\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n\n\"\"\"Test suite for the most_wanted_otus.py module.\"\"\"\n\nfrom os import makedirs, getcwd, chdir\nfrom os.path import basename, exists, join, normpath\nfrom shutil import rmtree\nfrom tempfile import mkdtemp, NamedTemporaryFile\n\nfrom numpy import array\n\nfrom biom.table import table_factory, SparseOTUTable\nfrom cogent.app.formatdb import build_blast_db_from_fasta_path\nfrom cogent.util.misc import remove_files\nfrom cogent.util.unit_test import TestCase, main\nfrom qiime.test import initiate_timeout, disable_timeout\nfrom qiime.util import get_qiime_temp_dir, get_tmp_filename\nfrom qiime.workflow.util import WorkflowError\n\nfrom emp.most_wanted_otus import (generate_most_wanted_list,\n _get_most_wanted_filtering_commands, _get_top_n_blast_results,\n _get_rep_set_lookup, _format_top_n_results_table,\n _format_pie_chart_data, _format_legend_html)\n\nclass MostWantedOtusTests(TestCase):\n \"\"\"Tests for the most_wanted_otus.py module.\"\"\"\n\n def setUp(self):\n \"\"\"Set up files/environment that will be used by the tests.\"\"\"\n # The prefix to use for temporary files. This prefix may be added to,\n # but all temp dirs and files created by the tests will have this\n # prefix at a minimum.\n self.prefix = 'most_wanted_otus_tests_'\n self.files_to_remove = []\n self.dirs_to_remove = []\n\n self.output_dir = mkdtemp(prefix='%soutput_dir_' % self.prefix)\n self.dirs_to_remove.append(self.output_dir)\n\n self.grouping_category = 'Environment'\n self.top_n = 100\n\n self.blast_results_lines = blast_results.split('\\n')\n self.blast_results_dupes_lines = blast_results_dupes.split('\\n')\n self.rep_set_lines = rep_set.split('\\n')\n self.top_n_mw = [('a', 'gi|7|emb|T51700.1|', 87.0),\n ('b', 'gi|8|emb|Z700.1|', 89.5)]\n self.mw_seqs = {'b':'AAGGTT', 'a':'AGT'}\n self.master_otu_table_ms = table_factory(\n array([[1.0, 2.0], [2.0, 5.0]]), ['Env1', 'Env2'], ['a', 'b'],\n sample_metadata=None,\n observation_metadata=[{'taxonomy':'foo;bar;baz'},\n {'taxonomy':'foo;baz;bar'}], table_id=None,\n constructor=SparseOTUTable)\n\n def tearDown(self):\n \"\"\"Remove temporary files/dirs.\"\"\"\n remove_files(self.files_to_remove)\n # remove directories last, so we don't get errors\n # trying to remove files which may be in the directories\n for d in self.dirs_to_remove:\n if exists(d):\n rmtree(d)\n\n def test_get_most_wanted_filtering_commands(self):\n obs = _get_most_wanted_filtering_commands('/foo', ['/a.biom',\n '/b.biom', '/c.biom'], '/rs.fna', '/gg.fasta', '/nt',\n '/map.txt', 'Env', 30, 100, 5, 0.70, 1e-4, 25, None, 55)\n self.assertEqual(obs, exp_commands)\n\n def test_get_most_wanted_filtering_commands_merged_master_otu_table(self):\n obs = _get_most_wanted_filtering_commands('/foo', ['/a.biom',\n '/b.biom', '/c.biom'], '/rs.fna', '/gg.fasta', '/nt',\n '/map.txt', 'Env', 30, 100, 5, 0.70, 1e-4, 25, '/master.biom',\n 55)\n self.assertEqual(obs, exp_commands_merged_master_otu_table)\n\n def test_get_top_n_blast_results(self):\n exp = [('New.CleanUp.ReferenceOTU969', 'gi|16|emb|Z52700.1|', 90.0),\n ('New.CleanUp.ReferenceOTU999', 'gi|7|emb|X51700.1|', 100.0),\n ('New.CleanUp.ReferenceOTU972', 'gi|7|emb|T51700.1|', 100.0)]\n obs = 
_get_top_n_blast_results(self.blast_results_lines, self.top_n,\n 1.0)\n self.assertFloatEqual(obs, exp)\n\n def test_get_top_n_blast_results_max_nt_similarity(self):\n exp = [('New.CleanUp.ReferenceOTU969', 'gi|16|emb|Z52700.1|', 90.0)]\n obs = _get_top_n_blast_results(self.blast_results_lines, self.top_n,\n 0.97)\n self.assertFloatEqual(obs, exp)\n\n obs = _get_top_n_blast_results(self.blast_results_lines, self.top_n,\n 0.90)\n self.assertFloatEqual(obs, exp)\n\n def test_get_top_n_blast_results_duplicate_blast_hits(self):\n exp = [('New.CleanUp.ReferenceOTU969', 'gi|16|emb|Z52700.1|', 90.0),\n ('New.CleanUp.ReferenceOTU972', 'gi|7|emb|T51700.1|', 95.0)]\n obs = _get_top_n_blast_results(self.blast_results_dupes_lines,\n 2, 1.0)\n self.assertFloatEqual(obs, exp)\n\n def test_get_rep_set_lookup(self):\n obs = _get_rep_set_lookup(self.rep_set_lines)\n self.assertEqual(obs, exp_rep_set_lookup)\n\n def test_format_top_n_results_table(self):\n obs = _format_top_n_results_table(self.top_n_mw, self.mw_seqs,\n self.master_otu_table_ms, self.output_dir,\n self.grouping_category, False, 8)\n\n obs_plot_paths = [fp.replace(self.output_dir, 'foo') for fp in obs[3]]\n obs_plot_data_paths = [fp.replace(self.output_dir, 'foo')\n for fp in obs[4]]\n obs = (obs[0],\n obs[1].replace(basename(normpath(self.output_dir)), 'foo'),\n obs[2],\n obs_plot_paths,\n obs_plot_data_paths)\n self.assertEqual(obs, exp_output_tables)\n\n def test_format_top_n_results_table_suppress_taxonomy(self):\n obs = _format_top_n_results_table(self.top_n_mw, self.mw_seqs,\n self.master_otu_table_ms, self.output_dir,\n self.grouping_category, True, 8)\n\n obs_plot_paths = [fp.replace(self.output_dir, 'foo') for fp in obs[3]]\n obs_plot_data_paths = [fp.replace(self.output_dir, 'foo')\n for fp in obs[4]]\n obs = (obs[0],\n obs[1].replace(basename(normpath(self.output_dir)), 'foo'),\n obs[2],\n obs_plot_paths,\n obs_plot_data_paths)\n self.assertEqual(obs, exp_output_tables_suppressed_taxonomy)\n\n def test_format_pie_chart_data(self):\n exp = ([0.6666666666666666, 0.3333333333333333],\n ['b (66.67%)', 'a (33.33%)'], ['#0000ff', '#ff0000'])\n obs = _format_pie_chart_data(['a', 'b'], [1, 2], 2)\n self.assertFloatEqual(obs, exp)\n\n obs = _format_pie_chart_data(['a', 'b'], [1.0, 2.0], 3)\n self.assertFloatEqual(obs, exp)\n\n def test_format_pie_chart_data_max_count(self):\n exp = ([1.0], ['b (100.00%)'], ['#0000ff'])\n obs = _format_pie_chart_data(['a', 'b'], [1, 2], 1)\n self.assertFloatEqual(obs, exp)\n\n def test_format_pie_chart_data_cycle_colors(self):\n exp = ([0.5, 0.5], ['a (50.00%)', '4 (50.00%)'],\n ['#ff0000', '#ff0000'])\n obs = _format_pie_chart_data(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',\n 'v', 'w', 'x', 'y', 'z', '1', '2', '3', '4'],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1], 2)\n self.assertFloatEqual(obs, exp)\n\n def test_format_legend_html(self):\n exp = ('<ul class=\"most_wanted_otus_legend\"><li><div class=\"key\" style=\"background-color:#0000ff\"></div>b (66.67%)</li><li><div class=\"key\" style=\"background-color:#ff0000\"></div>a (33.33%)</li>'\n '</ul>')\n obs = _format_legend_html(([0.6666666666666666, 0.3333333333333333],\n ['b (66.67%)', 'a (33.33%)'], ['#0000ff', '#ff0000']))\n self.assertEqual(obs, exp)\n\n\nrep_set = \"\"\"\n>New.CleanUp.ReferenceOTU999 S1_18210\nATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGGGTGCGTAGGCGGATGTTTAAGTGGGATGTGAAATCCCCGGGCTTAACCTGGGGGCTGC\n>10113 
S1_88960\nATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTCTGTTAAGTCAGATGTGAAATCCCCGGGCTCCACCTGGGCACTGC\n>10115 S2_9552\nATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTTTGATGTGAAATCCCCGGGCTTAACCTGGGAACTGC\n>102506 S1_46428\nATACGTATGGTGCAAGCGTTATCCGGATTTACTGGGTGTAAAGGGAGCGCAGGCGGTACGGCAAGTCTGATGTGAAAGTCCGGGGCTCAACCCCGGTACTGC\nAAACGTAGGGTGCAAGCGTTGTCCGGAATTACTGGGTGTAAAGGGAGCGTAGACGGCTGTGCAAGTCTGAAGTGAAAGGCATGGGCTCAACCTGTGGACTGC\n>New.CleanUp.ReferenceOTU964 S2_295794\nATACGGAGGATGCGAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGACGGCGAAGCAAGTCTGAAGTGAAAGCCCGGGGCTCAACCGCGGGACTGC\n>New.CleanUp.ReferenceOTU969 S2_166346\nATACGTAGGTCCCGAGCGTTGTCCGGATTTACTGGGTGTAAAGGGAGCGTAGACGGCATGGCAAGTCTGAAGTGAAAACCCAGGGCTCAACCCTGGGACTGC\n>New.CleanUp.ReferenceOTU972 S1_18219\nATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGGGTGCGTAGGCGGATGTTTAAGTGGGATGTGAAATCCCCGGGCTTAACCTGGGGGCTGC\n\"\"\"\n\nblast_results = \"\"\"\n# BLASTN 2.2.22 [Sep-27-2009]\n# Query: New.CleanUp.ReferenceOTU999\n# Database: small_nt\n# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\nNew.CleanUp.ReferenceOTU999\tgi|7|emb|X51700.1|\t100.00\t11\t0\t0\t92\t102\t367\t357\t0.25\t22.3\n# BLASTN 2.2.22 [Sep-27-2009]\n# Query: New.CleanUp.ReferenceOTU972\n# Database: small_nt\n# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\nNew.CleanUp.ReferenceOTU972\tgi|7|emb|T51700.1|\t100.00\t11\t0\t0\t92\t102\t367\t357\t0.25\t22.3\n# BLASTN 2.2.22 [Sep-27-2009]\n# Query: New.CleanUp.ReferenceOTU969\n# Database: small_nt\n# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\nNew.CleanUp.ReferenceOTU969\tgi|16|emb|Z52700.1|\t90.00\t13\t0\t0\t33\t45\t1604\t1616\t0.016\t26.3\n\"\"\"\n\nblast_results_dupes = \"\"\"\n# BLASTN 2.2.22 [Sep-27-2009]\n# Query: New.CleanUp.ReferenceOTU999\n# Database: small_nt\n# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\nNew.CleanUp.ReferenceOTU999\tgi|7|emb|X51700.1|\t100.00\t11\t0\t0\t92\t102\t367\t357\t0.25\t22.3\n# BLASTN 2.2.22 [Sep-27-2009]\n# Query: New.CleanUp.ReferenceOTU972\n# Database: small_nt\n# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score\nNew.CleanUp.ReferenceOTU972\tgi|7|emb|T51700.1|\t95.00\t11\t0\t0\t92\t102\t367\t357\t0.25\t22.3\nNew.CleanUp.ReferenceOTU972\tgi|7|emb|T51700.1|\t95.00\t11\t0\t0\t92\t102\t367\t357\t0.27\t22.3\n# BLASTN 2.2.22 [Sep-27-2009]\n# Query: New.CleanUp.ReferenceOTU969\n# Database: small_nt\n# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. 
end, e-value, bit score\nNew.CleanUp.ReferenceOTU969\tgi|16|emb|Z52700.1|\t90.00\t13\t0\t0\t33\t45\t1604\t1616\t0.016\t26.3\n\"\"\"\n\nexp_txt = \"\"\"OTU ID\tSequence\tTaxonomy\tNCBI nr closest match\nNew.CleanUp.ReferenceOTU972\tATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGGGTGCGTAGGCGGATGTTTAAGTGGGATGTGAAATCCCCGGGCTTAACCTGGGGGCTGC\tfoo;bar;baz\thttp://foo.com\nNew.CleanUp.ReferenceOTU969\tATACGTAGGTCCCGAGCGTTGTCCGGATTTACTGGGTGTAAAGGGAGCGTAGACGGCATGGCAAGTCTGAAGTGAAAACCCAGGGCTCAACCCTGGGACTGC\tfoo;bar;baz\thttp://foo.com\nNew.CleanUp.ReferenceOTU964\tATACGGAGGATGCGAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGACGGCGAAGCAAGTCTGAAGTGAAAGCCCGGGGCTCAACCGCGGGACTGC\tfoo;bar;baz\thttp://foo.com\nNew.CleanUp.ReferenceOTU999\tATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGGGTGCGTAGGCGGATGTTTAAGTGGGATGTGAAATCCCCGGGCTTAACCTGGGGGCTGC\tfoo;bar;bazz\thttp://foo.com\n\"\"\"\n\nexp_commands = ([[('Filtering out all GG reference OTUs', 'filter_otus_from_otu_table.py -i /a.biom -o /foo/a_novel.biom -e /gg.fasta')], [('Filtering out all OTUs that do not fall within the specified abundance threshold', 'filter_otus_from_otu_table.py -i /foo/a_novel.biom -o /foo/a_novel_min30_max100.biom -n 30 -x 100')], [('Filtering out samples that are not in the mapping file', 'filter_samples_from_otu_table.py -i /foo/a_novel_min30_max100.biom -o /foo/a_novel_min30_max100_known_samples.biom --sample_id_fp /map.txt')], [('Collapsing OTU table by Env', 'summarize_otu_by_cat.py -c /foo/a_novel_min30_max100_known_samples.biom -o /foo/a_novel_min30_max100_known_samples_Env.biom -m Env -i /map.txt')], [('Filtering out all GG reference OTUs', 'filter_otus_from_otu_table.py -i /b.biom -o /foo/b_novel.biom -e /gg.fasta')], [('Filtering out all OTUs that do not fall within the specified abundance threshold', 'filter_otus_from_otu_table.py -i /foo/b_novel.biom -o /foo/b_novel_min30_max100.biom -n 30 -x 100')], [('Filtering out samples that are not in the mapping file', 'filter_samples_from_otu_table.py -i /foo/b_novel_min30_max100.biom -o /foo/b_novel_min30_max100_known_samples.biom --sample_id_fp /map.txt')],[('Collapsing OTU table by Env', 'summarize_otu_by_cat.py -c /foo/b_novel_min30_max100_known_samples.biom -o /foo/b_novel_min30_max100_known_samples_Env.biom -m Env -i /map.txt')], [('Filtering out all GG reference OTUs', 'filter_otus_from_otu_table.py -i /c.biom -o /foo/c_novel.biom -e /gg.fasta')], [('Filtering out all OTUs that do not fall within the specified abundance threshold', 'filter_otus_from_otu_table.py -i /foo/c_novel.biom -o /foo/c_novel_min30_max100.biom -n 30 -x 100')], [('Filtering out samples that are not in the mapping file', 'filter_samples_from_otu_table.py -i /foo/c_novel_min30_max100.biom -o /foo/c_novel_min30_max100_known_samples.biom --sample_id_fp /map.txt')], [('Collapsing OTU table by Env', 'summarize_otu_by_cat.py -c /foo/c_novel_min30_max100_known_samples.biom -o /foo/c_novel_min30_max100_known_samples_Env.biom -m Env -i /map.txt')], [('Merging collapsed OTU tables', 'merge_otu_tables.py -i /foo/a_novel_min30_max100_known_samples_Env.biom,/foo/b_novel_min30_max100_known_samples_Env.biom,/foo/c_novel_min30_max100_known_samples_Env.biom -o /foo/master_otu_table_novel_min30_max100_Env.biom')], [('Filtering OTU table to include only OTUs that appear in at least 5 sample groups', 'filter_otus_from_otu_table.py -i /foo/master_otu_table_novel_min30_max100_Env.biom -o /foo/master_otu_table_novel_min30_max100_Env_ms5.biom -s 5')], [('Filtering representative set to include only the latest candidate OTUs', 'filter_fasta.py 
-f /rs.fna -o /foo/rs_candidates.fna -b /foo/master_otu_table_novel_min30_max100_Env_ms5.biom')], [(\"Running uclust to get list of sequences that don't hit the maximum GG similarity threshold\", 'parallel_pick_otus_uclust_ref.py -i /foo/rs_candidates.fna -o /foo/most_wanted_candidates_gg.fasta_0.7 -r /gg.fasta -s 0.7 -O 55')], [('Filtering candidate sequences to only include uclust failures', 'filter_fasta.py -f /foo/rs_candidates.fna -s /foo/most_wanted_candidates_gg.fasta_0.7/rs_candidates_failures.txt -o /foo/rs_candidates_failures.fna')], [('BLASTing filtered candidate sequences against nt database', 'parallel_blast.py -i /foo/rs_candidates_failures.fna -o /foo/blast_output -r /nt -D -e 0.000100 -w 25 -O 55')]], '/foo/blast_output/rs_candidates_failures_blast_out.txt', '/foo/rs_candidates_failures.fna', '/foo/master_otu_table_novel_min30_max100_Env_ms5.biom')\n\nexp_commands_merged_master_otu_table = ([[('Filtering OTU table to include only OTUs that appear in at least 5 sample groups', 'filter_otus_from_otu_table.py -i /master.biom -o /foo/master_ms5.biom -s 5')], [('Filtering representative set to include only the latest candidate OTUs', 'filter_fasta.py -f /rs.fna -o /foo/rs_candidates.fna -b /foo/master_ms5.biom')], [(\"Running uclust to get list of sequences that don't hit the maximum GG similarity threshold\", 'parallel_pick_otus_uclust_ref.py -i /foo/rs_candidates.fna -o /foo/most_wanted_candidates_gg.fasta_0.7 -r /gg.fasta -s 0.7 -O 55')], [('Filtering candidate sequences to only include uclust failures', 'filter_fasta.py -f /foo/rs_candidates.fna -s /foo/most_wanted_candidates_gg.fasta_0.7/rs_candidates_failures.txt -o /foo/rs_candidates_failures.fna')], [('BLASTing filtered candidate sequences against nt database', 'parallel_blast.py -i /foo/rs_candidates_failures.fna -o /foo/blast_output -r /nt -D -e 0.000100 -w 25 -O 55')]], '/foo/blast_output/rs_candidates_failures_blast_out.txt', '/foo/rs_candidates_failures.fna', '/foo/master_ms5.biom')\n\nexp_rep_set_lookup = {'New.CleanUp.ReferenceOTU999': 'ATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGGGTGCGTAGGCGGATGTTTAAGTGGGATGTGAAATCCCCGGGCTTAACCTGGGGGCTGC', '102506': 'ATACGTATGGTGCAAGCGTTATCCGGATTTACTGGGTGTAAAGGGAGCGCAGGCGGTACGGCAAGTCTGATGTGAAAGTCCGGGGCTCAACCCCGGTACTGCAAACGTAGGGTGCAAGCGTTGTCCGGAATTACTGGGTGTAAAGGGAGCGTAGACGGCTGTGCAAGTCTGAAGTGAAAGGCATGGGCTCAACCTGTGGACTGC', 'New.CleanUp.ReferenceOTU972': 'ATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGGGTGCGTAGGCGGATGTTTAAGTGGGATGTGAAATCCCCGGGCTTAACCTGGGGGCTGC', 'New.CleanUp.ReferenceOTU969': 'ATACGTAGGTCCCGAGCGTTGTCCGGATTTACTGGGTGTAAAGGGAGCGTAGACGGCATGGCAAGTCTGAAGTGAAAACCCAGGGCTCAACCCTGGGACTGC', 'New.CleanUp.ReferenceOTU964': 'ATACGGAGGATGCGAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGACGGCGAAGCAAGTCTGAAGTGAAAGCCCGGGGCTCAACCGCGGGACTGC', '10115': 'ATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTTTGATGTGAAATCCCCGGGCTTAACCTGGGAACTGC', '10113': 'ATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTCTGTTAAGTCAGATGTGAAATCCCCGGGCTCCACCTGGGCACTGC'}\n\nexp_output_tables = ('#\\tOTU ID\\tSequence\\tGreengenes taxonomy\\tNCBI nt closest match\\tNCBI nt % identity\\n1\\ta\\tAGT\\tfoo;bar;baz\\tT51700.1\\t87.0\\n2\\tb\\tAAGGTT\\tfoo;baz;bar\\tZ700.1\\t89.5\\n', '<table id=\"most_wanted_otus_table\" border=\"border\"><tr><th>#</th><th>OTU</th><th>Greengenes taxonomy</th><th>NCBI nt closest match</th><th>Abundance by Environment</th></tr><tr><td>1</td><td><pre>>a\\nAGT</pre></td><td>foo;bar;baz</td><td><a href=\"http://www.ncbi.nlm.nih.gov/nuccore/T51700.1\" 
target=\"_blank\">T51700.1</a> (87.0% sim.)</td><td><table><tr><td><img src=\"foo/abundance_by_Environment_a.png\" width=\"300\" height=\"300\" /></td><td><ul class=\"most_wanted_otus_legend\"><li><div class=\"key\" style=\"background-color:#0000ff\"></div>Env2 (66.67%)</li><li><div class=\"key\" style=\"background-color:#ff0000\"></div>Env1 (33.33%)</li></ul></td></tr></table></tr><tr><td>2</td><td><pre>>b\\nAAGGTT</pre></td><td>foo;baz;bar</td><td><a href=\"http://www.ncbi.nlm.nih.gov/nuccore/Z700.1\" target=\"_blank\">Z700.1</a> (89.5% sim.)</td><td><table><tr><td><img src=\"foo/abundance_by_Environment_b.png\" width=\"300\" height=\"300\" /></td><td><ul class=\"most_wanted_otus_legend\"><li><div class=\"key\" style=\"background-color:#0000ff\"></div>Env2 (71.43%)</li><li><div class=\"key\" style=\"background-color:#ff0000\"></div>Env1 (28.57%)</li></ul></td></tr></table></tr></table>', '>a\\nAGT\\n>b\\nAAGGTT\\n', ['foo/abundance_by_Environment_a.png', 'foo/abundance_by_Environment_b.png'], ['foo/abundance_by_Environment_a.p', 'foo/abundance_by_Environment_b.p'])\n\nexp_output_tables_suppressed_taxonomy = ('#\\tOTU ID\\tSequence\\tNCBI nt closest match\\tNCBI nt % identity\\n1\\ta\\tAGT\\tT51700.1\\t87.0\\n2\\tb\\tAAGGTT\\tZ700.1\\t89.5\\n', '<table id=\"most_wanted_otus_table\" border=\"border\"><tr><th>#</th><th>OTU</th><th>NCBI nt closest match</th><th>Abundance by Environment</th></tr><tr><td>1</td><td><pre>>a\\nAGT</pre></td><td><a href=\"http://www.ncbi.nlm.nih.gov/nuccore/T51700.1\" target=\"_blank\">T51700.1</a> (87.0% sim.)</td><td><table><tr><td><img src=\"foo/abundance_by_Environment_a.png\" width=\"300\" height=\"300\" /></td><td><ul class=\"most_wanted_otus_legend\"><li><div class=\"key\" style=\"background-color:#0000ff\"></div>Env2 (66.67%)</li><li><div class=\"key\" style=\"background-color:#ff0000\"></div>Env1 (33.33%)</li></ul></td></tr></table></tr><tr><td>2</td><td><pre>>b\\nAAGGTT</pre></td><td><a href=\"http://www.ncbi.nlm.nih.gov/nuccore/Z700.1\" target=\"_blank\">Z700.1</a> (89.5% sim.)</td><td><table><tr><td><img src=\"foo/abundance_by_Environment_b.png\" width=\"300\" height=\"300\" /></td><td><ul class=\"most_wanted_otus_legend\"><li><div class=\"key\" style=\"background-color:#0000ff\"></div>Env2 (71.43%)</li><li><div class=\"key\" style=\"background-color:#ff0000\"></div>Env1 (28.57%)</li></ul></td></tr></table></tr></table>', '>a\\nAGT\\n>b\\nAAGGTT\\n', ['foo/abundance_by_Environment_a.png', 'foo/abundance_by_Environment_b.png'], ['foo/abundance_by_Environment_a.p', 'foo/abundance_by_Environment_b.p'])\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cclauss/Pyto | [
"1c4ccc47e3a91e996bf6ec38c527d244de2cf7ed"
] | [
"Pyto/Samples/Pandas Plotting.py"
] | [
"\"\"\"\nAn example of plotting with Pandas.\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.DataFrame({\n 'name':['john','mary','peter','jeff','bill','lisa','jose'],\n 'age':[23,78,22,19,45,33,20],\n 'gender':['M','F','M','M','M','F','M'],\n 'state':['california','dc','california','dc','california','texas','texas'],\n 'num_children':[2,0,0,3,2,1,4],\n 'num_pets':[5,1,0,5,2,2,3]\n})\n\ndf.plot(kind='scatter',x='num_children',y='num_pets',color='red')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rudolfwilliam/satellite_image_forecasting | [
"164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc"
] | [
"drought_impact_forecasting/models/model_parts/Conv_Transformer.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom einops import rearrange\nfrom .shared import Conv_Block\nfrom ..utils.utils import zeros, mean_cube, last_frame, ENS\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(torch.stack([self.norm(x[..., i]) for i in range(x.size()[-1])], dim=-1), **kwargs)\n\nclass FeedForward(nn.Module):\n def __init__(self, kernel_size, num_hidden, dilation_rate, num_conv_layers):\n super().__init__()\n self.kernel_size = kernel_size\n self.num_hidden = num_hidden\n self.num_conv_layers = num_conv_layers\n self.dilation_rate = dilation_rate\n self.conv = Conv_Block(self.num_hidden, self.num_hidden, kernel_size=self.kernel_size,\n dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)\n\n def forward(self, x):\n return torch.stack([self.conv(x[..., i]) for i in range(x.size()[-1])], dim=-1)\n\n\nclass ConvAttention(nn.Module):\n def __init__(self, num_hidden, kernel_size, enc=True, mask=False):\n super(ConvAttention, self).__init__()\n self.enc = enc\n self.mask = mask\n self.kernel_size = kernel_size\n self.num_hidden = num_hidden\n # important note: shared convolution is intentional here\n if self.enc:\n # 3 times num_hidden for out_channels due to queries, keys & values\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=self.num_hidden, out_channels=3*self.num_hidden, kernel_size=1, padding=\"same\", padding_mode=\"reflect\")\n )\n else:\n # only 2 times num_hidden for keys & values\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=self.num_hidden, out_channels=2*self.num_hidden, kernel_size=1, padding=\"same\", padding_mode=\"reflect\")\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=self.num_hidden*2, out_channels=1, kernel_size=self.kernel_size, padding=\"same\", padding_mode=\"reflect\")\n )\n\n def forward(self, x, enc_out=None):\n # s is num queries, t is num keys/values\n b, _, _, _, s = x.shape\n if self.enc:\n t = s\n qkv_set = torch.stack([self.conv1(x[..., i]) for i in range(t)], dim=-1)\n Q, K, V = torch.split(qkv_set, self.num_hidden, dim=1)\n else:\n # x correspond to queries\n t = enc_out.size()[-1]\n kv_set = torch.stack([self.conv1(enc_out[..., i]) for i in range(t)], dim=-1) \n K, V = torch.split(kv_set, self.num_hidden, dim=1)\n Q = x\n\n K_rep = torch.stack([K] * s, dim=-2)\n V_rep = torch.stack([V] * s, dim=-1)\n Q_rep = torch.stack([Q] * t, dim=-1)\n # concatenate queries and keys for cross-channel convolution\n Q_K = torch.concat((Q_rep, K_rep), dim=1) \n if self.mask:\n # only feed in 'previous' keys & values for computing softmax\n V_out = []\n # for each query\n for i in range(t):\n Q_K_temp = rearrange(Q_K[..., :i+1, i], 'b c h w t -> (b t) c h w')\n extr_feat = rearrange(torch.squeeze(self.conv2(Q_K_temp), dim=1), '(b t) h w -> b h w t', b=b, t=i+1)\n attn_mask = F.softmax(extr_feat, dim=-1)\n # convex combination over values using weights from attention mask, per channel c\n V_out.append(torch.stack([torch.sum(torch.mul(attn_mask, V_rep[:, c, :, :, i, :i+1]), dim=-1) for c in range(V_rep.size()[1])], dim=1))\n V_out = torch.stack(V_out, dim=-1)\n else:\n Q_K = rearrange(Q_K, 'b c h w s t -> (b s t) c h w') # no convolution across time dim!\n extr_feat = 
rearrange(torch.squeeze(self.conv2(Q_K), dim=1), '(b s t) h w -> b h w t s', b=b, t=t)\n attn_mask = F.softmax(extr_feat, dim=-2)\n V_out = torch.stack([torch.sum(torch.mul(attn_mask, V_rep[:, c, ...]), dim=-2) for c in range(V_rep.size()[1])], dim=1)\n\n return V_out\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, num_hidden, img_width):\n # no differentiation should happen with respect to the params in here!\n super(PositionalEncoding, self).__init__()\n self.num_hidden = num_hidden\n self.img_width = img_width\n\n def _get_sinusoid_encoding_table(self, t, device):\n ''' Sinusoid position encoding table '''\n sinusoid_table = torch.stack([self._get_position_angle_vec(pos_i) for pos_i in range(t)], dim=0)\n sinusoid_table[:, :, 0::2] = torch.sin(sinusoid_table[:, :, 0::2]) # even dim\n sinusoid_table[:, :, 1::2] = torch.cos(sinusoid_table[:, :, 1::2]) # odd dim\n\n return torch.moveaxis(sinusoid_table, 0, -1)\n \n def _get_position_angle_vec(self, position):\n return_list = [torch.ones((1,\n self.img_width,\n self.img_width),\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")) * \n (position / np.power(10000, 2 * (hid_j // 2) / self.num_hidden[-1])) for hid_j in range(self.num_hidden[-1])]\n return torch.stack(return_list, dim=1)\n\n def forward(self, x, t, single=False):\n \"\"\"Returns entire positional encoding until step T if not single, otherwise only encoding of time step T.\"\"\"\n if not single:\n self.register_buffer('pos_table', self._get_sinusoid_encoding_table(t, x.get_device()))\n return torch.squeeze(x + self.pos_table.clone().detach(), dim=0)\n else:\n if t % 2 == 0:\n return x + torch.unsqueeze(torch.sin(self._get_position_angle_vec(t)), dim=-1).clone().detach()\n else:\n return x + torch.unsqueeze(torch.cos(self._get_position_angle_vec(t)), dim=-1).clone().detach() \n\nclass Encoder(nn.Module):\n def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width):\n super().__init__()\n self.num_hidden = num_hidden\n self.depth = depth\n self.dilation_rate = dilation_rate\n self.num_conv_layers = num_conv_layers\n self.kernel_size = kernel_size\n self.img_width = img_width\n self.layers = nn.ModuleList([])\n self.num_hidden = self.num_hidden\n for _ in range(self.depth):\n self.layers.append(nn.ModuleList([\n Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],\n ConvAttention(kernel_size=self.kernel_size, num_hidden=self.num_hidden[-1], enc=True))),\n Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],\n FeedForward(kernel_size=self.kernel_size, num_hidden=self.num_hidden[-1], \n dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)))\n ]))\n\n def forward(self, x):\n for attn, ff in self.layers:\n x = attn(x)\n x = ff(x)\n\n return x\n\nclass Decoder(nn.Module):\n def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels):\n super().__init__()\n self.layers = nn.ModuleList([])\n self.dilation_rate = dilation_rate\n self.num_conv_layers = num_conv_layers\n self.depth = depth\n self.kernel_size = kernel_size\n self.img_width = img_width\n self.num_hidden = num_hidden\n self.num_non_pred_feat = non_pred_channels\n for _ in range(self.depth):\n self.layers.append(nn.ModuleList([\n # (masked) query self-attention\n Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],\n ConvAttention(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, mask=True))),\n # encoder-decoder 
attention\n Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],\n ConvAttention(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, enc=False))),\n # feed forward\n Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],\n FeedForward(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)))\n ]))\n\n def forward(self, queries, enc_out):\n for query_attn, attn, ff in self.layers:\n queries = query_attn(queries)\n x = attn(queries, enc_out=enc_out)\n x = ff(x)\n\n return x\n\nclass Conv_Transformer(nn.Module):\n\n \"\"\"Standard, single-headed ConvTransformer like in https://arxiv.org/pdf/2011.10185.pdf\"\"\"\n\n def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels):\n super(Conv_Transformer, self).__init__()\n self.num_hidden = num_hidden\n self.depth = depth\n self.num_layers_query_feat = num_layers_query_feat\n self.dilation_rate = dilation_rate\n self.num_conv_layers = num_conv_layers\n self.kernel_size = kernel_size\n self.img_width = img_width\n self.in_channels = in_channels\n self.non_pred_channels = non_pred_channels\n self.pos_embedding = PositionalEncoding(self.num_hidden, self.img_width)\n self.Encoder = Encoder(num_hidden=self.num_hidden, depth=self.depth, dilation_rate=self.dilation_rate, \n num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size, img_width=self.img_width)\n self.Decoder = Decoder(num_hidden=self.num_hidden, depth=self.depth, dilation_rate=self.dilation_rate, \n num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size, img_width=self.img_width, non_pred_channels=self.non_pred_channels)\n self.input_feat_gen = Conv_Block(self.in_channels, self.num_hidden[-1], num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size)\n # TODO (optionally): replace this by SFFN\n self.back_to_pixel = nn.Sequential(\n nn.Conv2d(self.num_hidden[-1], 4, kernel_size=1)\n )\n\n def forward(self, frames, n_predictions):\n _, _, _, _, T = frames.size()\n feature_map = self.feature_embedding(img=frames, network=self.input_feat_gen)\n enc_in = self.pos_embedding(feature_map, T)\n # encode all input values\n enc_out = torch.concat(self.Encoder(enc_in), dim=-1)\n\n out_list = []\n queries = self.feature_embedding(img=feature_map[..., -1], network=self.query_feat_gen)\n for _ in range(n_predictions):\n dec_out = self.Decoder(queries, enc_out)\n pred = self.feature_embedding(dec_out)\n out_list.append(pred)\n queries = torch.concat((queries, pred), dim=-1)\n \n x = torch.stack(out_list, dim=-1)\n\n return x\n\n def feature_embedding(self, img, network):\n generator = network\n gen_img = []\n for i in range(img.shape[-1]):\n gen_img.append(generator(img[..., i]))\n gen_img = torch.stack(gen_img, dim=-1)\n\n return gen_img\n\nclass ENS_Conv_Transformer(Conv_Transformer):\n\n \"\"\"ConvTransformer that employs delta model and can read in non-pred future features, hence taylored to the ENS challenge.\"\"\"\n\n def __init__(self, num_hidden, output_dim, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels, baseline):\n super(ENS_Conv_Transformer, self).__init__(num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels - 1)\n # remove cloud mask\n self.in_channels = self.in_channels - 1\n self.baseline = baseline\n 
self.output_dim = output_dim\n \n def forward(self, input_tensor, non_pred_feat=None, prediction_count=1):\n baseline = eval(self.baseline + \"(input_tensor[:, 0:5, :, :, :], 4)\")\n\n b, _, width, height, T = input_tensor.size()\n\n pred_deltas = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())\n preds = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())\n baselines = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())\n\n # remove cloud mask channel for feature embedding\n feature_map = torch.concat((input_tensor[:, :4, ...], input_tensor[:, 5:, ...]), dim=1)\n features = self.feature_embedding(img=feature_map, network=self.input_feat_gen)\n \n enc_in = torch.stack([self.pos_embedding(features[i, ...], T) for i in range(b)], dim=0)\n enc_out = self.Encoder(enc_in)\n\n # first query stems from last input frame\n queries = features[..., -1:]\n baselines[..., 0] = baseline\n pred_deltas[..., 0] = self.back_to_pixel(self.Decoder(queries, enc_out)[..., 0])\n preds[..., 0] = pred_deltas[..., 0] + baselines[..., 0]\n\n for t in range(1, prediction_count):\n if self.baseline == \"mean_cube\":\n baselines[..., t] = (preds[..., t - 1] + (baselines[..., t - 1] * (T + t)))/(T + t + 1)\n if self.baseline == \"zeros\":\n pass\n else:\n baselines[..., t] = preds[..., t - 1]\n\n # concatenate with non-pred features & feature embedding & do positional encoding\n query = self.pos_embedding(self.feature_embedding(torch.concat((preds[..., t-1:t], non_pred_feat[..., t-1:t]), dim=1), network=self.input_feat_gen), t, single=True)\n queries = torch.concat((queries, query), dim=-1)\n pred_deltas[..., :t] = torch.stack([self.back_to_pixel(self.Decoder(queries, enc_out)[..., i]) for i in range(t)], dim=-1)\n\n preds[..., t] = pred_deltas[..., t] + baselines[..., t]\n\n return preds, pred_deltas, baselines\n \n def _get_device(self):\n return next(self.parameters()).device"
] | [
[
"torch.concat",
"torch.nn.functional.softmax",
"torch.sin",
"torch.moveaxis",
"numpy.power",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.LayerNorm",
"torch.mul",
"torch.cuda.is_available",
"torch.split",
"torch.stack",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GQCG-oss/libcint | [
"8bd670bc67ff3fc2d29a3ad6d061555082023661"
] | [
"testsuite/test_cint.py"
] | [
"#!/usr/bin/env python\n# $Id$\n# -*- coding: utf-8\nfrom __future__ import print_function\n\n'''\ntest libcint\n'''\n\n__author__ = \"Qiming Sun <[email protected]>\"\n\nimport sys\nimport os\nimport ctypes\nimport numpy\n\n_cint = numpy.ctypeslib.load_library('lib/libcint', '.')\n\n\nPTR_LIGHT_SPEED = 0\nPTR_COMMON_ORIG = 1\nPTR_SHIELDING_ORIG = 4\nPTR_RINV_ORIG = 4\nPTR_RINV_ZETA = 7\nPTR_ENV_START = 20\n\nCHARGE_OF = 0\nPTR_COORD = 1\nNUC_MOD_OF = 2\nPTR_ZETA = 3\nRAD_GRIDS = 4\nANG_GRIDS = 5\nATM_SLOTS = 6\n\nATOM_OF = 0\nANG_OF = 1\nNPRIM_OF = 2\nNCTR_OF = 3\nKAPPA_OF = 4\nPTR_EXP = 5\nPTR_COEFF = 6\nBAS_SLOTS = 8\n\nnatm = 4\nnbas = 0\natm = numpy.zeros((natm,ATM_SLOTS), dtype=numpy.int32)\nbas = numpy.zeros((1000,BAS_SLOTS), dtype=numpy.int32)\nenv = numpy.zeros(10000)\noff = PTR_ENV_START\nfor i in range(natm):\n atm[i, CHARGE_OF] = (i+1)*2\n atm[i, PTR_COORD] = off\n env[off+0] = .2 * (i+1)\n env[off+1] = .3 + (i+1) * .5\n env[off+2] = .1 - (i+1) * .5\n off += 3\noff0 = off\n\n# basis with kappa > 0\nnh = 0\n\nbas[nh,ATOM_OF ] = 0\nbas[nh,ANG_OF ] = 1\nbas[nh,KAPPA_OF] = 1\nbas[nh,NPRIM_OF] = 1\nbas[nh,NCTR_OF ] = 1\nbas[nh,PTR_EXP] = off\nenv[off+0] = 1\nbas[nh,PTR_COEFF] = off + 1\nenv[off+1] = 1\noff += 2\nnh += 1\n\nbas[nh,ATOM_OF ] = 1\nbas[nh,ANG_OF ] = 2\nbas[nh,KAPPA_OF] = 2\nbas[nh,NPRIM_OF] = 2\nbas[nh,NCTR_OF ] = 2\nbas[nh,PTR_EXP] = off\nenv[off+0] = 5\nenv[off+1] = 3\nbas[nh,PTR_COEFF] = off + 2\nenv[off+2] = 1\nenv[off+3] = 2\nenv[off+4] = 4\nenv[off+5] = 1\noff += 6\nnh += 1\n\nbas[nh,ATOM_OF ] = 2\nbas[nh,ANG_OF ] = 3\nbas[nh,KAPPA_OF] = 3\nbas[nh,NPRIM_OF] = 1\nbas[nh,NCTR_OF ] = 1\nbas[nh,PTR_EXP ] = off\nenv[off+0] = 1\nbas[nh,PTR_COEFF] = off + 1\nenv[off+1] = 1\noff += 2\nnh += 1\n\nbas[nh,ATOM_OF ] = 3\nbas[nh,ANG_OF ] = 4\nbas[nh,KAPPA_OF] = 4\nbas[nh,NPRIM_OF] = 1\nbas[nh,NCTR_OF ] = 1\nbas[nh,PTR_EXP ] = off\nenv[off+0] = .5\nbas[nh,PTR_COEFF] = off + 1\nenv[off+1] = 1.\noff = off + 2\nnh += 1\n\nnbas = nh\n\n# basis with kappa < 0\nn = off - off0\nfor i in range(n):\n env[off+i] = env[off0+i]\n\nfor i in range(nh):\n bas[i+nh,ATOM_OF ] = bas[i,ATOM_OF ]\n bas[i+nh,ANG_OF ] = bas[i,ANG_OF ] - 1\n bas[i+nh,KAPPA_OF] =-bas[i,KAPPA_OF]\n bas[i+nh,NPRIM_OF] = bas[i,NPRIM_OF]\n bas[i+nh,NCTR_OF ] = bas[i,NCTR_OF ]\n bas[i+nh,PTR_EXP ] = bas[i,PTR_EXP ] + n\n bas[i+nh,PTR_COEFF]= bas[i,PTR_COEFF] + n\n env[bas[i+nh,PTR_COEFF]] /= 2 * env[bas[i,PTR_EXP]]\n\nenv[bas[5,PTR_COEFF]+0] = env[bas[1,PTR_COEFF]+0] / (2 * env[bas[1,PTR_EXP]+0])\nenv[bas[5,PTR_COEFF]+1] = env[bas[1,PTR_COEFF]+1] / (2 * env[bas[1,PTR_EXP]+1])\nenv[bas[5,PTR_COEFF]+2] = env[bas[1,PTR_COEFF]+2] / (2 * env[bas[1,PTR_EXP]+0])\nenv[bas[5,PTR_COEFF]+3] = env[bas[1,PTR_COEFF]+3] / (2 * env[bas[1,PTR_EXP]+1])\n\nnatm = ctypes.c_int(natm)\nnbas = ctypes.c_int(nbas)\nc_atm = atm.ctypes.data_as(ctypes.c_void_p)\nc_bas = bas.ctypes.data_as(ctypes.c_void_p)\nc_env = env.ctypes.data_as(ctypes.c_void_p)\n\nopt = ctypes.POINTER(ctypes.c_void_p)()\n_cint.CINTlen_spinor.restype = ctypes.c_int\n\n\ndef close(v1, vref, count, place):\n return round(abs(v1-vref)/count, place) == 0\n\ndef test_int1e_sph(name, vref, dim, place):\n intor = getattr(_cint, name)\n intor.restype = ctypes.c_void_p\n op = (ctypes.c_double * (10000 * dim))()\n v1 = 0\n cnt = 0\n for j in range(nbas.value*2):\n for i in range(j+1):\n di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]\n dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]\n shls = (ctypes.c_int * 2)(i, j)\n intor(op, shls, c_atm, natm, c_bas, nbas, c_env);\n v1 += 
abs(numpy.array(op[:di*dj*dim])).sum()\n cnt += di*dj*dim\n if close(v1, vref, cnt, place):\n print(\"pass: \", name)\n else:\n print(\"* FAIL: \", name, \". err:\", '%.16g' % abs(v1-vref), \"/\", vref)\n\ndef cdouble_to_cmplx(arr):\n return numpy.array(arr)[0::2] + numpy.array(arr)[1::2] * 1j\n\ndef test_int1e_spinor(name, vref, dim, place):\n intor = getattr(_cint, name)\n intor.restype = ctypes.c_void_p\n op = (ctypes.c_double * (20000 * dim))()\n v1 = 0\n cnt = 0\n for j in range(nbas.value*2):\n for i in range(j+1):\n di = _cint.CINTlen_spinor(i, c_bas, nbas) * bas[i,NCTR_OF]\n dj = _cint.CINTlen_spinor(j, c_bas, nbas) * bas[j,NCTR_OF]\n shls = (ctypes.c_int * 2)(i, j)\n intor(op, shls, c_atm, natm, c_bas, nbas, c_env);\n v1 += abs(cdouble_to_cmplx(op[:di*dj*dim*2])).sum()\n cnt += di*dj*dim*2\n if close(v1, vref, cnt, place):\n print(\"pass: \", name)\n else:\n print(\"* FAIL: \", name, \". err:\", '%.16g' % abs(v1-vref), \"/\", vref)\n\ndef max_loc(arr):\n loc = []\n maxi = arr.argmax()\n n = maxi\n for i in arr.shape:\n loc.append(n % i)\n n /= i\n loc.reverse()\n return maxi, loc\n\ndef test_comp1e_spinor(name1, name_ref, shift, dim, place):\n intor = getattr(_cint, name1)\n intor.restype = ctypes.c_void_p\n intor_ref = getattr(_cint, name_ref)\n intor_ref.restype = ctypes.c_void_p\n op = (ctypes.c_double * (20000 * dim))()\n op_ref = (ctypes.c_double * (20000 * dim))()\n\n pfac = 1\n if shift[0] > 0:\n pfac *= -1j\n if shift[1] > 0:\n pfac *= 1j\n\n for j in range(nbas.value*2 - shift[1]):\n for i in range(min(nbas.value*2-shift[0],j+1)):\n di = _cint.CINTlen_spinor(i, c_bas, nbas) * bas[i,NCTR_OF]\n dj = _cint.CINTlen_spinor(j, c_bas, nbas) * bas[j,NCTR_OF]\n shls = (ctypes.c_int * 2)(i+shift[0], j+shift[1])\n intor(op, shls, c_atm, natm, c_bas, nbas, c_env);\n shls_ref = (ctypes.c_int * 2)(i, j)\n intor_ref(op_ref, shls_ref, c_atm, natm, c_bas, nbas, c_env);\n dd = abs(pfac * cdouble_to_cmplx(op[:di*dj*dim*2]).reshape(di,dj,dim)\n - cdouble_to_cmplx(op_ref[:di*dj*dim*2]).reshape(di,dj,dim))\n if numpy.round(dd, place).sum():\n maxi = dd.argmax()\n print(\"* FAIL: \", name1, \"/\", name_ref, \". shell:\", i, j, \\\n \"err:\", dd.flatten()[maxi], \\\n \"/\", op_ref[maxi*2]+op_ref[maxi*2+1]*1j)\n return\n print(\"pass: \", name1, \"/\", name_ref)\n\n####################\ndef test_int2e_sph(name, vref, dim, place):\n intor = getattr(_cint, name)\n intor.restype = ctypes.c_void_p\n op = (ctypes.c_double * (1000000 * dim))()\n v1 = 0\n cnt = 0\n for l in range(nbas.value*2):\n for k in range(l+1):\n for j in range(nbas.value*2):\n for i in range(j+1):\n di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]\n dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]\n dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]\n dl = (bas[l,ANG_OF] * 2 + 1) * bas[l,NCTR_OF]\n shls = (ctypes.c_int * 4)(i, j, k, l)\n intor(op, shls, c_atm, natm, c_bas, nbas, c_env, opt);\n v1 += abs(numpy.array(op[:di*dj*dk*dl*dim])).sum()\n cnt += di*dj*dk*dl*dim\n if close(v1, vref, cnt, place):\n print(\"pass: \", name)\n else:\n print(\"* FAIL: \", name, \". 
err:\", '%.16g' % abs(v1-vref), \"/\", vref)\n\ndef test_int2e_spinor(name, vref, dim, place):\n intor = getattr(_cint, name)\n intor.restype = ctypes.c_void_p\n op = (ctypes.c_double * (2000000 * dim))()\n v1 = 0\n cnt = 0\n for l in range(nbas.value*2):\n for k in range(l+1):\n for j in range(nbas.value*2):\n for i in range(j+1):\n di = _cint.CINTlen_spinor(i, c_bas, nbas) * bas[i,NCTR_OF]\n dj = _cint.CINTlen_spinor(j, c_bas, nbas) * bas[j,NCTR_OF]\n dk = _cint.CINTlen_spinor(k, c_bas, nbas) * bas[k,NCTR_OF]\n dl = _cint.CINTlen_spinor(l, c_bas, nbas) * bas[l,NCTR_OF]\n shls = (ctypes.c_int * 4)(i, j, k, l)\n intor(op, shls, c_atm, natm, c_bas, nbas, c_env, opt);\n v1 += abs(cdouble_to_cmplx(op[:di*dj*dk*dl*dim*2])).sum()\n cnt += di*dj*dk*dl*dim*2\n if close(v1, vref, cnt, place):\n print(\"pass: \", name)\n else:\n print(\"* FAIL: \", name, \". err:\", '%.16g' % abs(v1-vref), \"/\", vref)\n\ndef test_comp2e_spinor(name1, name_ref, shift, dim, place):\n intor = getattr(_cint, name1)\n intor.restype = ctypes.c_void_p\n intor_ref = getattr(_cint, name_ref)\n intor_ref.restype = ctypes.c_void_p\n op = (ctypes.c_double * (2000000 * dim))()\n op_ref = (ctypes.c_double * (2000000 * dim))()\n\n pfac = 1\n if shift[0] > 0:\n pfac *= -1j\n if shift[1] > 0:\n pfac *= 1j\n if shift[2] > 0:\n pfac *= -1j\n if shift[3] > 0:\n pfac *= 1j\n\n for l in range(nbas.value*2 - shift[3]):\n for k in range(min(nbas.value*2-shift[0],l+1)):\n for j in range(nbas.value*2 - shift[1]):\n for i in range(min(nbas.value*2-shift[0],j+1)):\n di = _cint.CINTlen_spinor(i, c_bas, nbas) * bas[i,NCTR_OF]\n dj = _cint.CINTlen_spinor(j, c_bas, nbas) * bas[j,NCTR_OF]\n dk = _cint.CINTlen_spinor(k, c_bas, nbas) * bas[k,NCTR_OF]\n dl = _cint.CINTlen_spinor(l, c_bas, nbas) * bas[l,NCTR_OF]\n shls = (ctypes.c_int * 4)(i+shift[0], j+shift[1], k+shift[2], l+shift[3])\n intor(op, shls, c_atm, natm, c_bas, nbas, c_env, opt)\n shls_ref = (ctypes.c_int * 4)(i, j, k, l)\n intor_ref(op_ref, shls_ref, c_atm, natm, c_bas, nbas, c_env, opt)\n dd = abs(pfac * cdouble_to_cmplx(op[:di*dj*dk*dl*dim*2]).reshape(di,dj,dk,dl,dim)\n - cdouble_to_cmplx(op_ref[:di*dj*dk*dl*dim*2]).reshape(di,dj,dk,dl,dim))\n if numpy.round(dd, place).sum():\n maxi = dd.argmax()\n print(\"* FAIL: \", name1, \"/\", name_ref, \". 
shell:\", i, j, k, l, \\\n \"err:\", dd.flatten()[maxi], \\\n \"/\", op_ref[maxi*2]+op_ref[maxi*2+1]*1j)\n return\n print(\"pass: \", name1, \"/\", name_ref)\n\n\n\nif __name__ == \"__main__\":\n if \"--high-prec\" in sys.argv:\n def close(v1, vref, count, place):\n return round(abs(v1-vref), place) == 0\n\n for f in (('cint1e_ovlp_sph' , 320.9470780962389, 1, 11),\n ('cint1e_nuc_sph' , 3664.898206036863, 1, 10),\n ('cint1e_kin_sph' , 887.2525599069498, 1, 11),\n ('cint1e_ia01p_sph' , 210.475021425001 , 3, 12),\n ('cint1e_cg_irxp_sph', 3464.41761486531, 3, 10),\n ('cint1e_giao_irjxp_sph', 2529.89787038728, 3, 10),\n ('cint1e_igkin_sph' , 107.6417224161130, 3, 11),\n ('cint1e_igovlp_sph', 37.94968860771099, 3, 12),\n ('cint1e_ignuc_sph' , 478.5594827282386, 3, 11),\n ('cint1e_ipovlp_sph', 429.5284222008585, 3, 11),\n ('cint1e_ipkin_sph' , 1307.395170673386, 3, 10),\n ('cint1e_ipnuc_sph' , 8358.422626593954, 3, 10),\n ('cint1e_iprinv_sph', 385.1108471512923, 3, 11),\n ('cint1e_prinvxp_sph',210.475021425001, 3, 11),\n ('cint1e_z_sph' , 651.8811101988866, 1, 11),\n ('cint1e_zz_sph' , 1881.075059037941, 1, 10),\n ('cint1e_r_sph' , 1803.13043674652 , 3, 10),\n ('cint1e_rr_sph' , 13379.47937680471, 9, 9),\n ('cint1e_r2_sph' , 5237.899221349136, 1, 10),\n ):\n test_int1e_sph(*f)\n\n for f in (('cint1e_ovlp' , 284.4528456839759, 1, 11),\n ('cint1e_nuc' , 3025.766689838620, 1, 10),\n ('cint1e_gnuc' , 296.6160944673867, 3, 11),\n ('cint1e_srsr' , 1430.424389624617, 1, 10),\n ('cint1e_sr' , 240.4064385362524, 1, 11),\n ('cint1e_srsp' , 1022.805155947573, 1, 10),\n ('cint1e_spsp' , 1554.251610462129, 1, 10),\n ('cint1e_sp' , 265.2448605537422, 1, 11),\n ('cint1e_spspsp' , 1551.856568558924, 1, 10),\n ('cint1e_spnuc' , 3905.024862120781, 1, 10),\n ('cint1e_spnucsp' , 20689.33926165072, 1, 9 ),\n ('cint1e_srnucsr' , 13408.06084488522, 1, 9 ),\n ('cint1e_cg_sa10sa01', 319.6545034966355, 9, 11),\n ('cint1e_cg_sa10sp' , 1705.563585675829, 3, 10),\n ('cint1e_cg_sa10nucsp',16506.04502697362, 3, 9 ),\n ('cint1e_giao_sa10sa01' , 358.7833729392868, 9, 11),\n ('cint1e_giao_sa10sp' , 1070.550400465705, 3, 10),\n ('cint1e_giao_sa10nucsp', 12819.05472701636, 3, 9 ),\n ('cint1e_govlp' , 23.2674074483772, 3, 12),\n ('cint1e_sa01sp' , 218.244203172625, 3, 11),\n ('cint1e_spgsp' , 96.9346217249868, 3, 12),\n ('cint1e_spgnucsp' , 1659.37670007911, 3, 10),\n ('cint1e_spgsa01' , 37.8884662927634, 9, 12),\n ('cint1e_ipovlp' , 153.860148521121, 3, 12),\n ('cint1e_ipkin' , 497.249399637873, 3, 11),\n ('cint1e_ipnuc' , 4506.61348255897, 3, 10),\n ('cint1e_iprinv' , 240.036283917245, 3, 11),\n ('cint1e_ipspnucsp', 35059.4071347107, 3, 9),\n ('cint1e_ipsprinvsp',1166.20850563398, 3, 10),\n ):\n test_int1e_spinor(*f)\n\n for f in (# rys_roots for i,j,k,l=3,3,3,3 has round-off error ~ 1e-5\n ('cint2e_sph' , 56243.88328768107 , 1, 8 ),\n ('cint2e_ip1_sph', 115489.8643866550 , 3, 8 ),\n ):\n test_int2e_sph(*f)\n if \"--quick\" not in sys.argv:\n # Four tests marked with \"# *\" may fail in quadmath mode\n for f in (('cint2e_ip1_sph', 115489.8643866550 , 3, 8 ),\n ('cint2e_p1vxp1_sph', 89014.88169743448, 3, 9),\n ):\n test_int2e_sph(*f)\n for f in (('cint2e' , 37737.11365710611, 1, 8),\n ('cint2e_spsp1' , 221528.4764668166, 1, 8),\n ('cint2e_spsp1spsp2' , 1391716.876869147, 1, 7), # *\n ('cint2e_srsr1' , 178572.7398308939, 1, 8),\n ('cint2e_srsr1srsr2' , 860883.6288270953, 1, 8), # *\n ('cint2e_cg_sa10sp1' , 241519.2143647713, 3, 8),\n ('cint2e_cg_sa10sp1spsp2' , 1419443.469767018, 3, 7), # *\n ('cint2e_giao_sa10sp1' , 
153861.920807804 , 3, 8),\n ('cint2e_giao_sa10sp1spsp2', 918284.9464686266, 3, 8), # *\n ('cint2e_g1' , 3755.251591892025, 3, 10),\n ('cint2e_spgsp1' , 16626.99103794526, 3, 9 ),\n ('cint2e_g1spsp2' , 22186.56654833549, 3, 9 ),\n ('cint2e_spgsp1spsp2' , 107110.2340526177, 3, 8 ),\n ('cint2e_ip1' , 34912.85433806438, 3, 9 ),\n ('cint2e_ipspsp1' , 221092.5556043494, 3, 8 ),\n ('cint2e_ip1spsp2' , 212447.1029358293, 3, 8 ),\n ('cint2e_ipspsp1spsp2', 1443972.936563201, 3, 7 ),\n ):\n test_int2e_spinor(*f)\n\n test_comp2e_spinor('cint2e_spsp1', 'cint2e', (4,4,0,0), 1, 11)\n test_comp2e_spinor('cint2e_spsp1spsp2', 'cint2e', (4,4,4,4), 1, 11)\n test_comp2e_spinor('cint2e_spsp1spsp2', 'cint2e_spsp1', (0,0,4,4), 1, 11)\n test_comp2e_spinor('cint2e_spgsp1', 'cint2e_g1', (4,4,0,0), 3, 11)\n test_comp2e_spinor('cint2e_g1spsp2', 'cint2e_g1', (0,0,4,4), 3, 11)\n test_comp2e_spinor('cint2e_spgsp1spsp2', 'cint2e_g1', (4,4,4,4), 3, 11)\n test_comp2e_spinor('cint2e_ipspsp1', 'cint2e_ip1', (4,4,0,0), 3, 11)\n test_comp2e_spinor('cint2e_ip1spsp2', 'cint2e_ip1', (0,0,4,4), 3, 11)\n test_comp2e_spinor('cint2e_ipspsp1spsp2', 'cint2e_ip1', (4,4,4,4), 3, 11)\n\n fz = getattr(_cint, 'cint1e_z_sph')\n fzz = getattr(_cint, 'cint1e_zz_sph')\n fr = getattr(_cint, 'cint1e_r_sph')\n fr2 = getattr(_cint, 'cint1e_r2_sph')\n frr = getattr(_cint, 'cint1e_rr_sph')\n v1 = 0\n for j in range(nbas.value*2):\n for i in range(j+1):\n di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]\n dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]\n opz = numpy.empty((di,dj) , order='F')\n opzz = numpy.empty((di,dj) , order='F')\n opr = numpy.empty((di,dj,3), order='F')\n opr2 = numpy.empty((di,dj) , order='F')\n oprr = numpy.empty((di,dj,9), order='F')\n shls = (ctypes.c_int * 2)(i, j)\n fz ( opz.ctypes.data_as(ctypes.c_void_p), shls, c_atm, natm, c_bas, nbas, c_env)\n fzz(opzz.ctypes.data_as(ctypes.c_void_p), shls, c_atm, natm, c_bas, nbas, c_env)\n fr ( opr.ctypes.data_as(ctypes.c_void_p), shls, c_atm, natm, c_bas, nbas, c_env)\n fr2(opr2.ctypes.data_as(ctypes.c_void_p), shls, c_atm, natm, c_bas, nbas, c_env)\n frr(oprr.ctypes.data_as(ctypes.c_void_p), shls, c_atm, natm, c_bas, nbas, c_env)\n v1 = abs(opz-opr[:,:,2]).sum()\n v1 += abs(opzz-oprr[:,:,8]).sum()\n v1 += abs(opr2-oprr[:,:,0]-oprr[:,:,4]-oprr[:,:,8]).sum()\n if round(v1/(di*dj), 13):\n print(\"* FAIL: \", i, j, v1)\n"
] | [
[
"numpy.round",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.ctypeslib.load_library"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
entn-at/pyroomacoustics | [
"6572f6d0cde1a4de8d27caa43a7a67fc0ba91c9a"
] | [
"examples/hmm_training.py"
] | [
"'''\nThis is a simple example of training a Hidden Markov Model.\n\nWe create a random left-right model with a number of states K and emissions of\ndimension O. We sample from this model a number of examples.\n\nThen we train a second models on these examples.\n'''\n\nfrom __future__ import print_function, division\nimport numpy as np\nfrom pyroomacoustics import HMM, CircularGaussianEmission, GaussianEmission\n\nif __name__ == '__main__':\n\n K = 4 # number of states\n O = 6 # dimension of the emission vector\n model = 'left-right' # transition matrix model\n leftright_jump_max = K\n n_examples = 200\n example_size = np.arange(40,60)\n\n # create our Ground truth model\n hmm = HMM(K, \n CircularGaussianEmission(K,O), \n model=model,\n leftright_jump_max=leftright_jump_max)\n\n # Sample examples from the model\n examples = []\n for i in range(n_examples):\n N = np.random.choice(example_size)\n examples.append(hmm.generate(N))\n\n # Now create a new model and train it\n emission2 = CircularGaussianEmission(K, O, examples=examples)\n hmm2 = HMM(K, emission2, model=model)\n\n # We want to properly initialize all parameters\n X = np.concatenate(examples, axis=0) # put all examples in big array\n\n # Initialize for a left right model\n hmm2.pi[:] = 0\n hmm2.pi[0] = 1\n hmm2.A[:,:] = np.triu(np.tril(np.random.uniform(size=(K,K)), k=K))\n hmm2.A[:,:] += np.diag(np.sum(hmm2.A[:,:], axis=1)*2)\n # Normalize the distributions\n for row in hmm2.A:\n row[:] /= row.sum()\n hmm2.pi[:] /= hmm2.pi.sum()\n\n # Initialize the emission parameters to mean and variance of dataset\n emission2.mu[:,:] = np.array([np.mean(X, axis=0)]*K)\n centered = X - emission2.mu[0]\n emission2.Sigma[:,:] = np.array([np.mean(centered**2, axis=0)]*K)\n\n # Now try to fit the model\n niter = hmm2.fit(examples, tol=1e-8, max_iter=1000, verbose=True)\n\n print('EM finished in {0} iterations'.format(niter))\n\n for k in range(K):\n print('True mu:',hmm.emission.mu[k],'Estimated:',hmm2.emission.mu[k])\n\n print('hmm A:',hmm.A)\n print('hmm2 A:',hmm2.A)\n\n"
] | [
[
"numpy.random.choice",
"numpy.arange",
"numpy.concatenate",
"numpy.mean",
"numpy.random.uniform",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akleinau/pgmpy | [
"24279929a28082ea994c52f3d165ca63fc56b02b"
] | [
"pgmpy/tests/test_sampling/test_continuous_sampling.py"
] | [
"import sys\nimport unittest\n\nimport numpy as np\n\nfrom pgmpy.factors.distributions import GaussianDistribution as JGD\nfrom pgmpy.sampling import (\n HamiltonianMC as HMC,\n HamiltonianMCDA as HMCda,\n GradLogPDFGaussian,\n NoUTurnSampler as NUTS,\n NoUTurnSamplerDA as NUTSda,\n)\n\n\nclass TestHMCInference(unittest.TestCase):\n def setUp(self):\n mean = [-1, 1, -1]\n covariance = np.array([[3, 0.8, 0.2], [0.8, 2, 0.3], [0.2, 0.3, 1]])\n self.test_model = JGD([\"x\", \"y\", \"z\"], mean, covariance)\n self.hmc_sampler = HMCda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian)\n\n def test_errors(self):\n with self.assertRaises(TypeError):\n HMCda(model=self.test_model, grad_log_pdf=1)\n with self.assertRaises(TypeError):\n HMCda(\n model=self.test_model,\n grad_log_pdf=GradLogPDFGaussian,\n simulate_dynamics=1,\n )\n with self.assertRaises(ValueError):\n HMCda(model=self.test_model, delta=-1)\n with self.assertRaises(TypeError):\n self.hmc_sampler.sample(\n initial_pos=1, num_adapt=1, num_samples=1, trajectory_length=1\n )\n with self.assertRaises(TypeError):\n self.hmc_sampler.generate_sample(1, 1, 1, 1).send(None)\n with self.assertRaises(TypeError):\n HMC(model=self.test_model).sample(\n initial_pos=1, num_samples=1, trajectory_length=1\n )\n with self.assertRaises(TypeError):\n HMC(model=self.test_model).generate_sample(1, 1, 1).send(None)\n\n def test_acceptance_prob(self):\n acceptance_probability = self.hmc_sampler._acceptance_prob(\n np.array([1, 2, 3]),\n np.array([2, 3, 4]),\n np.array([1, -1, 1]),\n np.array([0, 0, 0]),\n )\n np.testing.assert_almost_equal(acceptance_probability, 0.0347363)\n\n def test_find_resonable_stepsize(self):\n np.random.seed(987654321)\n stepsize = self.hmc_sampler._find_reasonable_stepsize(np.array([-1, 1, -1]))\n np.testing.assert_almost_equal(stepsize, 2.0)\n\n def test_adapt_params(self):\n stepsize, stepsize_bar, h_bar = self.hmc_sampler._adapt_params(\n 0.0025, 1, 1, np.log(0.025), 2, 1\n )\n np.testing.assert_almost_equal(stepsize, 3.13439452e-13)\n np.testing.assert_almost_equal(stepsize_bar, 3.6742481e-08)\n np.testing.assert_almost_equal(h_bar, 0.8875)\n\n def test_sample(self):\n # Seeding is done for _find_reasonable_stepsize method\n # Testing sample method simple HMC\n np.random.seed(3124141)\n samples = self.hmc_sampler.sample(\n initial_pos=[0.3, 0.4, 0.2],\n num_adapt=0,\n num_samples=10000,\n trajectory_length=4,\n )\n covariance = np.cov(samples.values.T)\n self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)\n\n # Testing sample of method of HMCda\n np.random.seed(3124141)\n samples = self.hmc_sampler.sample(\n initial_pos=[0.6, 0.2, 0.8],\n num_adapt=10000,\n num_samples=10000,\n trajectory_length=4,\n )\n covariance = np.cov(samples.values.T)\n self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)\n\n # Testing generate_sample method of simple HMC\n np.random.seed(3124141)\n gen_samples = self.hmc_sampler.generate_sample(\n initial_pos=[0.3, 0.4, 0.2],\n num_adapt=0,\n num_samples=10000,\n trajectory_length=4,\n )\n samples = np.array([sample for sample in gen_samples])\n covariance = np.cov(samples.T)\n self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)\n\n # Testing sample of method of HMCda\n np.random.seed(3124141)\n gen_samples = self.hmc_sampler.generate_sample(\n initial_pos=[0.6, 0.2, 0.8],\n num_adapt=10000,\n num_samples=10000,\n trajectory_length=4,\n )\n samples = np.array([sample for sample in gen_samples])\n covariance = 
np.cov(samples.T)\n self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)\n\n def tearDown(self):\n del self.hmc_sampler\n del self.test_model\n\n\nclass TestNUTSInference(unittest.TestCase):\n def setUp(self):\n mean = np.array([-1, 1, 0])\n covariance = np.array([[6, 0.7, 0.2], [0.7, 3, 0.9], [0.2, 0.9, 1]])\n self.test_model = JGD([\"x\", \"y\", \"z\"], mean, covariance)\n self.nuts_sampler = NUTSda(\n model=self.test_model, grad_log_pdf=GradLogPDFGaussian\n )\n\n def test_errors(self):\n with self.assertRaises(TypeError):\n NUTS(model=self.test_model, grad_log_pdf=JGD)\n with self.assertRaises(TypeError):\n NUTS(\n model=self.test_model,\n grad_log_pdf=None,\n simulate_dynamics=GradLogPDFGaussian,\n )\n with self.assertRaises(ValueError):\n NUTSda(model=self.test_model, delta=-0.2, grad_log_pdf=None)\n with self.assertRaises(ValueError):\n NUTSda(model=self.test_model, delta=1.1, grad_log_pdf=GradLogPDFGaussian)\n with self.assertRaises(TypeError):\n NUTS(self.test_model, GradLogPDFGaussian).sample(\n initial_pos={1, 1, 1}, num_samples=1\n )\n with self.assertRaises(ValueError):\n NUTS(self.test_model, GradLogPDFGaussian).sample(\n initial_pos=[1, 1], num_samples=1\n )\n with self.assertRaises(TypeError):\n NUTSda(self.test_model, GradLogPDFGaussian).sample(\n initial_pos=1, num_samples=1, num_adapt=1\n )\n with self.assertRaises(ValueError):\n NUTSda(self.test_model, GradLogPDFGaussian).sample(\n initial_pos=[1, 1, 1, 1], num_samples=1, num_adapt=1\n )\n with self.assertRaises(TypeError):\n NUTS(self.test_model, GradLogPDFGaussian).generate_sample(\n initial_pos=0.1, num_samples=1\n ).send(None)\n with self.assertRaises(ValueError):\n NUTS(self.test_model, GradLogPDFGaussian).generate_sample(\n initial_pos=(0, 1, 1, 1), num_samples=1\n ).send(None)\n with self.assertRaises(TypeError):\n NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(\n initial_pos=[[1, 2, 3]], num_samples=1, num_adapt=1\n ).send(None)\n with self.assertRaises(ValueError):\n NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(\n initial_pos=[1], num_samples=1, num_adapt=1\n ).send(None)\n\n @unittest.skipIf(\n sys.platform.startswith(\"win\") or sys.platform.startswith(\"darwin\"),\n reason=\"Failing on Win and Mac\",\n )\n def test_sampling(self):\n np.random.seed(1010101)\n samples = self.nuts_sampler.sample(\n initial_pos=[-0.4, 1, 3.6],\n num_adapt=0,\n num_samples=10000,\n return_type=\"recarray\",\n )\n sample_array = np.array(\n [samples[var_name] for var_name in self.test_model.variables]\n )\n sample_covariance = np.cov(sample_array)\n self.assertTrue(\n np.linalg.norm(sample_covariance - self.test_model.covariance) < 3\n )\n\n np.random.seed(1210161)\n samples = self.nuts_sampler.generate_sample(\n initial_pos=[-0.4, 1, 3.6], num_adapt=0, num_samples=10000\n )\n samples_array = np.array([sample for sample in samples])\n sample_covariance = np.cov(samples_array.T)\n self.assertTrue(\n np.linalg.norm(sample_covariance - self.test_model.covariance) < 3\n )\n\n np.random.seed(12313131)\n samples = self.nuts_sampler.sample(\n initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=10000\n )\n sample_covariance = np.cov(samples.values.T)\n self.assertTrue(\n np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4\n )\n\n np.random.seed(921312312)\n samples = self.nuts_sampler.generate_sample(\n initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=30000\n )\n samples_array = np.array([sample for sample in samples])\n sample_covariance = 
np.cov(samples_array.T)\n self.assertTrue(\n np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4\n )\n\n def tearDown(self):\n del self.test_model\n del self.nuts_sampler\n"
] | [
[
"numpy.log",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.testing.assert_almost_equal",
"numpy.cov",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ScottHoang/ml4c0 | [
"e9b7a2a41d217bf0c08388d4d17c23f7c56930c3"
] | [
"baseline/config/agents/config.py"
] | [
"import ecole as ec\nimport numpy as np\n\n\nclass ObservationFunction():\n\n def __init__(self, problem):\n self.problem = problem # to devise problem-specific observations\n\n def before_reset(self, model):\n pass\n\n def extract(self, model, done):\n return None\n\nclass Policy():\n\n def __init__(self, problem):\n self.rng = np.random.RandomState()\n self.problem = problem # to devise problem-specific policies\n\n def seed(self, seed):\n self.rng = np.random.RandomState(seed)\n\n def reset(self):\n # called before an episode starts\n pass\n\n def __call__(self, action_set, observation):\n\n if self.problem == 'item_placement':\n scip_params = {'branching/clamp': 0.4057581460701715, 'branching/lpgainnormalize': 'l',\n 'branching/midpull': 0.024561156375531357, 'branching/midpullreldomtrig': 0.44584662726953606,\n 'branching/preferbinary': 'TRUE', 'branching/scorefac': 0.440795504211412,\n 'branching/scorefunc': 'q', 'lp/colagelimit': 1734636514, 'lp/pricing': 's',\n 'lp/rowagelimit': 1131389812, 'nodeselection/childsel': 'p', 'separating/cutagelimit': 453697232,\n 'separating/maxcuts': 264981451, 'separating/maxcutsroot': 1670303952, 'separating/minortho': 0.7151858341872178,\n 'separating/minorthoroot': 0.8521039212771241,'separating/poolfreq': 54298}\n elif self.problem == 'load_balancing':\n scip_params = {'branching/clamp': 0.3811269248003202, 'branching/lpgainnormalize': 's',\n 'branching/midpull': 0.3453080682951223, 'branching/midpullreldomtrig': 0.05970995942365931,\n 'branching/preferbinary': 'TRUE', 'branching/scorefac': 0.536176849053012,\n 'branching/scorefunc': 's', 'lp/colagelimit': 888577009, 'lp/pricing': 'd',\n 'lp/rowagelimit': 1027409045, 'nodeselection/childsel': 'l', 'separating/cutagelimit': 16983954,\n 'separating/maxcuts': 688798976, 'separating/maxcutsroot': 394234897, 'separating/minortho': 0.24479292773399786,\n 'separating/minorthoroot': 0.5665907046327899, 'separating/poolfreq': 8764}\n else:\n scip_params = {'branching/clamp': 0.006590788269541181, 'branching/lpgainnormalize': 's',\n 'branching/midpull': 0.4161569073081126, 'branching/midpullreldomtrig': 0.9683848340733884,\n 'branching/preferbinary': 'FALSE', 'branching/scorefac': 0.9953185866847221,\n 'branching/scorefunc': 's', 'lp/colagelimit': 656974625, 'lp/pricing': 'd',\n 'lp/rowagelimit': 1327520915, 'nodeselection/childsel': 'i', 'separating/cutagelimit': 768058951,\n 'separating/maxcuts': 863446296, 'separating/maxcutsroot': 1930125520, 'separating/minortho': 0.5471674120926198,\n 'separating/minorthoroot': 0.5873810294839703, 'separating/poolfreq': 8888}\n\n action = scip_params\n\n return action\n"
] | [
[
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kalekundert/sgrna_sensor_designs | [
"5b1bbc823810c869d415fd4a428833c709765d54"
] | [
"sgrna_sensor/plate_reader.py"
] | [
"#!/usr/bin/env python3\n\nimport re, nonstdlib\nimport numpy as np\nimport pandas as pd\nfrom openpyxl import load_workbook\n\nclass BiotekExperiment:\n # This file format sucks.\n\n class Parser:\n\n def __init__(self, expt):\n self.expt = expt\n\n def parse_row(self, row):\n raise NotImplementedError\n\n def finish(self):\n pass\n\n class NullParser(Parser):\n\n def parse_row(self, row):\n pass\n\n class HeaderParser(Parser):\n\n header_keys = {\n 'Software Version',\n 'Experiment File Path:',\n 'Protocol File Path:',\n 'Plate Number',\n 'Date',\n 'Time',\n 'Reader Type:',\n 'Reader Serial Number:',\n 'Reading Type',\n 'Procedure Details',\n 'Plate Type',\n }\n\n def parse_row(self, row):\n key = row[0].value\n value = row[1].value\n\n if key in self.header_keys:\n attr = nonstdlib.slugify(key)\n setattr(self.expt, attr, value)\n \n class ReadParser(Parser):\n\n def parse_row(self, row):\n indent = row[0].value\n key = row[1].value\n read_pattern = re.compile('(?:(?:Blank )?Read \\d:)?(\\d+)')\n\n if indent is not None:\n return\n if key is None:\n return\n if key == 'Well':\n self.wells = [x.value for x in row[2:] if x.value]\n return\n \n read_match = read_pattern.match(str(key))\n if read_match:\n wavelength = int(read_match.group(1))\n reads = {\n k: x.value\n for k,x in zip(self.wells, row[2:])\n if x.value and x.value != '?????'\n }\n self.expt.reads[wavelength] = reads\n\n class KineticParser(Parser):\n\n def parse_row(self, row):\n title = row[0].value\n key = row[1].value\n\n if title is not None:\n self.wavelength = int(title.split(':')[-1])\n self.kinetic = []\n self.prev_minutes = 0\n\n if key == 'Time':\n self.header = ['minutes'] + [x.value for x in row[2:]]\n\n elif key is not None:\n for i, cell in enumerate(row[3:], 2):\n # Update the minutes accounting for the fact that openpyxl \n # wraps hours after 1 day. 
Fucking stupid library.\n minutes = 60 * key.hour + key.minute\n while self.prev_minutes > minutes:\n minutes += 60 * 24\n self.prev_minutes = minutes\n\n entry = {\n 'well': self.header[i],\n 'temperature': row[2].value,\n 'minutes': minutes,\n 'wavelength': self.wavelength,\n 'read': cell.value,\n }\n self.kinetic.append(entry)\n\n def finish(self):\n df = pd.DataFrame(self.kinetic)\n df[df == '?????'] = np.nan\n #df = df.dropna(axis='columns', how='all')\n self.expt.kinetic[self.wavelength] = df\n\n def __init__(self, path):\n self.reads = {}\n self.kinetic = {}\n\n wb = load_workbook(path)\n ws = wb.active\n parser = self.HeaderParser(self)\n kinetic_pattern = re.compile('^.*:\\d+')\n\n for row in ws.rows:\n key = row[0].value\n subkey = row[1].value\n\n if key == 'Results':\n parser.finish()\n parser = self.ReadParser(self)\n\n if isinstance(key, str) and kinetic_pattern.match(key):\n parser.finish()\n parser = self.KineticParser(self)\n\n parser.parse_row(row)\n\n parser.finish()\n\n def __str__(self):\n from pprint import pformat\n lines = ['BetaGalExperiment']\n for attr, value in self.__dict__.items():\n if attr.startswith('_'):\n continue\n if (attr == 'kinetic' and self.kinetic) or \\\n (attr == 'reads' and self.reads):\n lines += [f' {attr}:', f'{pformat(value)}']\n else:\n lines += [f' {attr + \":\":25s} {value}']\n return '\\n'.join(lines)\n\nclass PerkinElmerExperiment:\n\n def __init__(self, path):\n\n def find_list_sheet(wb):\n for name in wb.sheetnames:\n if name.startswith('List'):\n return wb[name]\n\n def min_from_days(days):\n return days * 24 * 60\n\n wb = load_workbook(path)\n ws = find_list_sheet(wb)\n\n # The time column is in units of days (?).\n col_titles = []\n time_cols = []\n data = {}\n\n for i, row in enumerate(ws.rows):\n cells = [x.value for x in row if x.value]\n\n if i == 0:\n col_titles = cells\n plate_col = cells.index('Plate')\n well_col = cells.index('Well')\n repeat_col = cells.index('Repeat')\n type_col = cells.index('Type')\n time_cols = [i for i,x in enumerate(cells) if x == 'Time']\n\n else:\n for time_col in time_cols:\n datum_col = time_col + 1\n datum_name = nonstdlib.slugify(col_titles[datum_col])\n datum = { #\n 'plate': cells[plate_col],\n 'well': cells[well_col],\n 'repeat': cells[repeat_col],\n 'type': cells[type_col],\n 'minutes': min_from_days(cells[time_col]),\n datum_name: cells[datum_col],\n }\n data.setdefault(datum_name, []).append(datum)\n\n self.reads = {k: pd.DataFrame(v) for k,v in data.items()}\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lgsaber/recurrent-intensity-model-experiments | [
"d56febecbb64c73a912d36e623103657c045d4cc"
] | [
"src/rim_experiments/models/hawkes_poisson.py"
] | [
"import numpy as np, pandas as pd\nimport scipy.optimize\nimport functools\nfrom ..util import ExponentiatedLowRankDataFrame\n\nclass HawkesPoisson:\n \"\"\" intensity is additive function over non-negative states \"\"\"\n def __init__(self, hawkes_model):\n self.hawkes_model = hawkes_model\n\n def fit(self, V):\n self.V = V\n H = self.hawkes_model.transform(V, state_only=True)\n X = np.vstack(H.values)\n Y = V.target_df.sum(axis=1).reindex(H.index).values\n\n self.coeffs = scipy.optimize.minimize(\n loss, np.zeros(X.shape[1]), (X, Y), options={\"disp\": True}\n ) #, method='Nelder-Mead')\n print(f\"fit loss {loss(self.coeffs.x, X, Y, 0)}\")\n return self\n\n @functools.lru_cache(1)\n def transform(self, D):\n H = self.hawkes_model.transform(D, state_only=True)\n X = np.vstack(H.values)\n intensity = np.vstack(H.values) @ np.log(1 + np.exp(self.coeffs.x))\n\n if hasattr(D, \"target_df\"):\n Y = D.target_df.sum(axis=1).reindex(H.index).values\n print(f\"transform loss {loss(self.coeffs.x, X, Y, 0)}\")\n\n return ExponentiatedLowRankDataFrame(\n np.log(intensity)[:, None], np.ones(len(D.item_df))[:, None], 1,\n index=H.index, columns=D.item_df.index)\n\n\ndef loss(x, H, Y, alpha=1e-3):\n w = np.log(1 + np.exp(x))\n Lamb = H @ w\n loglik = Y * np.log(1e-10 + Lamb) - Lamb\n return -loglik.mean() + alpha/2*(w**2).sum()\n"
] | [
[
"numpy.log",
"numpy.exp",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akweury/improved_normal_inference | [
"a10ed16f43362c15f2220345275be5c029f31198"
] | [
"common/ResNormalGuided.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.conv import _ConvNd\n\nfrom common.ResNet import ResNet\nfrom common.ResNet import Bottleneck\n\n\n# Normalized Convolution Layer\nclass GConv(_ConvNd):\n def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1),\n padding=(0, 0), dilation=(1, 1), groups=1, bias=True):\n # Call _ConvNd constructor\n super(GConv, self).__init__(in_channels, out_channels, kernel_size,\n stride, padding, dilation, False, (0, 0),\n groups, bias, padding_mode='zeros')\n\n self.conv_g = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation=dilation)\n self.conv_f = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation=dilation)\n\n self.active_f = nn.LeakyReLU(0.01)\n self.active_g = nn.Sigmoid()\n\n def forward(self, x):\n # Normalized Convolution\n x_g = self.active_g(self.conv_g(x))\n x_f = self.active_f(self.conv_f(x))\n x = x_f * x_g\n return x\n\n\nclass NormalGuided(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.__name__ = 'resng'\n kernel_down = (3, 3)\n kernel_down_2 = (5, 5)\n kernel_up = (3, 3)\n kernel_up_2 = (5, 5)\n padding_down = (1, 1)\n padding_down_2 = (2, 2)\n padding_down_3 = (4, 4)\n padding_down_4 = (8, 8)\n padding_down_5 = (16, 16)\n padding_up = (1, 1)\n padding_up_2 = (2, 2)\n stride = (1, 1)\n stride_2 = (2, 2)\n\n dilate1 = (2, 2)\n dilate2 = (4, 4)\n dilate3 = (8, 8)\n dilate4 = (16, 16)\n\n self.active_f = nn.LeakyReLU(0.01)\n self.active_g = nn.Sigmoid()\n self.active_img = nn.LeakyReLU(0.01)\n\n self.epsilon = 1e-20\n channel_size_1 = 32\n channel_size_2 = 64\n # https://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/PIRODDI1/NormConv/node2.html#:~:text=The%20idea%20of%20normalized%20convolution,them%20is%20equal%20to%20zero.\n\n self.resnet50 = ResNet(Bottleneck, layers=[1, 3, 4, 5])\n\n # branch 1\n self.dconv1 = GConv(in_ch, channel_size_1, kernel_down, stride, padding_down)\n self.dconv2 = GConv(channel_size_1, channel_size_1, kernel_down, stride, padding_down)\n self.dconv3 = GConv(channel_size_1, channel_size_1, kernel_down, stride, padding_down)\n self.dconv4 = GConv(channel_size_1, channel_size_1, kernel_down, stride_2, padding_down)\n\n self.dilated1 = GConv(channel_size_1, channel_size_1, kernel_down, stride, padding_down_2, dilate1)\n self.dilated2 = GConv(channel_size_1, channel_size_1, kernel_down, stride, padding_down_3, dilate2)\n self.dilated3 = GConv(channel_size_1, channel_size_1, kernel_down, stride, padding_down_4, dilate3)\n self.dilated4 = GConv(channel_size_1, channel_size_1, kernel_down, stride, padding_down_5, dilate4)\n\n self.uconv1 = GConv(channel_size_2, channel_size_1, kernel_up, stride, padding_up)\n self.uconv2 = GConv(channel_size_2, channel_size_1, kernel_up, stride, padding_up)\n self.uconv3 = GConv(channel_size_2, channel_size_1, kernel_up, stride, padding_up)\n self.uconv4 = GConv(channel_size_2, channel_size_1, kernel_up, stride, padding_up)\n\n self.conv1 = nn.Conv2d(channel_size_1, out_ch, (1, 1), (1, 1), (0, 0))\n self.conv2 = nn.Conv2d(out_ch, out_ch, (1, 1), (1, 1), (0, 0))\n\n\n def forward(self, x1, x_img_1, cin):\n\n x1 = self.dconv1(x1)\n x1 = self.dconv2(x1)\n x1 = self.dconv3(x1)\n\n x_img = self.resnet50(x_img_1)\n\n # x_img_1 = self.active_img(self.img_conv1(x_img_1))\n # x_img_1 = self.active_img(self.img_conv2(x_img_1))\n # x_img_1 = self.active_img(self.img_conv3(x_img_1))\n #\n # Downsample 1\n x2 = self.dconv4(x1)\n x2 = self.dconv2(x2)\n x2 = 
self.dconv3(x2)\n\n # x_img_2 = self.active_img(self.img_conv4(x_img_1))\n # x_img_2 = self.active_img(self.img_conv2(x_img_2))\n # x_img_2 = self.active_img(self.img_conv3(x_img_2))\n\n # Downsample 2\n x3 = self.dconv4(x2)\n x3 = self.dconv2(x3)\n x3 = self.dconv3(x3)\n\n # x_img_3 = self.active_img(self.img_conv4(x_img_2))\n # x_img_3 = self.active_img(self.img_conv2(x_img_3))\n # x_img_3 = self.active_img(self.img_conv3(x_img_3))\n\n # Downsample 3\n x4 = self.dconv4(x3)\n x4 = self.dconv2(x4)\n x4 = self.dconv3(x4)\n\n # x_img_4 = self.active_img(self.img_conv4(x_img_3))\n # x_img_4 = self.active_img(self.img_conv2(x_img_4))\n # x_img_4 = self.active_img(self.img_conv3(x_img_4))\n\n # dilated conv\n x4 = self.dilated1(x4)\n x4 = self.dilated2(x4)\n x4 = self.dilated3(x4)\n x4 = self.dilated4(x4)\n x4 = self.dconv2(x4)\n x4 = self.dconv3(x4)\n\n # merge image feature and vertex feature\n x4 = torch.cat((x4, x_img), 1)\n\n # Upsample 1\n x3_us = F.interpolate(x4, x3.size()[2:], mode='nearest') # 128,128\n x3_mus = self.uconv1(x3_us)\n x3 = self.uconv2(torch.cat((x3, x3_mus), 1))\n\n # Upsample 2\n x2_us = F.interpolate(x3, x2.size()[2:], mode='nearest')\n x2 = self.uconv3(torch.cat((x2, x2_us), 1))\n\n # # Upsample 3\n x1_us = F.interpolate(x2, x1.size()[2:], mode='nearest') # 512, 512\n x1 = self.uconv4(torch.cat((x1, x1_us), 1))\n\n xout = self.conv1(x1) # 512, 512\n xout = self.conv2(xout)\n\n return xout, cin\n"
] | [
[
"torch.nn.Conv2d",
"torch.cat",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EinfachTuen/waveglow | [
"1a1131365233941db9079c63814afe9fb2f96d07"
] | [
"distributed.py"
] | [
"# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\nimport os\nimport sys\nimport time\nimport subprocess\nimport argparse\n\nimport torch\nimport torch.distributed as dist\nfrom torch.autograd import Variable\n\ndef reduce_tensor(tensor, num_gpus):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n rt /= num_gpus\n return rt\n\ndef init_distributed(rank, num_gpus, group_name, dist_backend, dist_url):\n assert torch.cuda.is_available(), \"Distributed mode requires CUDA.\"\n print(\"Initializing Distributed\")\n\n # Set cuda device so everything is done on the right GPU.\n torch.cuda.set_device(rank % torch.cuda.device_count())\n print(\"device count\")\n print(torch.cuda.device_count())\n # Initialize distributed communication\n dist.init_process_group(dist_backend, init_method=dist_url,\n world_size=num_gpus, rank=rank,\n group_name=group_name)\n\ndef _flatten_dense_tensors(tensors):\n \"\"\"Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of\n same dense type.\n Since inputs are dense, the resulting tensor will be a concatenated 1D\n buffer. Element-wise operation on this buffer will be equivalent to\n operating individually.\n Arguments:\n tensors (Iterable[Tensor]): dense tensors to flatten.\n Returns:\n A contiguous 1D buffer containing input tensors.\n \"\"\"\n if len(tensors) == 1:\n return tensors[0].contiguous().view(-1)\n flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)\n return flat\n\ndef _unflatten_dense_tensors(flat, tensors):\n \"\"\"View a flat buffer using the sizes of tensors. 
Assume that tensors are of\n same dense type, and that flat is given by _flatten_dense_tensors.\n Arguments:\n flat (Tensor): flattened dense tensors to unflatten.\n tensors (Iterable[Tensor]): dense tensors whose sizes will be used to\n unflatten flat.\n Returns:\n Unflattened dense tensors with sizes same as tensors and values from\n flat.\n \"\"\"\n outputs = []\n offset = 0\n for tensor in tensors:\n numel = tensor.numel()\n outputs.append(flat.narrow(0, offset, numel).view_as(tensor))\n offset += numel\n return tuple(outputs)\n\ndef apply_gradient_allreduce(module):\n \"\"\"\n Modifies existing model to do gradient allreduce, but doesn't change class\n so you don't need \"module\"\n \"\"\"\n if not hasattr(dist, '_backend'):\n module.warn_on_half = True\n else:\n module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False\n\n for p in module.state_dict().values():\n if not torch.is_tensor(p):\n continue\n dist.broadcast(p, 0)\n\n def allreduce_params():\n if(module.needs_reduction):\n module.needs_reduction = False\n buckets = {}\n for param in module.parameters():\n if param.requires_grad and param.grad is not None:\n tp = type(param.data)\n if tp not in buckets:\n buckets[tp] = []\n buckets[tp].append(param)\n if module.warn_on_half:\n if torch.cuda.HalfTensor in buckets:\n print(\"WARNING: gloo dist backend for half parameters may be extremely slow.\" +\n \" It is recommended to use the NCCL backend in this case. This currently requires\" +\n \"PyTorch built from top of tree master.\")\n module.warn_on_half = False\n\n for tp in buckets:\n bucket = buckets[tp]\n grads = [param.grad.data for param in bucket]\n coalesced = _flatten_dense_tensors(grads)\n dist.all_reduce(coalesced)\n coalesced /= dist.get_world_size()\n for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):\n buf.copy_(synced)\n\n for param in list(module.parameters()):\n def allreduce_hook(*unused):\n Variable._execution_engine.queue_callback(allreduce_params)\n if param.requires_grad:\n param.register_hook(allreduce_hook)\n dir(param)\n\n def set_needs_reduction(self, input, output):\n self.needs_reduction = True\n\n module.register_forward_hook(set_needs_reduction)\n return module\n\n\ndef main(config, stdout_dir, args_str):\n args_list = ['train.py']\n args_list += args_str.split(' ') if len(args_str) > 0 else []\n\n args_list.append('--config={}'.format(config))\n\n num_gpus = torch.cuda.device_count()\n args_list.append('--num_gpus={}'.format(num_gpus))\n args_list.append(\"--group_name=group_{}\".format(time.strftime(\"%Y_%m_%d-%H%M%S\")))\n\n if not os.path.isdir(stdout_dir):\n os.makedirs(stdout_dir)\n os.chmod(stdout_dir, 0o775)\n\n workers = []\n\n for i in range(num_gpus):\n args_list[-2] = '--rank={}'.format(i)\n stdout = None if i == 0 else open(\n os.path.join(stdout_dir, \"GPU_{}.log\".format(i)), \"w\")\n print(args_list)\n p = subprocess.Popen([str(sys.executable)]+args_list, stdout=stdout)\n workers.append(p)\n\n for p in workers:\n p.wait()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=str, required=True,\n help='JSON file for configuration')\n parser.add_argument('-s', '--stdout_dir', type=str, default=\".\",\n help='directory to save stoud logs')\n parser.add_argument(\n '-a', '--args_str', type=str, default='',\n help='double quoted string with space separated key value pairs')\n\n args = parser.parse_args()\n main(args.config, args.stdout_dir, args.args_str)\n"
] | [
[
"torch.distributed.broadcast",
"torch.distributed.init_process_group",
"torch.autograd.Variable._execution_engine.queue_callback",
"torch.is_tensor",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.distributed.all_reduce",
"torch.distributed.get_world_size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CamilaAlvarez/tensorflow-demo | [
"57f576bafe97054046610ded7a9ce39caa7e84b4"
] | [
"create_database.py"
] | [
"import os\nimport random\nimport argparse\nimport tensorflow as tf\nfrom skimage import transform, io\nimport numpy as np\nimport yaml\nimport logging\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config', dest='config_file', required=True, help='YAML config file')\nparser.add_argument('--log', dest='log_level', default=logging.WARNING, help='Logging level')\nargs = parser.parse_args()\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=args.log_level)\nclasses = []\nimages = []\nwith open(args.config_file, 'r') as config_file:\n config = yaml.load(config_file)\n output_file = config['output-file']\n output_dir = os.path.dirname(output_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n classes_file = config['class-list']\n images_file = config['image-list']\n red_mean_pixel = float(config['mean-pixel']['r'])\n green_mean_pixel = float(config['mean-pixel']['g'])\n blue_mean_pixel = float(config['mean-pixel']['b'])\n new_image_size = (int(config['image-size']['width']), int(config['image-size']['height']))\n with open(classes_file) as classes_list, open(images_file) as image_list:\n for class_line in classes_list:\n classes.append(class_line.strip())\n for image_line in image_list:\n image_info = image_line.strip().split('\\t')\n klass = image_info[2]\n if klass not in classes:\n logging.warning('{} not in class list'.format(klass))\n logging.warning('Skipping image: {}'.format(image_info[1]))\n continue\n images.append({'path':image_info[1], 'label': classes.index(image_info[2])})\n random.shuffle(images)\n writer = tf.python_io.TFRecordWriter(output_file)\n logging.info('Opening writer {}'.format(output_file))\n for image_info in images:\n try:\n image = io.imread(image_info['path'])\n resized_image = transform.resize(image, new_image_size)\n resized_image = resized_image - [red_mean_pixel, green_mean_pixel, blue_mean_pixel]\n resized_image = resized_image.astype(np.float32)\n height, width, channels = resized_image.shape\n #channels = resized_image.split()\n #substract_mean_pixel = lambda mean_value: lambda pixel_value: pixel_value - mean_value\n #if len(channels) < 3:\n # logging.warning('Non color image found: {}'.format(image_info['path']))\n # final_image = Image.eval(channels[0], substract_mean_pixel(red_mean_pixel))\n #else:\n # final_image_red = Image.eval(channels[0], substract_mean_pixel(red_mean_pixel))\n # final_image_green = Image.eval(channels[1], substract_mean_pixel(green_mean_pixel))\n # final_image_blue = Image.eval(channels[2], substract_mean_pixel(blue_mean_pixel))\n # final_image = Image.merge('RGB', [final_image_red, final_image_green, final_image_blue])\n #image_data = io.BytesIO()\n #final_image.save(image_data, format=image.format)\n #image_bytes = image_data.getvalue()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),\n 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),\n 'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[channels])),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[image_info['label']])),\n 'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[resized_image.tostring()]))\n }))\n logging.info('Writing image to record: {}'.format(image_info['path']))\n writer.write(example.SerializeToString())\n except IOError:\n logging.warning('Image not found: {}'.format(image_info['path']))\n logging.info('Closing writer')\n writer.close()\n\n\n\n\n"
] | [
[
"tensorflow.train.Int64List",
"tensorflow.python_io.TFRecordWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |